text stringlengths 11 4.05M |
|---|
package model
// User is the persisted account model. Fields tagged `sql:"-"` are kept
// in memory only and are never written to the database.
type User struct {
	Id             int
	Username       string `sql:"not null;unique"`
	Password       string `sql:"-"` // transient plaintext; only HashedPassword would be stored if tagged
	HashedPassword []byte `sql:"-"`
	Email          string `sql:"not null;unique"`
	Firstname      string `sql:"-"`
	Lastname       string `sql:"-"`
	DisplayName    string
}

// String implements fmt.Stringer by returning the user's display name.
func (u User) String() string {
	return u.DisplayName
}
|
package example
// Var is a single initialized package variable.
var Var = 0

// Grouped string variables.
var (
	VarA = "a"
	VarB = "b"
	VarC = "c"
)

// Notype has an explicit type and no initializer (zero value "").
var Notype string

var (
	Test1 = 0
	Test2 int
)

// Iota-based constant block; the blank identifier skips a value and the
// final string constant ends the iota sequence.
const (
	C1 = iota // 0
	_         // 1 (skipped)
	C3        // 2
	C4        // 3
	C6 = ""
)

// Const is a single untyped constant.
const Const = 1

var Vartype string

// Mult groups several string constants.
const (
	ConstA = "a"
	// Test
	ConstB = "b"
	ConstC = "c"
)

// A and B are declared in a single multi-constant statement.
const A, B = 2, 3

var Empty int

// Called is initialized by a call to String (defined elsewhere in this package).
var Called = String("a")
|
package tengo
import (
"errors"
"reflect"
"testing"
)
// Test_builtinDelete exercises the delete builtin: argument-count and
// argument-type validation, plus actual key removal from maps.
// Fix: the default-case error message read "unsuporrted"; corrected to
// "unsupported".
func Test_builtinDelete(t *testing.T) {
	type args struct {
		args []Object
	}
	tests := []struct {
		name      string
		args      args
		want      Object
		wantErr   bool
		wantedErr error
		// target is the expected state of args[0] after a successful delete.
		target interface{}
	}{
		// Map
		{name: "invalid-arg", args: args{[]Object{&String{}, &String{}}}, wantErr: true,
			wantedErr: ErrInvalidArgumentType{Name: "first", Expected: "map", Found: "string"}},
		{name: "no-args", wantErr: true, wantedErr: ErrWrongNumArguments},
		{name: "empty-args", args: args{[]Object{}}, wantErr: true, wantedErr: ErrWrongNumArguments},
		{name: "3-args", args: args{[]Object{(*Map)(nil), (*String)(nil), (*String)(nil)}}, wantErr: true, wantedErr: ErrWrongNumArguments},
		{name: "nil-map-empty-key", args: args{[]Object{&Map{}, &String{}}}, want: UndefinedValue},
		{name: "nil-map-nonstr-key", args: args{[]Object{&Map{}, &Int{}}}, wantErr: true,
			wantedErr: ErrInvalidArgumentType{Name: "second", Expected: "string", Found: "int"}},
		{name: "nil-map-no-key", args: args{[]Object{&Map{}}}, wantErr: true,
			wantedErr: ErrWrongNumArguments},
		{name: "map-missing-key",
			args: args{
				[]Object{
					&Map{Value: map[string]Object{
						"key": &String{Value: "value"},
					}},
					&String{Value: "key1"},
				}},
			want:   UndefinedValue,
			target: &Map{Value: map[string]Object{"key": &String{Value: "value"}}},
		},
		{name: "map-emptied",
			args: args{
				[]Object{
					&Map{Value: map[string]Object{
						"key": &String{Value: "value"},
					}},
					&String{Value: "key"},
				}},
			want:   UndefinedValue,
			target: &Map{Value: map[string]Object{}},
		},
		{name: "map-multi-keys",
			args: args{
				[]Object{
					&Map{Value: map[string]Object{
						"key1": &String{Value: "value1"},
						"key2": &Int{Value: 10},
					}},
					&String{Value: "key1"},
				}},
			want:   UndefinedValue,
			target: &Map{Value: map[string]Object{"key2": &Int{Value: 10}}},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := builtinDelete(tt.args.args...)
			if (err != nil) != tt.wantErr {
				t.Errorf("builtinDelete() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// Fall back to string comparison for error values that do not
			// match via errors.Is.
			if tt.wantErr && !errors.Is(err, tt.wantedErr) {
				if err.Error() != tt.wantedErr.Error() {
					t.Errorf("builtinDelete() error = %v, wantedErr %v", err, tt.wantedErr)
					return
				}
			}
			if got != tt.want {
				t.Errorf("builtinDelete() = %v, want %v", got, tt.want)
				return
			}
			// On success, verify the container was mutated as expected.
			if !tt.wantErr && tt.target != nil {
				switch v := tt.args.args[0].(type) {
				case *Map, *Array:
					if !reflect.DeepEqual(tt.target, tt.args.args[0]) {
						t.Errorf("builtinDelete() objects are not equal got: %+v, want: %+v", tt.args.args[0], tt.target)
					}
				default:
					t.Errorf("builtinDelete() unsupported arg[0] type %s", v.TypeName())
					return
				}
			}
		})
	}
}
|
// @Description 发送邮件
// @Author jiangyang
// @Created 2020/11/17 4:12 下午
// Example Config:
// email:
// user:
// pass:
// host: smtp.qq.com
// port: 465
package email
import (
	"errors"

	"github.com/sirupsen/logrus"
	"gopkg.in/gomail.v2"
)
// cfg holds the package-wide mail configuration set by Init; nil until then.
var cfg *Config

// Config carries the SMTP credentials and endpoint used to send mail.
type Config struct {
	User string `json:"user" yaml:"user"`
	Pass string `json:"pass" yaml:"pass"`
	Host string `json:"host" yaml:"host"`
	Port int    `json:"port" yaml:"port"`
}
// Init stores a copy of the supplied configuration for later use by SendMail.
func Init(c Config) {
	conf := c
	cfg = &conf
	logrus.Info("email init successfully")
}

// Conn returns the currently registered mail configuration (nil before Init).
func Conn() *Config {
	return cfg
}
// 发送单封邮件
func SendMail(mailTo []string, subject string, body string) error {
m := gomail.NewMessage()
m.SetHeader("From", "<"+cfg.User+">")
m.SetHeader("To", mailTo...)
m.SetHeader("Subject", subject)
m.SetBody("text/html", body)
d := gomail.NewDialer(cfg.Host, cfg.Port, cfg.User, cfg.Pass)
err := d.DialAndSend(m)
return err
}
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file contains the implementation of the resource that manages a specific project.
package eawx
import (
"github.com/golang/glog"
"github.com/lz006/extended-awx-client-go/eawx/internal/data"
yaml "gopkg.in/yaml.v2"
)
// HostResource represents the AWX host endpoint; it embeds the generic
// Resource to inherit connection and path handling.
type HostResource struct {
	Resource
}
// NewHostResource builds a host resource bound to the given connection
// and API path.
func NewHostResource(connection *Connection, path string) *HostResource {
	r := new(HostResource)
	r.connection = connection
	r.path = path
	return r
}
// Get creates a GET request builder targeting this host resource.
func (r *HostResource) Get() *HostGetRequest {
	request := new(HostGetRequest)
	request.resource = &r.Resource
	return request
}

// HostGetRequest represents a pending GET of a single host.
type HostGetRequest struct {
	Request
}
// Send executes the GET request and maps the raw API payload onto a Host.
// It returns early on transport errors via the named results.
func (r *HostGetRequest) Send() (response *HostGetResponse, err error) {
	output := new(data.HostGetResponse)
	err = r.get(output)
	if err != nil {
		return
	}
	response = new(HostGetResponse)
	response.result = new(Host)
	response.result.id = output.Id
	response.result.name = output.Name
	// Collect the group names. BUG FIX: the previous code allocated a slice
	// of nil *string and then wrote through each element
	// ((*toStringArray[j]) = ...), which dereferences a nil pointer and
	// panics on the first group. Take the address of each element's Name
	// field instead.
	fromGroupArray := output.InventoryGroups.HostGroups.GroupArray
	toStringArray := make([]*string, len(fromGroupArray))
	for j := 0; j < len(fromGroupArray); j++ {
		toStringArray[j] = &fromGroupArray[j].Name
	}
	response.result.groups = toStringArray
	// Host variables arrive as a YAML string.
	// NOTE(review): a parse failure is only logged here, yet the non-nil
	// err is still returned to the caller via the naked return below —
	// confirm whether it should be cleared to make the parse best-effort.
	var vars *data.HostVariables
	err = yaml.Unmarshal([]byte(output.HostVars), &vars)
	if err != nil {
		glog.Warningf("Error parsing: %v", err)
	}
	if vars != nil {
		response.result.ip = vars.IP
	}
	return
}
// HostGetResponse wraps the Host produced by HostGetRequest.Send.
type HostGetResponse struct {
	result *Host
}

// Result returns the retrieved host.
func (r *HostGetResponse) Result() *Host {
	return r.result
}
|
package ionic
import (
"fmt"
"regexp"
"strings"
"github.com/ion-channel/ionic/util"
"github.com/google/uuid"
"github.com/ion-channel/ionic/aliases"
"github.com/ion-channel/tools-golang/spdx"
"github.com/ion-channel/tools-golang/spdxlib"
)
// packageInfo is an SPDX-version-agnostic snapshot of the package fields
// this file needs (shared by the 2.1 and 2.2 code paths).
type packageInfo struct {
	Name             string
	Version          string
	DownloadLocation string
	Description      string
	Organization     string // supplier, only when SupplierType == "Organization"
	CPE              string
	PURL             string
}
// packageInfoFromPackage takes either an spdx.Package2_1 or spdx.Package2_2 and returns a packageInfo object.
// This is used to convert SPDX packages to version-agnostic representations of the data we need.
// packageInfoFromPackage takes either an spdx.Package2_1 or spdx.Package2_2 and returns a packageInfo object.
// This is used to convert SPDX packages to version-agnostic representations of the data we need.
// An unrecognized type yields a zero-valued packageInfo.
func packageInfoFromPackage(spdxPackage interface{}) packageInfo {
	var name, version, downloadLocation, description, organization, cpe, purl string
	// Bind the concrete value in the type switch rather than re-asserting
	// inside each case (idiomatic Go, avoids a second type assertion).
	switch pkg := spdxPackage.(type) {
	case spdx.Package2_1:
		name = pkg.PackageName
		version = pkg.PackageVersion
		downloadLocation = pkg.PackageDownloadLocation
		description = pkg.PackageDescription
		if pkg.PackageSupplier != nil && pkg.PackageSupplier.SupplierType == "Organization" {
			organization = pkg.PackageSupplier.Supplier
		}
		// 2.1 checks the reference category explicitly; the 2.2 branch
		// below matches on reference type alone.
		for _, externalRef := range pkg.PackageExternalReferences {
			if externalRef.Category == "SECURITY" && externalRefIsCPE(externalRef.RefType) {
				cpe = externalRef.Locator
			} else if externalRef.Category == "PACKAGE-MANAGER" && externalRefIsPURL(externalRef.RefType) {
				purl = externalRef.Locator
			}
		}
	case spdx.Package2_2:
		name = pkg.PackageName
		version = pkg.PackageVersion
		downloadLocation = pkg.PackageDownloadLocation
		description = pkg.PackageDescription
		if pkg.PackageSupplier != nil && pkg.PackageSupplier.SupplierType == "Organization" {
			organization = pkg.PackageSupplier.Supplier
		}
		for _, externalRef := range pkg.PackageExternalReferences {
			if externalRefIsCPE(externalRef.RefType) {
				cpe = externalRef.Locator
			} else if externalRefIsPURL(externalRef.RefType) {
				purl = externalRef.Locator
			}
		}
	}
	return packageInfo{
		Name:             name,
		Version:          version,
		DownloadLocation: downloadLocation,
		Description:      description,
		Organization:     organization,
		CPE:              cpe,
		PURL:             purl,
	}
}
// ProjectsFromSPDX parses packages from an SPDX Document (v2.1 or v2.2) into Projects.
// The given document must be of the type *spdx.Document2_1 or *spdx.Document2_2.
// A package in the document must have a valid, resolveable PackageDownloadLocation in order to create a project
// ProjectsFromSPDX parses packages from an SPDX Document (v2.1 or v2.2) into Projects.
// The given document must be of the type *spdx.Document2_1 or *spdx.Document2_2.
// A package in the document must have a valid, resolveable PackageDownloadLocation in order to create a project.
// When includeDependencies is false only the packages described by the
// document (top-level) are converted.
func ProjectsFromSPDX(doc interface{}, includeDependencies bool) ([]Project, error) {
	// use a SPDX-version-agnostic container for tracking package info
	packageInfos := []packageInfo{}
	// Bind the typed document in the switch instead of re-asserting it.
	switch docTyped := doc.(type) {
	case *spdx.Document2_1:
		if includeDependencies {
			// just get all of the packages
			for _, spdxPackage := range docTyped.Packages {
				packageInfos = append(packageInfos, packageInfoFromPackage(*spdxPackage))
			}
		} else {
			// get only the top-level packages
			topLevelPkgIDs, err := spdxlib.GetDescribedPackageIDs2_1(docTyped)
			if err != nil {
				return nil, fmt.Errorf("failed to retrieve described packages from SPDX 2.1 document: %s", err.Error())
			}
			for _, spdxPackage := range docTyped.Packages {
				var isTopLevelPackage bool
				for _, pkgID := range topLevelPkgIDs {
					if pkgID == spdxPackage.PackageSPDXIdentifier {
						isTopLevelPackage = true
						break
					}
				}
				if !isTopLevelPackage {
					continue
				}
				packageInfos = append(packageInfos, packageInfoFromPackage(*spdxPackage))
			}
		}
	case *spdx.Document2_2:
		if includeDependencies {
			// just get all of the packages
			for _, spdxPackage := range docTyped.Packages {
				packageInfos = append(packageInfos, packageInfoFromPackage(*spdxPackage))
			}
		} else {
			// get only the top-level packages
			topLevelPkgIDs, err := spdxlib.GetDescribedPackageIDs2_2(docTyped)
			if err != nil {
				return nil, fmt.Errorf("failed to retrieve described packages from SPDX 2.2 document: %s", err.Error())
			}
			for _, spdxPackage := range docTyped.Packages {
				var isTopLevelPackage bool
				for _, pkgID := range topLevelPkgIDs {
					if pkgID == spdxPackage.PackageSPDXIdentifier {
						isTopLevelPackage = true
						break
					}
				}
				if !isTopLevelPackage {
					continue
				}
				packageInfos = append(packageInfos, packageInfoFromPackage(*spdxPackage))
			}
		}
	default:
		return nil, fmt.Errorf("wrong document type given, need *spdx.Document2_1 or *spdx.Document2_2")
	}
	projs := []Project{}
	for ii := range packageInfos {
		pkg := packageInfos[ii]
		// info we need to parse out of the SoftwareList
		var ptype, source, branch string
		tmpID := uuid.New().String()
		if pkg.DownloadLocation == "" || pkg.DownloadLocation == "NOASSERTION" || pkg.DownloadLocation == "NONE" {
			ptype = "source_unavailable"
		} else if strings.Contains(pkg.DownloadLocation, "git") {
			ptype = "git"
			// SPDX spec says that git URLs can look like "git+https://github.com/..."
			// — strip the "git+" prefix when present (TrimPrefix is a no-op
			// otherwise, replacing the manual Index==0 check).
			source = strings.TrimPrefix(pkg.DownloadLocation, "git+")
			source, branch = util.ParseGitURL(source)
		} else {
			source = pkg.DownloadLocation
			ptype = "artifact"
		}
		proj := Project{
			ID:          &tmpID,
			Branch:      &branch,
			Description: &pkg.Description,
			Type:        &ptype,
			Source:      &source,
			Name:        &pkg.Name,
			Active:      true,
			Monitor:     true,
			CPE:         pkg.CPE,
			PURL:        pkg.PURL,
		}
		// check if version, org, or name are not empty strings
		if len(pkg.Version) > 0 || len(pkg.Organization) > 0 || len(pkg.Name) > 0 {
			v := pkg.Version
			if pkg.Version == "NOASSERTION" {
				v = ""
			}
			proj.Aliases = []aliases.Alias{{
				Name:    pkg.Name,
				Org:     pkg.Organization,
				Version: v,
			}}
		}
		// make sure we don't already have an equivalent project
		if ProjectSliceContains(projs, proj) {
			continue
		}
		projs = append(projs, proj)
	}
	return projs, nil
}
// externalRefIsCPE returns true if the given external reference type refers to a CPE.
func externalRefIsCPE(externalRefType string) bool {
return strings.ToLower(externalRefType) == "cpe23type" ||
strings.ToLower(externalRefType) == "cpe22type" ||
externalRefType == "http://spdx.org/rdf/references/cpe23Type" ||
externalRefType == "http://spdx.org/rdf/references/cpe22Type"
}
// externalRefIsPURL returns true if the given external reference type refers
// to a PURL (short form or SPDX RDF reference URL).
func externalRefIsPURL(externalRefType string) bool {
	switch externalRefType {
	case "purl", "http://spdx.org/rdf/references/purl":
		return true
	default:
		return false
	}
}
// parseCreatorEmail parses an email address out of SPDX creator info.
// SPDX email comes in the form Creator: Person: My Name (myname@mail.com);
// only the first creator entry is inspected.
// Returns the empty string if no parenthesized email is found.
func parseCreatorEmail(creatorPersons []string) string {
	if len(creatorPersons) == 0 {
		return ""
	}
	re := regexp.MustCompile(`\((.*?)\)`)
	// FindStringSubmatch returns nil on no match; index 1 is the capture
	// group, so require at least two entries before using it. The old code
	// checked `len(email) > 0 && email != nil` — the nil check was redundant
	// after the length check, and len > 0 is too weak a guard for email[1].
	match := re.FindStringSubmatch(creatorPersons[0])
	if len(match) > 1 {
		return match[1]
	}
	return ""
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"github.com/yue9944882/apiserver-builder-alpha-protobuf-example/pkg/apis/simple/common"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +resource:path=deepones
// DeepOne defines a resident of innsmouth.
type DeepOne struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	Spec   DeepOneSpec   `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	Status DeepOneStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// SamplePrimitiveAlias exercises protobuf generation for a named primitive
// (int64) type.
type SamplePrimitiveAlias int64
// DeepOneSpec defines the desired state of DeepOne. The fields exercise the
// protobuf code generator across value, pointer, slice, and map shapes.
type DeepOneSpec struct {
	// fish_required defines the number of fish required by the DeepOne.
	// NOTE: the type has to be int64 instead of ambiguous int
	FishRequired int64 `json:"fish_required,omitempty" protobuf:"varint,1,opt,name=fish_required"`
	// Nested struct value and pointer forms.
	Sample        SampleElem         `json:"sample,omitempty" protobuf:"bytes,2,opt,name=sample"`
	SamplePointer *SamplePointerElem `json:"sample_pointer,omitempty" protobuf:"bytes,3,opt,name=sample_pointer"`
	// Slice of values and slice of pointers.
	SampleList        []SampleListElem         `json:"sample_list,omitempty" protobuf:"bytes,4,rep,name=sample_list"`
	SamplePointerList []*SampleListPointerElem `json:"sample_pointer_list,omitempty" protobuf:"bytes,5,rep,name=sample_pointer_list"`
	SampleMap         map[string]SampleMapElem `json:"sample_map,omitempty" protobuf:"bytes,6,rep,name=sample_map"`
	// NOTE: Maps using pointer as value type is not supported protobuf serialization
	//SamplePointerMap map[string]*SampleMapPointerElem `json:"sample_pointer_map,omitempty" protobuf:"bytes,7,rep,name=sample_pointer_map"`
	SamplePrimitiveAlias SamplePrimitiveAlias `json:"sample_primitive_alias,omitempty" protobuf:"varint,8,opt,name=sample_primitive_alias"`
	// Example of using a constant
	Const      common.CustomType            `json:"const,omitempty" protobuf:"bytes,9,opt,name=const"`
	ConstPtr   *common.CustomType           `json:"constPtr,omitempty" protobuf:"bytes,10,opt,name=constPtr"`
	ConstSlice []common.CustomType          `json:"constSlice,omitempty" protobuf:"bytes,11,rep,name=constSlice"`
	ConstMap   map[string]common.CustomType `json:"constMap,omitempty" protobuf:"bytes,12,rep,name=constMap"`
	// TODO: Fix issues with deep copy to make these work
	//ConstSlicePtr []*common.CustomType `json:"constSlicePtr,omitempty"`
	//ConstMapPtr map[string]*common.CustomType `json:"constMapPtr,omitempty"`
}
// SampleListElem is a slice element carrying a nested value slice.
type SampleListElem struct {
	Sub []SampleListSubElem `json:"sub,omitempty" protobuf:"bytes,1,rep,name=sub"`
}

// SampleListSubElem is the leaf of the value-slice chain.
type SampleListSubElem struct {
	Foo string `json:"foo,omitempty" protobuf:"bytes,1,opt,name=foo"`
}

// SampleListPointerElem is a slice element carrying a nested pointer slice.
type SampleListPointerElem struct {
	Sub []*SampleListPointerSubElem `json:"sub,omitempty" protobuf:"bytes,1,rep,name=sub"`
}

// SampleListPointerSubElem is the leaf of the pointer-slice chain.
type SampleListPointerSubElem struct {
	Foo string `json:"foo,omitempty" protobuf:"bytes,1,opt,name=foo"`
}

// SampleMapElem is a map value carrying a nested map of values.
type SampleMapElem struct {
	Sub map[string]SampleMapSubElem `json:"sub,omitempty" protobuf:"bytes,1,rep,name=sub"`
}

// SampleMapSubElem is the leaf of the map chain.
type SampleMapSubElem struct {
	Foo string `json:"foo,omitempty" protobuf:"bytes,1,opt,name=foo"`
}

// SampleMapPointerElem is intentionally empty:
// NOTE: Maps using pointer as value type is not supported protobuf serialization
type SampleMapPointerElem struct {
	//Sub map[string]*SampleMapPointerSubElem `json:"sub,omitempty" protobuf:"bytes,1,rep,name=sub"`
}

// SampleMapPointerSubElem would be the leaf of the (disabled) pointer-map chain.
type SampleMapPointerSubElem struct {
	Foo string `json:"foo,omitempty" protobuf:"bytes,1,opt,name=foo"`
}

// SamplePointerElem carries a nested struct pointer.
type SamplePointerElem struct {
	Sub *SamplePointerSubElem `json:"sub,omitempty" protobuf:"bytes,1,rep,name=sub"`
}

// SamplePointerSubElem is the leaf of the pointer chain.
type SamplePointerSubElem struct {
	Foo string `json:"foo,omitempty" protobuf:"bytes,1,opt,name=foo"`
}

// SampleElem carries a nested struct value.
type SampleElem struct {
	Sub SampleSubElem `json:"sub,omitempty" protobuf:"bytes,1,rep,name=sub"`
}

// SampleSubElem is the leaf of the value chain.
type SampleSubElem struct {
	Foo string `json:"foo,omitempty" protobuf:"bytes,1,opt,name=foo"`
}
// DeepOneStatus defines the observed state of DeepOne
type DeepOneStatus struct {
	// actual_fish defines the number of fish caught by the DeepOne.
	// NOTE: the type has to be int64 instead of ambiguous int
	ActualFish int64 `json:"actual_fish,omitempty" protobuf:"varint,1,opt,name=actual_fish"`
}
|
// Live Collection
package livecoll
|
package handler
import (
"encoding/json"
"log"
"github.com/gin-gonic/gin"
"github.com/Rakhimgaliev/tech-db/project/db"
"github.com/Rakhimgaliev/tech-db/project/models"
"github.com/jackc/pgx"
)
// handler bundles the shared pgx connection pool used by all HTTP handlers.
type handler struct {
	conn *pgx.ConnPool
}
// NewConnPool creates a handler backed by a fresh pgx pool of 3 connections.
// The process terminates if the pool cannot be established.
func NewConnPool(config *pgx.ConnConfig) *handler {
	pool, err := pgx.NewConnPool(pgx.ConnPoolConfig{
		ConnConfig:     *config,
		MaxConnections: 3,
		AfterConnect:   nil,
		AcquireTimeout: 0,
	})
	if err != nil {
		log.Fatal(err)
	}
	return &handler{conn: pool}
}
// Clear truncates all application data and answers 200 with an empty JSON
// string. On a database failure it now reports 500 — the old code dropped
// the error and returned without writing any status.
func (h *handler) Clear(context *gin.Context) {
	if err := db.Clear(h.conn); err != nil {
		context.AbortWithStatus(500)
		return
	}
	clearJSON, _ := json.Marshal("")
	context.Data(200, "application/json", clearJSON)
}
// Status reports service statistics as JSON. On a database failure it now
// reports 500 — the old code dropped the error and returned without writing
// any status.
func (h *handler) Status(context *gin.Context) {
	status := models.Status{}
	if err := db.Status(h.conn, &status); err != nil {
		context.AbortWithStatus(500)
		return
	}
	statusJSON, _ := json.Marshal(status)
	context.Data(200, "application/json", statusJSON)
}
|
// Copyright 2019 GoAdmin Core Team. All rights reserved.
// Use of this source code is governed by a Apache-2.0 style
// license that can be found in the LICENSE file.
package beego
import (
"bytes"
"errors"
"net/http"
"net/url"
"strings"
"github.com/GoAdminGroup/go-admin/adapter"
gctx "github.com/GoAdminGroup/go-admin/context"
"github.com/GoAdminGroup/go-admin/engine"
"github.com/GoAdminGroup/go-admin/modules/config"
"github.com/GoAdminGroup/go-admin/plugins"
"github.com/GoAdminGroup/go-admin/plugins/admin/models"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/constant"
"github.com/GoAdminGroup/go-admin/template/types"
"github.com/astaxie/beego"
"github.com/astaxie/beego/context"
)
// Beego structure value is a Beego GoAdmin adapter.
type Beego struct {
	adapter.BaseAdapter
	ctx *context.Context
	app *beego.App
}

// init registers this adapter with the GoAdmin engine at import time.
func init() {
	engine.Register(new(Beego))
}
// User implements the method Adapter.User.
func (bee *Beego) User(ctx interface{}) (models.UserModel, bool) {
	return bee.GetUser(ctx, bee)
}

// Use implements the method Adapter.Use.
func (bee *Beego) Use(app interface{}, plugs []plugins.Plugin) error {
	return bee.GetUse(app, plugs, bee)
}

// Content implements the method Adapter.Content. navButtons are optional
// extra buttons forwarded to the content renderer.
func (bee *Beego) Content(ctx interface{}, getPanelFn types.GetPanelFn, fn gctx.NodeProcessor, navButtons ...types.Button) {
	bee.GetContent(ctx, getPanelFn, bee, navButtons, fn)
}
// HandlerFunc is the signature of a page handler that produces a GoAdmin panel.
type HandlerFunc func(ctx *context.Context) (types.Panel, error)

// Content wraps a HandlerFunc into a beego.FilterFunc so that panel pages
// can be mounted directly on the beego router.
func Content(handler HandlerFunc) beego.FilterFunc {
	return func(ctx *context.Context) {
		engine.Content(ctx, func(ctx interface{}) (types.Panel, error) {
			return handler(ctx.(*context.Context))
		})
	}
}
// SetApp implements the method Adapter.SetApp. It rejects any value that is
// not a *beego.App.
func (bee *Beego) SetApp(app interface{}) error {
	eng, ok := app.(*beego.App)
	if !ok {
		return errors.New("beego adapter SetApp: wrong parameter")
	}
	bee.app = eng
	return nil
}
// AddHandler implements the method Adapter.AddHandler: it mounts the GoAdmin
// handler chain on the beego router for the given method and path.
func (bee *Beego) AddHandler(method, path string, handlers gctx.Handlers) {
	bee.app.Handlers.AddMethod(method, path, func(c *context.Context) {
		// Fold beego route params (":id" etc., colon stripped) into the raw
		// query string so the GoAdmin context can read them uniformly.
		// NOTE(review): values are appended without URL-escaping — confirm
		// route params can never contain reserved query characters.
		for key, value := range c.Input.Params() {
			if c.Request.URL.RawQuery == "" {
				c.Request.URL.RawQuery += strings.ReplaceAll(key, ":", "") + "=" + value
			} else {
				c.Request.URL.RawQuery += "&" + strings.ReplaceAll(key, ":", "") + "=" + value
			}
		}
		// Run the GoAdmin handler chain against the underlying request.
		ctx := gctx.NewContext(c.Request)
		ctx.SetHandlers(handlers).Next()
		// Copy the response back to beego; only the first value of each
		// header is propagated.
		for key, head := range ctx.Response.Header {
			c.ResponseWriter.Header().Add(key, head[0])
		}
		c.ResponseWriter.WriteHeader(ctx.Response.StatusCode)
		if ctx.Response.Body != nil {
			buf := new(bytes.Buffer)
			_, _ = buf.ReadFrom(ctx.Response.Body)
			c.WriteString(buf.String())
		}
	})
}
// Name implements the method Adapter.Name.
func (*Beego) Name() string {
	return "beego"
}
// SetContext implements the method Adapter.SetContext. It panics when the
// argument is not a *context.Context, mirroring the adapter contract.
func (*Beego) SetContext(contextInterface interface{}) adapter.WebFrameWork {
	ctx, ok := contextInterface.(*context.Context)
	if !ok {
		panic("beego adapter SetContext: wrong parameter")
	}
	return &Beego{ctx: ctx}
}
// Redirect implements the method Adapter.Redirect: 302 to the login URL.
func (bee *Beego) Redirect() {
	bee.ctx.Redirect(http.StatusFound, config.Url(config.GetLoginUrl()))
}

// SetContentType implements the method Adapter.SetContentType.
func (bee *Beego) SetContentType() {
	bee.ctx.ResponseWriter.Header().Set("Content-Type", bee.HTMLContentType())
}

// Write implements the method Adapter.Write. Write errors are ignored.
func (bee *Beego) Write(body []byte) {
	_, _ = bee.ctx.ResponseWriter.Write(body)
}

// GetCookie implements the method Adapter.GetCookie. The error is always nil.
func (bee *Beego) GetCookie() (string, error) {
	return bee.ctx.GetCookie(bee.CookieKey()), nil
}

// Lang implements the method Adapter.Lang, reading the "__ga_lang" query param.
func (bee *Beego) Lang() string {
	return bee.ctx.Request.URL.Query().Get("__ga_lang")
}

// Path implements the method Adapter.Path.
func (bee *Beego) Path() string {
	return bee.ctx.Request.URL.Path
}

// Method implements the method Adapter.Method.
func (bee *Beego) Method() string {
	return bee.ctx.Request.Method
}

// FormParam implements the method Adapter.FormParam. The multipart form is
// parsed with a 32 MiB in-memory limit; the parse error is ignored.
func (bee *Beego) FormParam() url.Values {
	_ = bee.ctx.Request.ParseMultipartForm(32 << 20)
	return bee.ctx.Request.PostForm
}

// IsPjax implements the method Adapter.IsPjax.
func (bee *Beego) IsPjax() bool {
	return bee.ctx.Request.Header.Get(constant.PjaxHeader) == "true"
}

// Query implements the method Adapter.Query.
func (bee *Beego) Query() url.Values {
	return bee.ctx.Request.URL.Query()
}
|
package server
import (
"fmt"
"log"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
"github.com/balchua/balsa/pkg/config"
"github.com/balchua/balsa/pkg/fsm"
rafthandler "github.com/balchua/balsa/pkg/raft"
"github.com/balchua/balsa/pkg/store"
"github.com/gorilla/mux"
"github.com/hashicorp/raft"
raftboltdb "github.com/hashicorp/raft-boltdb"
)
const (
	// maxPool is the raft TCP transport connection pool size.
	maxPool = 3
	// tcpTimeout bounds raft transport dial/apply operations.
	tcpTimeout = 10 * time.Second
	// The `retain` parameter controls how many
	// snapshots are retained. Must be at least 1.
	raftSnapShotRetain = 2
	// raftLogCacheSize is the maximum number of logs to cache in-memory.
	// This is used to reduce disk I/O for the recently committed entries.
	raftLogCacheSize = 3
)
// WebServer exposes the raft-backed key/value store over HTTP.
type WebServer struct {
	listenAddress string
	r             *mux.Router
	raftHandler   *rafthandler.Handler
	store         *store.StoreHandler
}
// JoinHandler handles requests to join the cluster. It expects "nodeId" and
// a numeric "raftPort" form value, and forwards them to the raft handler.
// Fix: the old code called log.Fatal on a malformed raftPort, which both
// misused a format verb (log.Fatal does not format) and terminated the whole
// server on bad client input; it now answers 400 Bad Request instead.
func (s *WebServer) JoinHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Println("Join handler called")
	nodeID := r.FormValue("nodeId")
	log.Printf("node is %s", nodeID)
	raftPort := r.FormValue("raftPort")
	port, err := strconv.Atoi(raftPort)
	if err != nil {
		http.Error(w, fmt.Sprintf("invalid raftPort %q: %v", raftPort, err), http.StatusBadRequest)
		return
	}
	s.raftHandler.Join(port, nodeID)
}
// SetHandler handles setting values to the store (fsm). It reads the "key"
// and "value" form fields and forwards them to the raft-backed store.
func (s *WebServer) SetHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Println("Set handler called")
	key := r.FormValue("key")
	val := r.FormValue("value")
	s.store.Store(key, val)
}
// GetHandler handles getting values from the store (fsm).
// NOTE: currently a stub — it logs the call and writes no response.
func (s *WebServer) GetHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Println("Get handler called")
}
// Start will start the http server and block; ListenAndServe always returns
// a non-nil error, which terminates the process via log.Fatal.
func (s *WebServer) Start() {
	srv := &http.Server{
		Handler: s.r,
		Addr:    s.listenAddress,
		// Good practice: enforce timeouts for servers you create!
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
	}
	log.Fatal(srv.ListenAndServe())
}
// initializeRaft wires up a single-node hashicorp/raft instance: a bolt log
// store wrapped in an in-memory log cache, a file snapshot store, a TCP
// transport bound to 127.0.0.1:<RaftPort>, and the key/value FSM. The node
// bootstraps a cluster with itself as the only server. Any setup error is
// fatal (process exit); the `return nil` statements after log.Fatal are only
// there because the compiler cannot see that Fatal never returns.
func initializeRaft(config *config.Config) *rafthandler.Handler {
	var raftBinAddr = fmt.Sprintf("127.0.0.1:%d", config.RaftPort)
	raftConf := raft.DefaultConfig()
	raftConf.LocalID = raft.ServerID(config.RaftNodeId)
	raftConf.SnapshotThreshold = 1024
	store, err := raftboltdb.NewBoltStore(filepath.Join(config.RaftVolume, "raft.dataRepo"))
	if err != nil {
		log.Fatal(err)
		return nil
	}
	// Wrap the store in a LogCache to improve performance.
	cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
	if err != nil {
		log.Fatal(err)
		return nil
	}
	snapshotStore, err := raft.NewFileSnapshotStore(config.RaftVolume, raftSnapShotRetain, os.Stdout)
	if err != nil {
		log.Fatal(err)
		return nil
	}
	tcpAddr, err := net.ResolveTCPAddr("tcp", raftBinAddr)
	if err != nil {
		log.Fatal(err)
		return nil
	}
	transport, err := raft.NewTCPTransport(raftBinAddr, tcpAddr, maxPool, tcpTimeout, os.Stdout)
	if err != nil {
		log.Fatal(err)
		return nil
	}
	// NOTE: this local variable shadows the imported fsm package below.
	fsm := fsm.NewKvFSM()
	raftServer, err := raft.NewRaft(raftConf, fsm, cacheStore, store, snapshotStore, transport)
	if err != nil {
		log.Fatal(err)
		return nil
	}
	// always start single server as a leader
	configuration := raft.Configuration{
		Servers: []raft.Server{
			{
				ID:      raft.ServerID(config.RaftNodeId),
				Address: transport.LocalAddr(),
			},
		},
	}
	raftServer.BootstrapCluster(configuration)
	return rafthandler.New(raftServer)
}
// New instantiates the web server: it initializes raft, builds the store
// handler on top of it, and registers the /join, /set and /get routes.
func New(config *config.Config) *WebServer {
	raftHandler := initializeRaft(config)
	srv := &WebServer{
		r:             mux.NewRouter(),
		listenAddress: fmt.Sprintf("%s:%d", "127.0.0.1", config.HttpPort),
		raftHandler:   raftHandler,
		store:         store.New(raftHandler.GetRaft()),
	}
	srv.r.HandleFunc("/join", srv.JoinHandler)
	srv.r.HandleFunc("/set", srv.SetHandler)
	srv.r.HandleFunc("/get", srv.GetHandler)
	return srv
}
|
package main
import (
"fmt"
)
// RankT is a card rank (Ace..King); LastRank marks the open upper bound.
type RankT int

const (
	Ace = iota // 0
	Two
	Three
	Four
	Five
	Six
	Seven
	Eight
	Nine
	Ten
	Jack
	Queen
	King
	// LastRank is one past the highest playable rank.
	LastRank
)

// Ranks lists every playable rank, in ascending order (excludes LastRank).
var Ranks = []RankT{
	Ace,
	Two,
	Three,
	Four,
	Five,
	Six,
	Seven,
	Eight,
	Nine,
	Ten,
	Jack,
	Queen,
	King,
}
// Used for sorting
type RankArr []int
func (r RankArr) Len() int {
return len(r)
}
func (r RankArr) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
func (r RankArr) Less(i, j int) bool {
return r[i] < r[j]
}
// String renders the rank as its card-face symbol ("A", "2".."10", "J", "Q",
// "K"); any out-of-range rank renders as the empty string.
func (r RankT) String() string {
	switch r {
	case Ace:
		return "A"
	case Two:
		return "2"
	case Three:
		return "3"
	case Four:
		return "4"
	case Five:
		return "5"
	case Six:
		return "6"
	case Seven:
		return "7"
	case Eight:
		return "8"
	case Nine:
		return "9"
	case Ten:
		return "10"
	case Jack:
		return "J"
	case Queen:
		return "Q"
	case King:
		return "K"
	}
	return ""
}
// SuitT is a card suit; LastSuit marks the open upper bound.
type SuitT int

const (
	Hearts = iota // 0
	Clubs
	Diamonds
	Spades
	// LastSuit is one past the highest valid suit.
	LastSuit
)

// Suits lists every valid suit (excludes LastSuit).
var Suits = []SuitT{
	Hearts,
	Clubs,
	Diamonds,
	Spades,
}
// String renders the suit as its one-letter symbol ("H", "C", "D", "S");
// any out-of-range suit renders as the empty string.
func (s SuitT) String() string {
	switch s {
	case Hearts:
		return "H"
	case Clubs:
		return "C"
	case Diamonds:
		return "D"
	case Spades:
		return "S"
	}
	return ""
}
// Card pairs a rank with a suit.
type Card struct {
	Rank RankT
	Suit SuitT
}

// String renders the card as "<rank> <suit>", e.g. "A H".
func (c *Card) String() string {
	return fmt.Sprintf("%v %v", c.Rank, c.Suit)
}
|
package btf
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"testing"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/testutils"
qt "github.com/frankban/quicktest"
)
// vmlinuxSpec returns a private copy of the running kernel's BTF spec,
// skipping the test when /sys/kernel/btf is unavailable or only the
// fallback (non-sysfs) source was used.
func vmlinuxSpec(tb testing.TB) *Spec {
	tb.Helper()

	// /sys/kernel/btf was introduced in 341dfcf8d78e ("btf: expose BTF info
	// through sysfs"), which shipped in Linux 5.4.
	testutils.SkipOnOldKernel(tb, "5.4", "vmlinux BTF in sysfs")

	spec, fallback, err := kernelSpec()
	if err != nil {
		tb.Fatal(err)
	}
	if fallback {
		tb.Fatal("/sys/kernel/btf/vmlinux is not available")
	}
	// Copy so callers can mutate the spec without poisoning the cache.
	return spec.Copy()
}
// specAndRawBTF pairs the raw vmlinux BTF blob with its parsed Spec.
type specAndRawBTF struct {
	raw  []byte
	spec *Spec
}

// vmlinuxTestdata lazily loads and parses testdata/vmlinux.btf.gz exactly
// once; subsequent calls return the memoized result.
var vmlinuxTestdata = internal.Memoize(func() (specAndRawBTF, error) {
	b, err := internal.ReadAllCompressed("testdata/vmlinux.btf.gz")
	if err != nil {
		return specAndRawBTF{}, err
	}
	spec, err := loadRawSpec(bytes.NewReader(b), binary.LittleEndian, nil)
	if err != nil {
		return specAndRawBTF{}, err
	}
	return specAndRawBTF{b, spec}, nil
})
// vmlinuxTestdataReader returns a fresh reader over the raw vmlinux
// testdata BTF blob, failing the test if the blob cannot be loaded.
func vmlinuxTestdataReader(tb testing.TB) *bytes.Reader {
	tb.Helper()

	data, err := vmlinuxTestdata()
	if err != nil {
		tb.Fatal(err)
	}
	return bytes.NewReader(data.raw)
}
// vmlinuxTestdataSpec returns a private copy of the parsed vmlinux testdata
// spec, failing the test if it cannot be loaded.
func vmlinuxTestdataSpec(tb testing.TB) *Spec {
	tb.Helper()

	data, err := vmlinuxTestdata()
	if err != nil {
		tb.Fatal(err)
	}
	return data.spec.Copy()
}
// parseELFBTF loads the BTF spec embedded in the given ELF file, failing
// the test on error.
func parseELFBTF(tb testing.TB, file string) *Spec {
	tb.Helper()

	spec, err := LoadSpec(file)
	if err != nil {
		tb.Fatal("Can't load BTF:", err)
	}
	return spec
}
// TestAnyTypesByName checks that name queries distinguish a base type from
// its "___flavour" variant, each returning exactly one result.
// Fix: the first failure message read "exactly 1 types"; corrected to
// "exactly 1 type" for grammar and consistency with the second message.
func TestAnyTypesByName(t *testing.T) {
	testutils.Files(t, testutils.Glob(t, "testdata/relocs-*.elf"), func(t *testing.T, file string) {
		spec := parseELFBTF(t, file)

		types, err := spec.AnyTypesByName("ambiguous")
		if err != nil {
			t.Fatal(err)
		}
		if len(types) != 1 {
			t.Fatalf("expected to receive exactly 1 type from querying ambiguous type, got: %v", types)
		}

		types, err = spec.AnyTypesByName("ambiguous___flavour")
		if err != nil {
			t.Fatal(err)
		}
		if len(types) != 1 {
			t.Fatalf("expected to receive exactly 1 type from querying ambiguous flavour, got: %v", types)
		}
	})
}
// TestTypeByNameAmbiguous checks that TypeByName resolves both the base
// "ambiguous" struct and its "___flavour" variant by their exact names.
func TestTypeByNameAmbiguous(t *testing.T) {
	testutils.Files(t, testutils.Glob(t, "testdata/relocs-*.elf"), func(t *testing.T, file string) {
		spec := parseELFBTF(t, file)

		var typ *Struct
		if err := spec.TypeByName("ambiguous", &typ); err != nil {
			t.Fatal(err)
		}
		if name := typ.TypeName(); name != "ambiguous" {
			t.Fatal("expected type name 'ambiguous', got:", name)
		}

		if err := spec.TypeByName("ambiguous___flavour", &typ); err != nil {
			t.Fatal(err)
		}
		if name := typ.TypeName(); name != "ambiguous___flavour" {
			t.Fatal("expected type name 'ambiguous___flavour', got:", name)
		}
	})
}
// TestTypeByName covers three contracts of Spec.TypeByName:
//  1. it rejects any destination that is not a non-nil **T where T is a
//     btf.Type;
//  2. repeated lookups of the same name return the identical pointer;
//  3. the resolved iphdr struct matches the little-endian kernel layout
//     (version is a 4-bit bitfield at offset 4 behind a __u8 typedef).
func TestTypeByName(t *testing.T) {
	spec := vmlinuxTestdataSpec(t)

	// Invalid destination kinds: none of these is a pointer to a btf.Type.
	for _, typ := range []interface{}{
		nil,
		Struct{},
		&Struct{},
		[]Struct{},
		&[]Struct{},
		map[int]Struct{},
		&map[int]Struct{},
		int(0),
		new(int),
	} {
		t.Run(fmt.Sprintf("%T", typ), func(t *testing.T) {
			// spec.TypeByName MUST fail if typ is a nil btf.Type.
			if err := spec.TypeByName("iphdr", typ); err == nil {
				t.Fatalf("TypeByName does not fail with type %T", typ)
			}
		})
	}

	// spec.TypeByName MUST return the same address for multiple calls with the same type name.
	var iphdr1, iphdr2 *Struct
	if err := spec.TypeByName("iphdr", &iphdr1); err != nil {
		t.Fatal(err)
	}
	if err := spec.TypeByName("iphdr", &iphdr2); err != nil {
		t.Fatal(err)
	}
	if iphdr1 != iphdr2 {
		t.Fatal("multiple TypeByName calls for `iphdr` name do not return the same addresses")
	}

	// It's valid to pass a *Type to TypeByName.
	typ := Type(iphdr2)
	if err := spec.TypeByName("iphdr", &typ); err != nil {
		t.Fatal("Can't look up using *Type:", err)
	}

	// Excerpt from linux/ip.h, https://elixir.bootlin.com/linux/latest/A/ident/iphdr
	//
	// struct iphdr {
	// #if defined(__LITTLE_ENDIAN_BITFIELD)
	//     __u8 ihl:4, version:4;
	// #elif defined (__BIG_ENDIAN_BITFIELD)
	//     __u8 version:4, ihl:4;
	// #else
	//     ...
	// }
	//
	// The BTF we test against is for little endian.
	m := iphdr1.Members[1]
	if m.Name != "version" {
		t.Fatal("Expected version as the second member, got", m.Name)
	}
	td, ok := m.Type.(*Typedef)
	if !ok {
		t.Fatalf("version member of iphdr should be a __u8 typedef: actual: %T", m.Type)
	}
	u8, ok := td.Type.(*Int)
	if !ok {
		t.Fatalf("__u8 typedef should point to an Int type: actual: %T", td.Type)
	}
	if m.BitfieldSize != 4 {
		t.Fatalf("incorrect bitfield size: expected: 4 actual: %d", m.BitfieldSize)
	}
	if u8.Encoding != 0 {
		t.Fatalf("incorrect encoding of an __u8 int: expected: 0 actual: %x", u8.Encoding)
	}
	if m.Offset != 4 {
		t.Fatalf("incorrect bitfield offset: expected: 4 actual: %d", m.Offset)
	}
}
// BenchmarkParseVmlinux measures parsing raw vmlinux BTF from scratch,
// rewinding the shared reader on every iteration.
func BenchmarkParseVmlinux(b *testing.B) {
	rd := vmlinuxTestdataReader(b)
	b.ReportAllocs()
	b.ResetTimer()

	for n := 0; n < b.N; n++ {
		if _, err := rd.Seek(0, io.SeekStart); err != nil {
			b.Fatal(err)
		}

		if _, err := loadRawSpec(rd, binary.LittleEndian, nil); err != nil {
			b.Fatal("Can't load BTF:", err)
		}
	}
}
// TestParseCurrentKernelBTF sanity-checks the running kernel's BTF and logs
// string-table statistics (total vs. distinct strings, average size).
func TestParseCurrentKernelBTF(t *testing.T) {
	spec := vmlinuxSpec(t)

	if len(spec.namedTypes) == 0 {
		t.Fatal("Empty kernel BTF")
	}

	totalBytes := 0
	distinct := 0
	seen := make(map[string]bool)
	for _, str := range spec.strings.strings {
		totalBytes += len(str)
		if !seen[str] {
			distinct++
			seen[str] = true
		}
	}
	t.Logf("%d strings total, %d distinct", len(spec.strings.strings), distinct)
	t.Logf("Average string size: %.0f", float64(totalBytes)/float64(len(spec.strings.strings)))
}
// TestFindVMLinux verifies that a vmlinux image can be located on the host
// and that its ELF section yields non-empty BTF.
func TestFindVMLinux(t *testing.T) {
	file, err := findVMLinux()
	testutils.SkipIfNotSupported(t, err)
	if err != nil {
		t.Fatal("Can't find vmlinux:", err)
	}
	defer file.Close()

	spec, err := loadSpecFromELF(file)
	if err != nil {
		t.Fatal("Can't load BTF:", err)
	}

	if len(spec.namedTypes) == 0 {
		t.Fatal("Empty kernel BTF")
	}
}
// TestLoadSpecFromElf loads BTF from the loader test ELFs and checks a few
// invariants: type ID 0 is Void, known named types resolve with the right
// linkage, and unknown names yield ErrNotFound.
func TestLoadSpecFromElf(t *testing.T) {
	testutils.Files(t, testutils.Glob(t, "../testdata/loader-e*.elf"), func(t *testing.T, file string) {
		spec := parseELFBTF(t, file)

		vt, err := spec.TypeByID(0)
		if err != nil {
			t.Error("Can't retrieve void type by ID:", err)
		}
		if _, ok := vt.(*Void); !ok {
			t.Errorf("Expected Void for type id 0, but got: %T", vt)
		}

		var bpfMapDef *Struct
		if err := spec.TypeByName("bpf_map_def", &bpfMapDef); err != nil {
			t.Error("Can't find bpf_map_def:", err)
		}

		var tmp *Void
		if err := spec.TypeByName("totally_bogus_type", &tmp); !errors.Is(err, ErrNotFound) {
			t.Error("TypeByName doesn't return ErrNotFound:", err)
		}

		var fn *Func
		if err := spec.TypeByName("global_fn", &fn); err != nil {
			t.Error("Can't find global_fn():", err)
		} else {
			if fn.Linkage != GlobalFunc {
				t.Error("Expected global linkage:", fn)
			}
		}

		var v *Var
		if err := spec.TypeByName("key3", &v); err != nil {
			t.Error("Can't find key3:", err)
		} else {
			if v.Linkage != GlobalVar {
				t.Error("Expected global linkage:", v)
			}
		}
	})
}
// TestVerifierError checks that loading an empty Builder surfaces a
// non-truncated VerifierError from the kernel.
func TestVerifierError(t *testing.T) {
	_, err := NewHandle(&Builder{})
	testutils.SkipIfNotSupported(t, err)
	var ve *internal.VerifierError
	if !errors.As(err, &ve) {
		t.Fatalf("expected a VerifierError, got: %v", err)
	}

	if ve.Truncated {
		t.Fatalf("expected non-truncated verifier log: %v", err)
	}
}
// TestLoadKernelSpec loads the kernel's own BTF, skipping when sysfs does
// not expose /sys/kernel/btf/vmlinux.
func TestLoadKernelSpec(t *testing.T) {
	if _, err := os.Stat("/sys/kernel/btf/vmlinux"); os.IsNotExist(err) {
		t.Skip("/sys/kernel/btf/vmlinux not present")
	}

	_, err := LoadKernelSpec()
	if err != nil {
		t.Fatal("Can't load kernel spec:", err)
	}
}
// TestGuessBTFByteOrder verifies byte-order detection on the little-endian
// vmlinux test fixture.
func TestGuessBTFByteOrder(t *testing.T) {
	bo := guessRawBTFByteOrder(vmlinuxTestdataReader(t))
	if bo != binary.LittleEndian {
		t.Fatalf("Guessed %s instead of %s", bo, binary.LittleEndian)
	}
}
// TestSpecCopy checks that Spec.Copy deep-copies every type: no copied type
// (except the interned Void) may share an address with the original.
func TestSpecCopy(t *testing.T) {
	spec := parseELFBTF(t, "../testdata/loader-el.elf")

	if len(spec.types) < 1 {
		t.Fatal("Not enough types")
	}

	cpy := spec.Copy()
	for i := range cpy.types {
		if _, ok := cpy.types[i].(*Void); ok {
			// Since Void is an empty struct, a Type interface value containing
			// &Void{} stores (*Void, nil). Since interface equality first compares
			// the type and then the concrete value, Void is always equal.
			continue
		}

		if cpy.types[i] == spec.types[i] {
			t.Fatalf("Type at index %d is not a copy: %T == %T", i, cpy.types[i], spec.types[i])
		}
	}
}
// TestSpecTypeByID checks that a fresh Spec resolves type ID 0 (Void) and
// rejects any other ID with ErrNotFound.
func TestSpecTypeByID(t *testing.T) {
	_, err := newSpec().TypeByID(0)
	qt.Assert(t, err, qt.IsNil)

	_, err = newSpec().TypeByID(1)
	qt.Assert(t, err, qt.ErrorIs, ErrNotFound)
}
// TestHaveBTF exercises the BTF feature probe.
func TestHaveBTF(t *testing.T) {
	testutils.CheckFeatureTest(t, haveBTF)
}
// TestHaveMapBTF exercises the map-BTF feature probe.
func TestHaveMapBTF(t *testing.T) {
	testutils.CheckFeatureTest(t, haveMapBTF)
}
// TestHaveProgBTF exercises the program-BTF feature probe.
func TestHaveProgBTF(t *testing.T) {
	testutils.CheckFeatureTest(t, haveProgBTF)
}
// TestHaveFuncLinkage exercises the function-linkage feature probe.
func TestHaveFuncLinkage(t *testing.T) {
	testutils.CheckFeatureTest(t, haveFuncLinkage)
}
// ExampleSpec_TypeByName documents the TypeByName calling convention:
// pass a pointer to a concrete Type implementation.
func ExampleSpec_TypeByName() {
	// Acquire a Spec via one of its constructors.
	spec := new(Spec)

	// Declare a variable of the desired type
	var foo *Struct

	if err := spec.TypeByName("foo", &foo); err != nil {
		// There is no struct with name foo, or there
		// are multiple possibilities.
	}

	// We've found struct foo
	fmt.Println(foo.Name)
}
// TestTypesIterator round-trips types through a Builder and checks the
// iterator yields exactly Void followed by the marshaled types, in order.
func TestTypesIterator(t *testing.T) {
	// types[0] is Void, which the spec always supplies implicitly; only
	// types[1:] are handed to the Builder.
	types := []Type{(*Void)(nil), &Int{Size: 4}, &Int{Size: 2}}

	b, err := NewBuilder(types[1:])
	if err != nil {
		t.Fatal(err)
	}

	raw, err := b.Marshal(nil, nil)
	if err != nil {
		t.Fatal(err)
	}

	spec, err := LoadSpecFromReader(bytes.NewReader(raw))
	if err != nil {
		t.Fatal(err)
	}

	iter := spec.Iterate()

	for i, typ := range types {
		if !iter.Next() {
			t.Fatal("Iterator ended early at item", i)
		}

		qt.Assert(t, iter.Type, qt.DeepEquals, typ)
	}

	if iter.Next() {
		t.Fatalf("Iterator yielded too many items: %p (%[1]T)", iter.Type)
	}
}
// TestLoadSplitSpecFromReader loads module ("split") BTF on top of a vmlinux
// base spec and checks cross-references: split types resolve by ID, base-only
// types ('int') are not re-exported by the split spec, and Copy preserves
// type IDs.
func TestLoadSplitSpecFromReader(t *testing.T) {
	spec := vmlinuxTestdataSpec(t)

	f, err := os.Open("testdata/btf_testmod.btf")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	splitSpec, err := LoadSplitSpecFromReader(f, spec)
	if err != nil {
		t.Fatal(err)
	}

	typ, err := splitSpec.AnyTypeByName("bpf_testmod_init")
	if err != nil {
		t.Fatal(err)
	}
	typeID, err := splitSpec.TypeID(typ)
	if err != nil {
		t.Fatal(err)
	}

	typeByID, err := splitSpec.TypeByID(typeID)
	qt.Assert(t, err, qt.IsNil)
	qt.Assert(t, typeByID, qt.Equals, typ)

	fnType := typ.(*Func)
	fnProto := fnType.Type.(*FuncProto)

	// 'int' is defined in the base BTF...
	intType, err := spec.AnyTypeByName("int")
	if err != nil {
		t.Fatal(err)
	}
	// ... but not in the split BTF
	_, err = splitSpec.AnyTypeByName("int")
	if err == nil {
		t.Fatal("'int' is not supposed to be found in the split BTF")
	}

	if fnProto.Return != intType {
		t.Fatalf("Return type of 'bpf_testmod_init()' (%s) does not match 'int' type (%s)",
			fnProto.Return, intType)
	}

	// Check that copied split-BTF's spec has correct type indexing
	splitSpecCopy := splitSpec.Copy()
	copyType, err := splitSpecCopy.AnyTypeByName("bpf_testmod_init")
	if err != nil {
		t.Fatal(err)
	}
	copyTypeID, err := splitSpecCopy.TypeID(copyType)
	if err != nil {
		t.Fatal(err)
	}
	if copyTypeID != typeID {
		t.Fatalf("'bpf_testmod_init` type ID (%d) does not match copied spec's (%d)",
			typeID, copyTypeID)
	}
}
// TestFixupDatasecLayout checks that fixupDatasecLayout assigns
// natural-alignment offsets to Datasec members and computes the total
// (aligned) section size.
func TestFixupDatasecLayout(t *testing.T) {
	ds := &Datasec{
		Size: 0, // Populated by fixup.
		Vars: []VarSecinfo{
			{Type: &Var{Type: &Int{Size: 4}}},
			{Type: &Var{Type: &Int{Size: 1}}},
			{Type: &Var{Type: &Int{Size: 1}}},
			{Type: &Var{Type: &Int{Size: 2}}},
			{Type: &Var{Type: &Int{Size: 16}}},
			{Type: &Var{Type: &Int{Size: 8}}},
		},
	}

	qt.Assert(t, fixupDatasecLayout(ds), qt.IsNil)

	qt.Assert(t, ds.Size, qt.Equals, uint32(40))
	qt.Assert(t, ds.Vars[0].Offset, qt.Equals, uint32(0))
	qt.Assert(t, ds.Vars[1].Offset, qt.Equals, uint32(4))
	qt.Assert(t, ds.Vars[2].Offset, qt.Equals, uint32(5))
	qt.Assert(t, ds.Vars[3].Offset, qt.Equals, uint32(6))
	qt.Assert(t, ds.Vars[4].Offset, qt.Equals, uint32(16))
	qt.Assert(t, ds.Vars[5].Offset, qt.Equals, uint32(32))
}
// BenchmarkSpecCopy measures the cost of deep-copying the vmlinux spec.
func BenchmarkSpecCopy(b *testing.B) {
	spec := vmlinuxTestdataSpec(b)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		spec.Copy()
	}
}
|
package main
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"os"
"path/filepath"
"strconv"
"time"
)
// GetNowTime returns the current local time in Go's default String()
// format: "2006-01-02 15:04:05.999999999 -0700 MST".
func GetNowTime() string {
	return time.Now().String()
}

// GetNowDateTimeAsYYMMDDHHMISS returns the current time truncated to
// "YYYY-MM-DD HH:MI:SS" (19 characters).
//
// The original converted through []byte and back; slicing the string
// directly is equivalent for this ASCII prefix and avoids two copies.
func GetNowDateTimeAsYYMMDDHHMISS() string {
	return GetNowTime()[:len("2015-01-01 12:13:14")]
}

// GetNowDateAsYYMMDD returns the current date as "YYYY-MM-DD" (10 characters).
func GetNowDateAsYYMMDD() string {
	return GetNowTime()[:len("2015-01-01")]
}

// GetNowMonthAsYYMM returns the current month as "YYYY-MM" (7 characters).
func GetNowMonthAsYYMM() string {
	return GetNowTime()[:len("2015-01")]
}
// Int162Bytes writes value into the first 2 bytes of data in little-endian
// order. data must be at least 2 bytes long.
//
// The original round-tripped through bytes.Buffer + reflection-based
// binary.Write; PutUint16 produces the identical encoding directly.
func Int162Bytes(value int16, data []byte) {
	binary.LittleEndian.PutUint16(data, uint16(value))
}
// Uint162Bytes writes value into the first 2 bytes of data, little-endian.
// data must be at least 2 bytes long.
func Uint162Bytes(value uint16, data []byte) {
	le := binary.LittleEndian
	le.PutUint16(data, value)
}
// Int322Bytes writes value into the first 4 bytes of data in little-endian
// order. data must be at least 4 bytes long.
//
// Replaces the bytes.Buffer + binary.Write round trip with a direct encode;
// the byte output is identical.
func Int322Bytes(value int32, data []byte) {
	binary.LittleEndian.PutUint32(data, uint32(value))
}
// Uint322Bytes writes value into the first 4 bytes of data, little-endian.
// data must be at least 4 bytes long.
func Uint322Bytes(value uint32, data []byte) {
	le := binary.LittleEndian
	le.PutUint32(data, value)
}
// Float322Bytes encodes value as a little-endian IEEE-754 float32 into the
// first 4 bytes of data. data must be at least 4 bytes long.
//
// NOTE(review): internal parameter names were swapped relative to their
// roles in the original (the float was called "data" and the destination
// "value"); renamed for clarity. Positional callers are unaffected.
func Float322Bytes(value float32, data []byte) {
	binary.LittleEndian.PutUint32(data, math.Float32bits(value))
}
// Float642Bytes encodes value as a little-endian IEEE-754 float64 into the
// first 8 bytes of data. data must be at least 8 bytes long.
//
// NOTE(review): internal parameter names were swapped relative to their
// roles in the original; renamed for clarity. Positional callers are
// unaffected.
func Float642Bytes(value float64, data []byte) {
	binary.LittleEndian.PutUint64(data, math.Float64bits(value))
}
// GetInt32ValueByString parses value as a base-10 int32. On a parse error it
// logs the failure and returns whatever ParseInt yielded (0, or the clamped
// extreme on range overflow), matching the original best-effort behavior.
func GetInt32ValueByString(value string) int32 {
	parsed, parseErr := strconv.ParseInt(value, 10, 32)
	if parseErr != nil {
		fmt.Println("GetInt32ValueByString", value, parseErr)
	}
	return int32(parsed)
}
// Getfloat64ValueByString parses value as a float64. On a parse error it
// logs the failure and returns 0.
//
// Fix: the original called strconv.ParseFloat(value, 10); ParseFloat's
// bitSize must be 32 or 64 per its documentation. 64 is the intended width.
func Getfloat64ValueByString(value string) float64 {
	v, err := strconv.ParseFloat(value, 64)
	if err != nil {
		fmt.Println("Getfloat64ValueByString", value, err)
	}
	return v
}
// Bytes2Uint16 decodes the first 2 bytes of data as a little-endian uint16.
func Bytes2Uint16(data []byte) (value uint16) {
	return binary.LittleEndian.Uint16(data)
}
// Bytes2Int16 decodes the first 2 bytes of data as a little-endian int16.
// A buffer shorter than 2 bytes yields 0, matching the original's behavior
// where binary.Read failed silently and left value at its zero value.
//
// The reflection-based bytes.Buffer + binary.Read round trip is replaced
// with a direct decode.
func Bytes2Int16(data []byte) (value int16) {
	if len(data) < 2 {
		return 0
	}
	return int16(binary.LittleEndian.Uint16(data))
}
// Bytes2Uint32 decodes the first 4 bytes of data as a little-endian uint32.
func Bytes2Uint32(data []byte) (value uint32) {
	return binary.LittleEndian.Uint32(data)
}
// Bytes2Int32 decodes the first 4 bytes of data as a little-endian int32.
// A buffer shorter than 4 bytes yields 0, matching the original's silent
// binary.Read failure path.
func Bytes2Int32(data []byte) (value int32) {
	if len(data) < 4 {
		return 0
	}
	return int32(binary.LittleEndian.Uint32(data))
}
// Bytes2Uint64 decodes the first 8 bytes of data as a little-endian uint64.
func Bytes2Uint64(data []byte) (value uint64) {
	return binary.LittleEndian.Uint64(data)
}
// Bytes2float32 decodes the first 4 bytes of data as a little-endian
// IEEE-754 float32.
func Bytes2float32(data []byte) (value float32) {
	return math.Float32frombits(binary.LittleEndian.Uint32(data))
}
// Bytes2float64 decodes the first 4 bytes of data as a little-endian
// float32 and widens the result to float64. Note it consumes 4 bytes, not
// 8 — presumably for wire formats carrying single-precision values; use
// EightBytes2float64 for true float64 payloads.
func Bytes2float64(data []byte) (value float64) {
	bits := binary.LittleEndian.Uint32(data)
	return float64(math.Float32frombits(bits))
}
// EightBytes2float64 decodes the first 8 bytes of data as a little-endian
// IEEE-754 float64.
func EightBytes2float64(data []byte) (value float64) {
	return math.Float64frombits(binary.LittleEndian.Uint64(data))
}
// GetModulePath returns the absolute directory containing the running
// executable (derived from os.Args[0]) with a trailing "/". On failure it
// logs the error and returns "".
func GetModulePath() string {
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		fmt.Println("GetPath: ", err.Error())
		return ""
	}
	return dir + "/"
}
// SleepForever blocks the calling goroutine indefinitely by sleeping in a
// loop. (A bare select{} is deliberately not used: with no other runnable
// goroutines it would trip the runtime's deadlock detector, whereas a
// sleeping goroutine does not.)
func SleepForever() {
	for {
		time.Sleep(5 * time.Second)
	}
}
// Hex2Ascii renders hexdata into asciidata as space-separated uppercase hex
// pairs ("AB CD ...", with a trailing space after the last pair) and returns
// the number of bytes written. It returns 0 when asciidata is too small.
//
// Fix: the original rejected buffers with `hexlen*3 >= asciilen`, which also
// refused an exactly-sized buffer (hexlen*3 bytes) even though the output
// fits in it precisely.
func Hex2Ascii(hexdata []byte, asciidata []byte) int {
	hexlen := len(hexdata)
	if hexlen*3 > len(asciidata) {
		return 0
	}
	for i, b := range hexdata {
		asciidata[i*3] = hex2char(b >> 4)
		asciidata[i*3+1] = hex2char(b & 0x0f)
		asciidata[i*3+2] = ' '
	}
	return hexlen * 3
}

// hex2char maps a nibble (0-15) to its ASCII hex digit ('0'-'9', 'A'-'F');
// values above 15 map to a space. (The original also tested `hex >= 0`,
// which is always true for an unsigned byte.)
func hex2char(hex byte) byte {
	switch {
	case hex <= 9:
		return hex + '0'
	case hex <= 15:
		return hex + 'A' - 10
	default:
		return ' '
	}
}
//------------------------------------------------------------------------------

// CheckCrc16 computes the IMU checksum over data. Accumulation starts at
// index 1 — byte 0 is skipped (presumably a frame-header byte; confirm
// against the IMU protocol). Inputs shorter than 1 byte yield 0xffff.
func CheckCrc16(data []byte) uint16 {
	if len(data) < 1 {
		return 0xffff
	}
	crc := uint16(0xffff)
	for _, b := range data[1:] {
		crc = crc_accumulate(b, crc)
	}
	return crc
}

// crc_accumulate folds one byte into the running CRC (X.25/CCITT-style
// reflected update, as used by the MAVLink checksum).
func crc_accumulate(acrc byte, crc16 uint16) uint16 {
	tmp := acrc ^ byte(crc16)
	tmp ^= tmp << 4
	t16 := uint16(tmp)
	return (crc16 >> 8) ^ (t16 << 8) ^ (t16 << 3) ^ (t16 >> 4)
}
// Framing constants for the serial transport protocol: a 1-byte 0x5a header,
// a 2-byte payload length, and a 2-byte trailing CRC. (gofmt-normalized; the
// original had inconsistent spacing around '='.)
const (
	TRANS_HEAD          = 0x5a // frame start marker
	HEAD_SIZE           = 1    // header marker width in bytes
	LEN_SIZE            = 2    // payload-length field width in bytes
	CRC_SIZE            = 2    // CRC field width in bytes
	HEAD_LENGTH_LEN     = (HEAD_SIZE + LEN_SIZE)
	HEAD_LENGTH_CRC_LEN = (HEAD_LENGTH_LEN + CRC_SIZE)
)
// CRC16Table is the lookup table for the reflected polynomial 0xA001
// (CRC-16/ARC with zero initial value, given how CRC16 below seeds it).
var CRC16Table = [256]uint16{0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440, 0xCC01, 0x0CC0, 0x0D80,
	0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841, 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41, 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641, 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100,
	0xD1C1, 0xD081, 0x1040, 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240, 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441, 0x3C00, 0xFCC1,
	0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41, 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80,
	0xEA41, 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40, 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, 0x2200, 0xE2C1, 0xE381, 0x2340,
	0xE101, 0x21C0, 0x2080, 0xE041, 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240, 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441, 0x6C00,
	0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41, 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0,
	0x7A80, 0xBA41, 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640, 0x7200, 0xB2C1, 0xB381,
	0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241, 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841, 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00,
	0x8BC1, 0x8A81, 0x4A40, 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41, 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0,
	0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040}

// CRC16 computes the table-driven CRC-16 (ARC variant: reflected 0xA001,
// init 0x0000) over the first length bytes of buf.
//
// Fix: the length parameter was named `len`, shadowing the builtin inside
// the function body; renamed (positional callers are unaffected).
func CRC16(buf []byte, length int) uint16 {
	crc_result := uint16(0x00)
	table_num := uint16(0x00)
	for i := 0; i < length; i++ {
		table_num = (crc_result & 0xff) ^ uint16(buf[i]&0xff)
		crc_result = ((crc_result >> 8) & 0xff) ^ CRC16Table[table_num]
	}
	return crc_result
}
/*
函数crc_check: 检查数据是否完整
return:
-1 校验失败, 0 检验成功
*/
/*func crc_check(protocol_buf []byte)uint16 {
/*if(protocol_buf == nil){
return -1
}*/
//payload_len := (protocol_buf[1] << 8 & 0xff00) | (protocol_buf[2] & 0xff)
//total_size := int(HEAD_SIZE + LEN_SIZE + payload_len + CRC_SIZE)
//src_crc_sum := (protocol_buf[total_size - 2] << 8 & 0xff00) | (protocol_buf[total_size - 1] & 0xff)
//p_data := protocol_buf + HEAD_SIZE + LEN_SIZE
//rc_sum_calc := CRC16(protocol_buf[:], int(payload_len))
/*if crc_sum_calc != src_crc_sum {
return -1
}
return 0
return crc_sum_calc
}*/
|
package main
import (
"fmt"
"os"
)
// check reports whether the three-row board contains one of the tracked
// end-of-game patterns, printing "Game finished !" when it does. The exact
// set of conditions is preserved from the original (it covers only a subset
// of all tic-tac-toe winning lines).
func check(table1 []string, table2 []string, table3 []string) bool {
	finished := (table1[0] == table3[0] && table2[0] == table3[0]) ||
		(table1[1] == table3[1] && table2[1] == table3[1]) ||
		(table1[2] == table3[2] && table2[2] == table3[2]) ||
		(table1[0] == table3[2] && table2[1] == table3[2]) ||
		(table1[2] == table3[0] && table2[1] == table3[0]) ||
		(table1[0] == table1[2] && table1[1] == table1[2]) ||
		(table2[0] == table2[2] && table2[1] == table2[2]) ||
		(table3[0] == table3[2] && table3[1] == table3[2])
	if !finished {
		return false
	}
	fmt.Println("Game finished !")
	return true
}
// main runs an interactive two-player tic-tac-toe loop on stdin/stdout.
// NOTE(review): the game "restarts" by calling main() recursively, so stack
// depth grows with every game; state is reset only because the tables are
// re-declared. Left as-is to preserve behavior.
func main() {
	var count int
	var table1 = []string{"1", "2", "3"}
	var table2 = []string{"4", "5", "6"}
	var table3 = []string{"7", "8", "9"}
	fmt.Println("Start a game ? Y/N")
	var resp string
	fmt.Scanln(&resp)
	if resp == "Y" || resp == "y" {
		fmt.Println("Player 1, choose a name : ")
		var player1 string
		fmt.Scanln(&player1)
		fmt.Println("Player 2, choose a name : ")
		var player2 string
		fmt.Scanln(&player2)
		fmt.Print(table1, "\n", table2, "\n", table3, "\n")
		for check(table1, table2, table3) != true {
			fmt.Println(player1 + " : Your turn, select a number between 1 and 9")
			var tour1 string
			fmt.Scanln(&tour1)
			// The triple loop visits every (i,j,k) combination; only the
			// matching cell is replaced with the player's mark.
			for i := 0; i < len(table1); i++ {
				for j := 0; j < len(table2); j++ {
					for k := 0; k < len(table3); k++ {
						if tour1 == table1[i] {
							table1[i] = "X"
						}
						if tour1 == table2[j] {
							table2[j] = "X"
						}
						if tour1 == table3[k] {
							table3[k] = "X"
						}
					}
				}
			}
			count++
			if count == 9 {
				fmt.Println("No winner...")
				main()
			}
			fmt.Print(table1, "\n", table2, "\n", table3, "\n")
			if check(table1, table2, table3) == true {
				fmt.Println(player1 + " won !")
				main()
			}
			fmt.Println(player2 + " : Your turn, select a number between 1 and 9")
			var tour2 string
			fmt.Scanln(&tour2)
			for i := 0; i < len(table1); i++ {
				for j := 0; j < len(table2); j++ {
					for k := 0; k < len(table3); k++ {
						if tour2 == table1[i] {
							table1[i] = "O"
						}
						if tour2 == table2[j] {
							table2[j] = "O"
						}
						if tour2 == table3[k] {
							table3[k] = "O"
						}
					}
				}
			}
			count++
			fmt.Println(count)
			if count == 9 {
				fmt.Println("No winner...")
				main()
			}
			fmt.Print(table1, "\n", table2, "\n", table3, "\n")
			if check(table1, table2, table3) == true {
				fmt.Println(player2 + " won !")
				main()
			}
		}
	}
	if resp == "N" || resp == "n" {
		fmt.Println("OK, bye")
		os.Exit(3)
	}
	fmt.Println("Please select Y or N")
	main()
}
|
package run
import (
"os"
"github.com/rs/zerolog/log"
"github.com/saucelabs/saucectl/internal/appstore"
"github.com/saucelabs/saucectl/internal/credentials"
"github.com/saucelabs/saucectl/internal/flags"
"github.com/saucelabs/saucectl/internal/rdc"
"github.com/saucelabs/saucectl/internal/region"
"github.com/saucelabs/saucectl/internal/resto"
"github.com/saucelabs/saucectl/internal/saucecloud"
"github.com/saucelabs/saucectl/internal/sentry"
"github.com/saucelabs/saucectl/internal/testcomposer"
"github.com/saucelabs/saucectl/internal/xcuitest"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// xcuitestFlags holds the XCUITest-specific CLI flag values that are not
// handled by the generic SnakeCharmer binding.
type xcuitestFlags struct {
	Device flags.Device
}
// NewXCUITestCmd creates the 'run' command for XCUITest.
// Flags declared through the SnakeCharmer are bound to config paths in
// PreRunE; the process exits with the runner's exit code.
func NewXCUITestCmd() *cobra.Command {
	sc := flags.SnakeCharmer{Fmap: map[string]*pflag.Flag{}}
	lflags := xcuitestFlags{}

	cmd := &cobra.Command{
		Use:              "xcuitest",
		Short:            "Run xcuitest tests",
		Hidden:           true, // TODO reveal command once ready
		TraverseChildren: true,
		PreRunE: func(cmd *cobra.Command, args []string) error {
			sc.BindAll()
			return preRun()
		},
		Run: func(cmd *cobra.Command, args []string) {
			// tcClient, restoClient, rdcClient and appsClient are package-level
			// clients initialized elsewhere in this package.
			exitCode, err := runXcuitest(cmd, lflags, tcClient, restoClient, rdcClient, appsClient)
			if err != nil {
				log.Err(err).Msg("failed to execute run command")
				sentry.CaptureError(err, sentry.Scope{
					Username:   credentials.Get().Username,
					ConfigFile: gFlags.cfgFilePath,
				})
			}
			os.Exit(exitCode)
		},
	}

	sc.Fset = cmd.Flags()
	sc.String("name", "suite.name", "", "Sets the name of the job as it will appear on Sauce Labs")
	sc.String("app", "xcuitest.app", "", "Specifies the app under test")
	sc.String("testApp", "xcuitest.testApp", "", "Specifies the test app")
	sc.StringSlice("otherApps", "xcuitest.otherApps", []string{}, "Specifies any additional apps that are installed alongside the main app")

	// Test Options
	sc.StringSlice("testOptions.class", "suite.testOptions.class", []string{}, "Include classes")

	// Devices (no simulators)
	cmd.Flags().Var(&lflags.Device, "device", "Specifies the device to use for testing")

	return cmd
}
// runXcuitest loads and validates the XCUITest project configuration,
// applies global and command flags, points every service client at the
// configured region, and launches the cloud run. Returns the process exit
// code and any setup error.
func runXcuitest(cmd *cobra.Command, flags xcuitestFlags, tc testcomposer.Client, rs resto.Client, rc rdc.Client,
	as appstore.AppStore) (int, error) {
	p, err := xcuitest.FromFile(gFlags.cfgFilePath)
	if err != nil {
		return 1, err
	}
	p.Sauce.Metadata.ExpandEnv()
	applyGlobalFlags(cmd, &p.Sauce, &p.Artifacts)
	if err := applyXCUITestFlags(&p, flags); err != nil {
		return 1, err
	}
	xcuitest.SetDefaults(&p)

	if err := xcuitest.Validate(p); err != nil {
		return 1, err
	}

	regio := region.FromString(p.Sauce.Region)

	tc.URL = regio.APIBaseURL()
	rs.URL = regio.APIBaseURL()
	as.URL = regio.APIBaseURL()
	rc.URL = regio.APIBaseURL()

	rs.ArtifactConfig = p.Artifacts.Download
	rc.ArtifactConfig = p.Artifacts.Download

	return runXcuitestInCloud(p, regio, tc, rs, rc, as)
}
// runXcuitestInCloud wires the service clients into a saucecloud runner and
// executes the project against Sauce Labs real devices.
func runXcuitestInCloud(p xcuitest.Project, regio region.Region, tc testcomposer.Client, rs resto.Client, rc rdc.Client, as appstore.AppStore) (int, error) {
	log.Info().Msg("Running XCUITest in Sauce Labs")
	printTestEnv("sauce")

	r := saucecloud.XcuitestRunner{
		Project: p,
		CloudRunner: saucecloud.CloudRunner{
			ProjectUploader:       &as,
			JobStarter:            &tc,
			JobReader:             &rs,
			RDCJobReader:          &rc,
			JobStopper:            &rs,
			JobWriter:             &tc,
			CCYReader:             &rs,
			TunnelService:         &rs,
			Region:                regio,
			ShowConsoleLog:        false,
			ArtifactDownloader:    &rs,
			RDCArtifactDownloader: &rc,
		},
	}
	return r.RunProject()
}
// applyXCUITestFlags merges command-line flags into the project config:
// it filters suites when --select-suite was given, and when an adhoc suite
// name is set it replaces the suite list with that single suite (adding the
// --device selection if one was provided).
func applyXCUITestFlags(p *xcuitest.Project, flags xcuitestFlags) error {
	if selected := gFlags.selectedSuite; selected != "" {
		if err := xcuitest.FilterSuites(p, selected); err != nil {
			return err
		}
	}

	// No adhoc suite configured; keep the suites from the config file.
	if p.Suite.Name == "" {
		return nil
	}

	if flags.Device.Changed {
		p.Suite.Devices = append(p.Suite.Devices, flags.Device.Device)
	}
	p.Suites = []xcuitest.Suite{p.Suite}
	return nil
}
|
// Copyright 2018 Kuei-chun Chen. All rights reserved.
package sim
import (
"bufio"
"context"
"encoding/json"
"fmt"
"log"
"os"
"os/signal"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/simagix/gox"
anly "github.com/simagix/keyhole/analytics"
"github.com/simagix/keyhole/mdb"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
)
// outdir is where terminate() writes gzipped stats and the final zip bundle.
const (
	outdir = "./out"
)
// Runner drives load tests and peeking sessions against a MongoDB
// deployment. The exported fields are serialized (bson) into the final
// perf report; the unexported fields hold runtime configuration.
type Runner struct {
	Logger  *gox.Logger         `bson:"keyhole"`
	Metrics map[string][]bson.M `bson:"metrics"`
	Results []string            `bson:"results"`

	auto           bool   // skip the interactive confirmation prompt
	channel        chan string
	client         *mongo.Client
	clusterType    string // standalone, replica, or sharded (from ClusterStats)
	collectionName string
	conns          int // number of concurrent load-test connections
	connString     connstring.ConnString
	dbName         string
	drop           bool // drop data before the run
	duration       int  // run length in minutes
	filename       string
	mutex          sync.RWMutex
	peek           bool // observe only, no load generation
	simOnly        bool // skip data population/index creation
	tps            int  // transactions per second per connection
	txFilename     string
	uri            string
	uriList        []string // one URI per shard for sharded clusters
	verbose        bool
}
// NewRunner - Constructor. Connects to the deployment, detects the cluster
// type, and for sharded clusters expands uriList to one URI per shard. The
// partially initialized Runner is returned alongside any error.
func NewRunner(connString connstring.ConnString) (*Runner, error) {
	var err error
	runner := Runner{Logger: gox.GetLogger("keyhole"), connString: connString, conns: runtime.NumCPU(),
		channel: make(chan string), collectionName: mdb.ExamplesCollection, Metrics: map[string][]bson.M{},
		mutex: sync.RWMutex{}}
	runner.dbName = connString.Database
	if runner.dbName == "" {
		runner.dbName = mdb.KeyholeDB
	}
	if runner.client, err = mdb.NewMongoClient(connString.String()); err != nil {
		return &runner, err
	}
	stats := mdb.NewClusterStats("")
	stats.GetClusterStatsSummary(runner.client)
	runner.clusterType = stats.Cluster
	if runner.clusterType == "" {
		runner.Logger.Warn("unable to retrieve cluster type")
	}
	runner.uriList = []string{connString.String()}
	if runner.clusterType == mdb.Sharded {
		if shards, err := mdb.GetShards(runner.client); err != nil {
			return &runner, err
		} else if runner.uriList, err = mdb.GetAllShardURIs(shards, connString); err != nil {
			return &runner, err
		}
	}
	// For sharded clusters this picks the last shard URI as the default.
	runner.uri = runner.uriList[len(runner.uriList)-1]
	return &runner, nil
}
// SetCollection sets the target collection name, falling back to the
// default examples collection when the given name is empty.
func (rn *Runner) SetCollection(collectionName string) {
	rn.collectionName = mdb.ExamplesCollection
	if collectionName != "" {
		rn.collectionName = collectionName
	}
}
// SetTPS sets the per-connection transactions-per-second target.
func (rn *Runner) SetTPS(tps int) {
	rn.tps = tps
}
// SetAutoMode enables/disables auto mode; when enabled, Start skips the
// interactive confirmation prompt. (Original comment was a copy-paste of
// SetTPS's.)
func (rn *Runner) SetAutoMode(auto bool) { rn.auto = auto }
// SetTemplateFilename sets the document template file used to seed data.
func (rn *Runner) SetTemplateFilename(filename string) {
	rn.filename = filename
}
// SetVerbose sets verbose mode.
func (rn *Runner) SetVerbose(verbose bool) {
	rn.verbose = verbose
}
// SetPeekingMode enables peek (observe-only) mode. When enabled it also
// schedules process termination after rn.duration minutes.
// NOTE(review): the timer captures rn.duration at this call, so
// SetDuration must be called before SetPeekingMode — confirm call order
// at the call sites.
func (rn *Runner) SetPeekingMode(mode bool) {
	rn.peek = mode
	if rn.peek {
		go func(x int) {
			time.Sleep(time.Duration(x) * time.Minute)
			rn.terminate()
		}(rn.duration)
	}
}
// SetDuration sets simulation/load test duration in minutes.
func (rn *Runner) SetDuration(duration int) {
	rn.duration = duration
}
// SetDropFirstMode controls whether existing data is dropped before a run.
func (rn *Runner) SetDropFirstMode(mode bool) {
	rn.drop = mode
}
// SetNumberConnections overrides the number of concurrent connections;
// 0 keeps the default (runtime.NumCPU() from NewRunner).
func (rn *Runner) SetNumberConnections(num int) {
	if num == 0 {
		return
	}
	rn.conns = num
}
// SetTransactionTemplate sets transaction template file.
func (rn *Runner) SetTransactionTemplate(filename string) {
	rn.txFilename = filename
}
// SetSimOnlyMode controls simulation-only mode, which skips the data
// population and index creation phases.
func (rn *Runner) SetSimOnlyMode(mode bool) {
	rn.simOnly = mode
}
// Start kicks off the load test: after an optional interactive
// confirmation it prepares the target database, seeds template documents,
// and launches one worker goroutine per connection. A no-op in peek mode.
//
// Fixes vs. original: the log message typo "existing with" is corrected to
// "exiting with", and each goroutine now uses its own local error instead
// of writing a single shared `err` variable from every worker (a data race).
func (rn *Runner) Start() error {
	if rn.peek {
		return nil
	}
	if !rn.auto {
		reader := bufio.NewReader(os.Stdin)
		fmt.Print("Begin a load test [y/N]: ")
		text, _ := reader.ReadString('\n')
		text = strings.Replace(text, "\n", "", -1)
		if text != "y" && text != "Y" {
			os.Exit(0)
		}
	}
	rn.Logger.Info("Duration in minute(s):", rn.duration)
	if rn.dbName == "" || rn.dbName == "admin" || rn.dbName == "config" || rn.dbName == "local" {
		rn.dbName = mdb.KeyholeDB // switch to _KEYHOLE_88800 database for load tests
	}
	if rn.drop {
		rn.Cleanup()
	}
	rn.initSimDocs()
	tdoc := GetTransactions(rn.txFilename)
	// Simulation mode
	// 1st minute - build up data and memory
	// 2nd and 3rd minutes - normal TPS ops
	// remaining minutes - burst with no delay
	// last minute - normal TPS ops until exit
	rn.Logger.Info(fmt.Sprintf("Total TPS: %d (%d tps/conn * %d conns), duration: %d (mins)", rn.tps*rn.conns, rn.tps, rn.conns, rn.duration))
	simTime := rn.duration
	if !rn.simOnly {
		// Reserve the first minute for data population.
		simTime--
		rn.createIndexes(tdoc.Indexes)
	}
	for i := 0; i < rn.conns; i++ {
		go func(thread int) {
			if !rn.simOnly && rn.duration > 0 {
				if err := rn.PopulateData(); err != nil {
					rn.Logger.Info("Thread", thread, "exiting with", err)
					return
				}
				time.Sleep(10 * time.Millisecond)
			}
			if err := rn.Simulate(simTime, tdoc.Transactions, thread); err != nil {
				rn.Logger.Info("Thread", thread, "exiting with", err)
				return
			}
		}(i)
	}
	return nil
}
// terminate gathers a final serverStatus snapshot from every URI, writes
// per-replica-set gzipped metrics plus a bson-marshaled copy of the Runner
// into outdir, bundles everything into one zip, removes the intermediate
// files, and exits the process.
func (rn *Runner) terminate() {
	var err error
	var client *mongo.Client
	var filename string
	var filenames []string
	var result string

	os.Mkdir(outdir, 0755)
	rn.Cleanup()
	rn.Results = []string{}
	for _, uri := range rn.uriList {
		if client, err = mdb.NewMongoClient(uri); err != nil {
			rn.Logger.Info(err)
			continue
		}
		// NOTE(review): err, filename and result are re-declared below,
		// shadowing the outer variables for the rest of this iteration.
		var err error
		var stat anly.ServerStatusDoc
		serverStatus, _ := mdb.RunAdminCommand(client, "serverStatus")
		buf, _ := bson.Marshal(serverStatus)
		bson.Unmarshal(buf, &stat)
		// serverStatusDocs is a package-level accumulator.
		serverStatusDocs[uri] = append(serverStatusDocs[uri], stat)

		var data []byte
		if data, err = getServerStatusData(uri); err != nil {
			rn.Logger.Error(err)
			break
		}
		// save metrics to a file
		filename := fmt.Sprintf("%v/%v-%v.gz", outdir, keyholeStatsDataFile, getReplicaSetName(uri))
		filenames = append(filenames, filename)
		gox.OutputGzipped(data, filename)

		d := anly.NewDiagnosticData()
		reader := bufio.NewReader(strings.NewReader(string(data)))
		if err = d.AnalyzeServerStatus(reader); err != nil {
			rn.Logger.Error(err)
			break
		}
		strs := []string{}
		if d.ServerInfo != nil {
			var p mdb.ClusterStats
			data, _ := json.Marshal(d.ServerInfo)
			json.Unmarshal(data, &p)
			result := fmt.Sprintf("\n* MongoDB v%v %v (%v) %v %v %v cores %v mem",
				p.BuildInfo.Version, p.HostInfo.System.Hostname, p.HostInfo.OS.Name,
				p.ServerStatus.Process, p.Cluster, p.HostInfo.System.NumCores, p.HostInfo.System.MemSizeMB)
			strs = append(strs, result)
		}
		strs = append(strs, anly.PrintAllStats(d.ServerStatusList, -1))
		result = strings.Join(strs, "\n")
		fmt.Println(result)
		rn.Results = append(rn.Results, result)
	}

	// Marshal the Runner itself (metrics + results) into the perf bundle.
	hostname, _ := os.Hostname()
	filename = fmt.Sprintf(`%s/%s.%s-perf.bson.gz`, outdir, hostname, fileTimestamp)
	var buf []byte
	if buf, err = bson.Marshal(rn); err != nil {
		rn.Logger.Info("marshal error:", err)
	}
	gox.OutputGzipped(buf, filename)
	filenames = append(filenames, filename)

	zipFile := fmt.Sprintf(`%s/%s.%s-perf.zip`, outdir, hostname, fileTimestamp)
	gox.ZipFiles(zipFile, filenames)
	rn.Logger.Info("stats written to ", zipFile)
	for _, f := range filenames {
		os.Remove(f)
	}
	os.Exit(0)
}
// CollectAllStatus collects all server stats. It starts background
// collectors for every URI (metrics only for the first one), then loops
// forever relaying collector messages to the logger until either an
// interrupt/SIGTERM or the duration timer fires, at which point terminate()
// exits the process. It never returns normally.
func (rn *Runner) CollectAllStatus() error {
	var err error
	for i, uri := range rn.uriList {
		var client *mongo.Client
		if client, err = mdb.NewMongoClient(uri); err != nil {
			rn.Logger.Info(err)
			continue
		}
		stats := NewServerStats(uri, rn.channel)
		stats.SetVerbose(rn.verbose)
		stats.SetPeekingMode(rn.peek)
		go stats.getDBStats(client, rn.dbName)
		go stats.getReplSetGetStatus(client)
		go stats.getServerStatus(client)
		go stats.getMongoConfig(client)
		if i == 0 {
			go stats.collectMetrics(client, uri)
		}
	}

	quit := make(chan os.Signal, 2)
	signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
	timer := time.NewTimer(time.Duration(rn.duration) * time.Minute)
	for {
		select {
		case <-quit:
			rn.terminate()
		case <-timer.C:
			rn.terminate()
		default:
			// NOTE(review): this blocks on the channel read, so quit/timer
			// are only observed after the next collector message arrives.
			rn.Logger.Info(<-rn.channel)
		}
		time.Sleep(50 * time.Millisecond)
	}
}
// createIndexes creates indexes on the working collection: always a
// {_search: 1} index; with no template docs also {email: 1} (plus chunk
// splitting on sharded clusters); otherwise one index per template doc,
// using -1 for negative numeric values and 1 otherwise.
func (rn *Runner) createIndexes(docs []bson.M) error {
	var err error
	var ctx = context.Background()
	c := rn.client.Database(rn.dbName).Collection(rn.collectionName)
	indexView := c.Indexes()
	idx := mongo.IndexModel{Keys: bson.D{{Key: "_search", Value: 1}}}
	if _, err = indexView.CreateOne(ctx, idx); err != nil {
		return err
	}

	if len(docs) == 0 {
		idx = mongo.IndexModel{Keys: bson.D{{Key: "email", Value: 1}}}
		if _, err = indexView.CreateOne(ctx, idx); err != nil {
			return err
		}
		if rn.clusterType == mdb.Sharded {
			if err = rn.splitChunks(); err != nil {
				fmt.Println(err)
			}
		}
	}

	for _, doc := range docs {
		keys := bson.D{}
		for k, v := range doc {
			// Index direction: descending for negative sample values.
			x := int32(1)
			switch v := v.(type) {
			case int:
				if v < 0 {
					x = -1
				}
			case float64:
				if v < 0 {
					x = -1
				}
			}
			keys = append(keys, bson.E{Key: k, Value: x})
		}
		idx := mongo.IndexModel{
			Keys: keys,
		}
		if _, err = indexView.CreateOne(ctx, idx); err != nil {
			return err
		}
	}
	return err
}
// Cleanup drops the temp database (and the examples collection when it is
// the active one). It only acts when not in peek/sim-only mode and the
// working database is the dedicated keyhole database, so user data is
// never dropped.
func (rn *Runner) Cleanup() error {
	var err error
	if rn.peek {
		return err
	}
	if !rn.simOnly && rn.dbName == mdb.KeyholeDB {
		ctx := context.Background()
		if rn.collectionName == mdb.ExamplesCollection {
			rn.Logger.Info("dropping collection", mdb.KeyholeDB, mdb.ExamplesCollection)
			if err = rn.client.Database(mdb.KeyholeDB).Collection(mdb.ExamplesCollection).Drop(ctx); err != nil {
				rn.Logger.Info(err)
			}
		}
		rn.Logger.Info("dropping temp database", mdb.KeyholeDB)
		if err = rn.client.Database(rn.dbName).Drop(ctx); err != nil {
			rn.Logger.Info(err)
		}
	}
	// Give the server a moment to settle after the drops.
	time.Sleep(time.Second)
	return err
}
// splitChunks shards the working collection on {email: 1}, pre-splits it
// at evenly spaced uppercase-letter boundaries, and moves chunks off the
// primary shard so the load is spread across all shards. Intended for
// sharded clusters only.
func (rn *Runner) splitChunks() error {
	var err error
	var ctx = context.Background()
	var cursor *mongo.Cursor
	ns := rn.dbName + "." + rn.collectionName
	result := bson.M{}
	filter := bson.M{"_id": rn.dbName}
	if err = rn.client.Database("config").Collection("databases").FindOne(ctx, filter).Decode(&result); err != nil {
		return err
	}
	primary := result["primary"].(string)
	rn.Logger.Info("Sharding collection:", ns)
	cmd := bson.D{{Key: "enableSharding", Value: rn.dbName}}
	if err = rn.client.Database("admin").RunCommand(ctx, cmd).Decode(&result); err != nil {
		return err
	}
	cmd = bson.D{{Key: "shardCollection", Value: ns}, {Key: "key", Value: bson.M{"email": 1}}}
	if err = rn.client.Database("admin").RunCommand(ctx, cmd).Decode(&result); err != nil {
		return err
	}

	rn.Logger.Info("splitting chunks...")
	if cursor, err = rn.client.Database("config").Collection("shards").Find(ctx, bson.D{{}}); err != nil {
		return err
	}
	// Collect every shard except the primary; chunks will be moved there.
	otherShards := []bson.M{}
	for cursor.Next(ctx) {
		v := bson.M{}
		if err = cursor.Decode(&v); err != nil {
			rn.Logger.Info(err)
			continue
		}
		if primary != v["_id"].(string) {
			otherShards = append(otherShards, v)
		}
	}
	shardKeys := []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
		"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"}
	// One split point per shard, evenly spaced through the alphabet.
	divider := 1 + len(shardKeys)/(len(otherShards)+1)
	for i := range otherShards {
		cmd := bson.D{{Key: "split", Value: ns}, {Key: "middle", Value: bson.M{"email": shardKeys[(i+1)*divider]}}}
		if err = rn.client.Database("admin").RunCommand(ctx, cmd).Decode(&result); err != nil { // could be split already
			return err
		}
	}
	if len(otherShards) < 1 {
		return nil
	}
	rn.Logger.Info("moving chunks...")
	filter = bson.M{"ns": ns}
	opts := options.Find()
	opts.SetSort(bson.D{{Key: "_id", Value: -1}})
	if cursor, err = rn.client.Database("config").Collection("chunks").Find(ctx, filter, opts); err != nil {
		return err
	}
	i := 0
	for cursor.Next(ctx) {
		v := bson.M{}
		if err = cursor.Decode(&v); err != nil {
			continue
		}
		// Skip chunks already on the target shard.
		if v["shard"].(string) == otherShards[i]["_id"].(string) {
			i++
			continue
		}
		cmd := bson.D{{Key: "moveChunk", Value: ns}, {Key: "find", Value: v["min"].(bson.M)},
			{Key: "to", Value: otherShards[i]["_id"].(string)}}
		rn.Logger.Info(fmt.Sprintf("moving %v from %v to %v", v["min"], v["shard"], otherShards[i]["_id"]))
		if err = rn.client.Database("admin").RunCommand(ctx, cmd).Decode(&result); err != nil {
			log.Fatal(err)
		}
		i++
		if i == len(otherShards) {
			break
		}
	}
	time.Sleep(1 * time.Second)
	return nil
}
|
package services
import (
"proximity/config"
"proximity/repo"
)
// UserInterface describes the read operations the user service exposes.
type UserInterface interface {
	// GetAllBadWords returns a page of banned words, skipping `skip`
	// entries and returning at most `limit`.
	GetAllBadWords(skip int, limit int) ([]string, error)
}
// NewUserService constructs the user service from its configuration and
// user repository dependencies.
//
// NOTE(review): despite its name this returns CmsInterface backed by a
// *Cms — presumably copied from the CMS service; confirm the intended
// return type against callers.
func NewUserService(conf config.IConfig, userDb repo.UserRepoInterface) CmsInterface {
	return &Cms{config: conf, user: userDb}
}
// GetRole returns the role of the user identified by userName.
//
// NOTE(review): the original body could not compile — it returned nil
// for a string result and referenced an undefined identifier `words`
// (apparently pasted from bad-words listing code). This version assumes
// the repository's GetOne returns a record exposing the user's role;
// confirm against the repo.UserRepoInterface contract.
func (i *Cms) GetRole(userName string) (role string, err error) {
	u, err := i.user.GetOne(userName)
	if err != nil {
		return "", err
	}
	return u.Role, nil
}
// User bundles the dependencies of the user service: configuration and
// the user repository it reads from.
type User struct {
	config config.IConfig         // service configuration
	user   repo.UserRepoInterface // persistence for user records
}
|
package main
import "fmt"
// twoSum returns the 1-based indices of two distinct elements of the
// ascending-sorted slice nums whose values add to target, or [-1, -1]
// when no such pair exists. (Two pointers from both ends would work as
// well — translated from the original Chinese comment.)
func twoSum(nums []int, target int) []int {
	for i, v := range nums {
		// Reject ii == i so the same element is never used twice;
		// the original returned [i+1, i+1] for inputs like
		// ([3,4,5], 6).
		if ii := searchLast(nums, target-v); ii != -1 && ii != i {
			return []int{1 + i, 1 + ii}
		}
	}
	return []int{-1, -1}
}
// searchLast binary-searches the ascending slice nums and returns the
// index of the LAST occurrence of target, or -1 when target is absent.
func searchLast(nums []int, target int) int {
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch {
		case nums[mid] < target:
			lo = mid + 1 // answer lies to the right
		case nums[mid] > target:
			hi = mid - 1 // answer lies to the left
		default:
			// Found an occurrence; stop only if it is the last one.
			if mid == len(nums)-1 || nums[mid+1] != target {
				return mid
			}
			lo = mid + 1
		}
	}
	return -1
}
// main demonstrates twoSum on the classic sorted example.
func main() {
	nums := []int{2, 7, 11, 15}
	fmt.Println(twoSum(nums, 9))
}
|
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app_test
import (
"context"
"testing"
"github.com/kubevela/pkg/controller/sharding"
"github.com/stretchr/testify/require"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
apputil "github.com/oam-dev/kubevela/pkg/utils/app"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
// TestReschedule verifies that RescheduleAppRevAndRT stamps the shard-id
// label onto both the application revision and the root resource tracker
// belonging to the application.
func TestReschedule(t *testing.T) {
	ctx := context.Background()

	app := &v1beta1.Application{}
	app.SetName("app")
	app.SetNamespace("default")

	appLabels := map[string]string{
		oam.LabelAppName:      "app",
		oam.LabelAppNamespace: "default",
	}

	rev := &v1beta1.ApplicationRevision{}
	rev.SetName("a1")
	rev.SetNamespace("default")
	rev.SetLabels(appLabels)

	tracker := &v1beta1.ResourceTracker{}
	tracker.SetName("r1")
	tracker.SetLabels(appLabels)
	tracker.Spec.Type = v1beta1.ResourceTrackerTypeRoot

	cli := fake.NewClientBuilder().WithScheme(common.Scheme).WithObjects(app, rev, tracker).Build()
	require.NoError(t, apputil.RescheduleAppRevAndRT(ctx, cli, app, "s1"))

	// Both objects must now carry the scheduled-shard label.
	require.NoError(t, cli.Get(ctx, client.ObjectKeyFromObject(rev), rev))
	require.Equal(t, "s1", rev.GetLabels()[sharding.LabelKubeVelaScheduledShardID])
	require.NoError(t, cli.Get(ctx, client.ObjectKeyFromObject(tracker), tracker))
	require.Equal(t, "s1", tracker.GetLabels()[sharding.LabelKubeVelaScheduledShardID])
}
|
package logger
import (
"fmt"
"log"
"os"
)
//INITIALIZING LOG FOR SUCCESS
func Success(file *os.File) *log.Logger {
LogSucc := log.New(file, "SUCCESS: ", log.Ldate|log.Ltime|log.Lshortfile)
return LogSucc
}
//INITIALIZING LOG FOR FAILURE
func Failure(file *os.File) *log.Logger {
LogFail := log.New(file, "ERROR: ", log.Ldate|log.Ltime|log.Lshortfile)
return LogFail
}
//INITIALIZING LOG FOR FAILURE
func Info(file *os.File) *log.Logger {
LogInfo := log.New(file, "INFO: ", log.Ldate|log.Ltime|log.Lshortfile)
return LogInfo
}
// LogInit opens (creating and appending as needed) the shared log file
// ../log.txt and returns its handle; the process exits when it cannot.
func LogInit() *os.File {
	f, err := os.OpenFile("../log.txt", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err == nil {
		return f
	}
	fmt.Println("Error opening log.txt file", err)
	os.Exit(1)
	return nil // unreachable: os.Exit never returns
}
// LogSchInit opens (creating and appending as needed) the scheduler log
// file ../log_schedular.txt and returns it; exits the process on error.
func LogSchInit() *os.File {
	file, err := os.OpenFile("../log_schedular.txt", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		// Fixed: the original message named log.txt, not this file.
		fmt.Println("Error opening log_schedular.txt file", err)
		os.Exit(1)
	}
	return file
}
// LogManInit opens (creating and appending as needed) the manager log
// file ../log_manager.txt and returns it; exits the process on error.
func LogManInit() *os.File {
	file, err := os.OpenFile("../log_manager.txt", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		// Fixed: the original message named log.txt, not this file.
		fmt.Println("Error opening log_manager.txt file", err)
		os.Exit(1)
	}
	return file
}
// LogAgentInit opens (creating and appending as needed) the agent log
// file ../log_agent.txt and returns it; exits the process on error.
// (The original comment mislabelled this as the manager initializer.)
func LogAgentInit() *os.File {
	file, err := os.OpenFile("../log_agent.txt", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		// Fixed: the original message named log.txt, not this file.
		fmt.Println("Error opening log_agent.txt file", err)
		os.Exit(1)
	}
	return file
}
// LogValInit opens (creating and appending as needed) the validator log
// file ../log_validator.txt and returns it; exits the process on error.
// (The original comment mislabelled this as the manager initializer.)
func LogValInit() *os.File {
	file, err := os.OpenFile("../log_validator.txt", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		// Fixed: the original message named log.txt, not this file.
		fmt.Println("Error opening log_validator.txt file", err)
		os.Exit(1)
	}
	return file
}
package main
import "fmt"
// RetriveData drains c, printing each value, until the channel is
// closed; it then reports the closure and prints "over" before
// returning. (goto from the original replaced with a plain break.)
func RetriveData(c chan int) {
	for {
		v, ok := <-c
		if !ok {
			fmt.Printf("chan is already closed.\n")
			break
		}
		fmt.Printf("cur value is %v\n", v)
	}
	fmt.Printf("over\n")
}
// AddData sends the pairs (1,2) and (2,3) on c, then closes it when the
// driving value exceeds 2.
func AddData(c chan int) {
	for _, i := range []int{1, 2, 3} {
		if i <= 2 {
			c <- i
			c <- i + 1
		} else {
			close(c)
		}
	}
}
// main starts a producer and a consumer goroutine over a buffered
// channel, then parks the main goroutine forever.
func main() {
	c := make(chan int, 10)
	go AddData(c)
	go RetriveData(c)
	// NOTE(review): this empty loop busy-waits, pinning a CPU core and
	// never terminating. A WaitGroup or done channel would be the
	// conventional way to wait — confirm the spin is intentional
	// before changing it, since replacing it alters shutdown behavior.
	for {
	}
}
|
package raft
import (
"fmt"
"log"
)
// init configures the shared standard logger to timestamp entries with
// time-of-day plus microseconds (no date), which suits short test runs.
func init() {
	log.SetFlags(log.Ltime | log.Lmicroseconds)
}
// debugging gates the tracing helpers below; when false they are no-ops.
const debugging = false
// serverDPrint logs one formatted trace line tagged with the server id,
// its current state and the emitting code location ("source"). It is a
// no-op unless debugging is enabled.
func serverDPrint(id int, state raftServerState, source string, format string, args ...interface{}) {
	if !debugging {
		return
	}
	prefixed := make([]interface{}, 0, 3+len(args))
	prefixed = append(prefixed, id, state, source)
	prefixed = append(prefixed, args...)
	log.Printf("%d | %-10s | %-13s | "+format, prefixed...)
}
// stringifyState renders a %+v snapshot of rf's internal state for
// trace output, or "" when debugging is disabled. The anonymous struct
// mirrors Raft's fields so unexported names still print with labels.
// NOTE(review): rf's fields are read without visible locking — assumes
// the caller holds the Raft mutex; confirm at call sites.
func stringifyState(rf *Raft) string {
	if !debugging {
		return ""
	}
	logEntries := make([]LogEntry, 0, len(rf.log))
	for _, entry := range rf.log {
		// Store the command as a pointer but the term as a value
		// So that the output will be useful
		logEntries = append(logEntries, LogEntry{Term: entry.Term, Command: &entry.Command})
	}
	return fmt.Sprintf("%+v",
		struct {
			me               int
			currentTerm      int
			votedFor         int
			log              []LogEntry
			state            raftServerState
			commitIndex      int
			lastApplied      int
			leaderId         int
			nextIndex        []int
			matchIndex       []int
			numVotesGathered int
		}{
			me:               rf.me,
			currentTerm:      rf.currentTerm,
			votedFor:         rf.votedFor,
			log:              logEntries,
			state:            rf.state,
			commitIndex:      rf.commitIndex,
			lastApplied:      rf.lastApplied,
			leaderId:         rf.leaderId,
			nextIndex:        rf.nextIndex,
			matchIndex:       rf.matchIndex,
			numVotesGathered: rf.numVotesGathered,
		})
}
// stringifyCommand renders command with %+v, truncated to ten
// characters plus an ellipsis to keep trace lines short. Returns ""
// when debugging is disabled.
func stringifyCommand(command interface{}) string {
	if !debugging {
		return ""
	}
	s := fmt.Sprintf("%+v", command)
	if len(s) <= 10 {
		return s
	}
	return s[:10] + "..."
}
// assert panics when condition is false; used for internal invariants.
func assert(condition bool) {
	if condition {
		return
	}
	panic("assertion failed")
}
|
package main
import "testing"
// TestSolve checks Solve against a table of known (n, m) results.
func TestSolve(t *testing.T) {
	cases := []struct {
		n, m int
		want int64
	}{
		{2, 2, 3},
		{3, 2, 7},
		{2, 3, 9},
		{2, 4, 27},
		{4, 4, 3375},
	}
	for _, tc := range cases {
		if got := Solve(tc.n, tc.m); got != tc.want {
			t.Errorf("Solve(%v, %v)=%v, expected %v", tc.n, tc.m, got, tc.want)
		}
	}
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-20 15:45
* Description:
*****************************************************************/
package pdl
import "io"
// FileExport is implemented by code generators that write a parsed PDL
// project out as source files. The expected call sequence is
// BeginProjectWrite, then per namespace Begin/EndNamespace, and per file
// BeginFileWrite followed by the Write* methods, Flush and EndFileWrite,
// finishing with EndProjectWrite. The cxt value returned by
// BeginFileWrite is threaded back through every per-file call.
type FileExport interface {
	BeginProjectWrite() error
	EndProjectWrite()
	BeginNamespace(name string) error
	EndNamespace(name string)
	// BeginFileWrite opens fileName within ns, returning the writer and
	// an implementation-defined context passed to later calls.
	BeginFileWrite(ns *FileNamespace, fileName string) (w io.Writer, cxt interface{}, err error)
	WriteNamespace(w io.Writer, cxt interface{}, namespace string) error
	WriteImports(w io.Writer, cxt interface{}, im []string) error
	WriteTypedefs(w io.Writer, cxt interface{}, defs map[string]*FileTypeDef) error
	WriteTypes(w io.Writer, cxt interface{}, types map[string]*FileStruct) error
	WriteServices(w io.Writer, cxt interface{}, ss map[string]*FileService) error
	// Flush commits any buffered output before the file is closed.
	Flush(w io.Writer, cxt interface{}) error
	EndFileWrite(w io.Writer, ns *FileNamespace, fileName string)
}
|
package node
import (
"encoding/json"
"fmt"
com "github.com/hyperorchidlab/go-miner-pool/common"
"golang.org/x/crypto/ssh/terminal"
"io/ioutil"
"os"
"os/user"
"path/filepath"
)
// PathConf collects the resolved on-disk locations of every node file,
// all rooted under the base directory (see BaseDir).
type PathConf struct {
	WalletPath string // wallet.json
	DBPath     string // receipts database directory
	LogPath    string // node log file
	PidPath    string // pid file
	ConfPath   string // node configuration file
}
// Conf is the node configuration loaded from ConfFile: the BAS address
// plus the embedded Ethereum settings.
type Conf struct {
	BAS string
	*com.EthereumConfig
}
// On-disk layout: every node file lives under $HOME/.hop.
const (
	DefaultBaseDir = ".hop"
	WalletFile     = "wallet.json"
	DataBase       = "Receipts"
	LogFile        = "log.hop"
	PidFile        = "pid.hop"
	ConfFile       = "conf.hop"
)
// CMDServicePort is the command-service listen port; the default is
// overwritten by InitMinerNode.
var CMDServicePort = "42017"

// SysConf holds the configuration loaded from ConfFile at startup.
var SysConf = &Conf{}

// PathSetting holds the resolved node file paths (see InitPath).
var PathSetting = &PathConf{}
// BaseDir returns the per-user root directory for node data,
// $HOME/.hop. It panics when the current user cannot be resolved.
func BaseDir() string {
	usr, err := user.Current()
	if err != nil {
		panic(err)
	}
	// filepath.Join inserts separators itself; the original passed an
	// explicit separator component that Join simply cleaned away, so
	// the result is unchanged.
	return filepath.Join(usr.HomeDir, DefaultBaseDir)
}
// WalletDir returns the wallet file path under the given base
// directory. (The redundant separator component previously passed to
// filepath.Join was removed; Join adds separators itself, so the
// result is identical.)
func WalletDir(base string) string {
	return filepath.Join(base, WalletFile)
}
// String renders the path set as a bordered, human-readable banner for
// startup logging.
func (pc *PathConf) String() string {
	const border = "++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
	return "\n" + border +
		fmt.Sprintf("+WalletPath:\t%s+\n", pc.WalletPath) +
		fmt.Sprintf("+DBPath:\t%s+\n", pc.DBPath) +
		fmt.Sprintf("+LogPath:\t%s+\n", pc.LogPath) +
		fmt.Sprintf("+PidPath:\t%s+\n", pc.PidPath) +
		fmt.Sprintf("+ConfPath:\t%s+\n", pc.ConfPath) +
		border
}
// InitPath resolves every node file location under the base directory
// and prints the result; it panics when the base directory is missing
// (i.e. the node was never initialised with `HOP init`).
func (pc *PathConf) InitPath() {
	base := BaseDir()
	if _, ok := com.FileExists(base); !ok {
		panic("Init node first, please!' HOP init -p [PASSWORD]'")
	}
	join := func(name string) string {
		return filepath.Join(base, string(filepath.Separator), name)
	}
	pc.WalletPath = join(WalletFile)
	pc.DBPath = join(DataBase)
	pc.LogPath = join(LogFile)
	pc.PidPath = join(PidFile)
	pc.ConfPath = join(ConfFile)
	fmt.Println(pc.String())
}
// InitMinerNode is the one-time miner startup sequence: resolve paths,
// load and print the configuration, unlock the wallet and initialise
// logging. auth is the wallet password (empty prompts on the terminal)
// and port becomes the command-service listen port. Any failure panics.
func InitMinerNode(auth, port string) {
	PathSetting.InitPath()
	jsonStr, err := ioutil.ReadFile(PathSetting.ConfPath)
	if err != nil {
		panic("Load config failed")
	}
	if err := json.Unmarshal(jsonStr, SysConf); err != nil {
		panic(err)
	}
	fmt.Println(SysConf.String())
	if auth == "" {
		// Prompt and read without echoing the password.
		fmt.Println("Password=>")
		pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
		if err != nil {
			panic(err)
		}
		auth = string(pw)
	}
	if err := WInst().Open(auth); err != nil {
		panic(err)
	}
	com.InitLog(PathSetting.LogPath)
	CMDServicePort = port
}
|
package collector
import (
"context"
"fmt"
"github.com/jenningsloy318/panos_exporter/panos"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
//"net/url"
)
var (
	// GlobalCounterSubsystem is the Prometheus subsystem segment used in
	// every global-counter metric name.
	GlobalCounterSubsystem = "global_counter"
	// GlobalCounterLabelNames are the label keys attached to each
	// global-counter gauge, in the order the values are supplied.
	GlobalCounterLabelNames = []string{"category", "aspect", "severity", "data_processor", "domain"}
)
// GlobalCounterCollector scrapes PAN-OS global counters and exposes
// them as Prometheus gauges; metrics is rebuilt on every Collect.
type GlobalCounterCollector struct {
	ctx                   context.Context
	metrics               map[string]GlobalCounterMetric // per-counter descriptors, keyed by counter name
	panosClient           *panos.PaloAlto
	collectorScrapeStatus *prometheus.GaugeVec // 1 after a successful scrape
}
// GlobalCounterMetric wraps the Prometheus descriptor for one counter.
type GlobalCounterMetric struct {
	desc *prometheus.Desc
}
// NewGlobalCounterCollector wires a collector for the firewall's global
// counters; namespace prefixes the scrape-status gauge it creates.
func NewGlobalCounterCollector(ctx context.Context, namespace string, panosClient *panos.PaloAlto) *GlobalCounterCollector {
	scrapeStatus := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Name:      "collector_scrape_status",
			Help:      "collector_scrape_status",
		},
		[]string{"collector"},
	)
	return &GlobalCounterCollector{
		ctx:                   ctx,
		panosClient:           panosClient,
		collectorScrapeStatus: scrapeStatus,
	}
}
// Describe forwards every known metric descriptor, plus the
// scrape-status gauge's, to ch (prometheus.Collector contract).
func (g *GlobalCounterCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, m := range g.metrics {
		ch <- m.desc
	}
	g.collectorScrapeStatus.Describe(ch)
}
// Collect scrapes the firewall's global counters and emits one gauge
// per counter labelled with category/aspect/severity/data-processor
// (prometheus.Collector contract). Scrape failures are logged and the
// scrape silently yields no metrics.
func (g *GlobalCounterCollector) Collect(ch chan<- prometheus.Metric) {
	gContext, gCancel := context.WithCancel(g.ctx)
	defer gCancel()
	//initialize metrics map allows later assignment
	g.metrics = map[string]GlobalCounterMetric{}
	globalCounterData, err := g.panosClient.GetGlobalCounterData(gContext)
	if err != nil {
		log.Errorf("Error getting global counter data, %s", err)
		return
	}
	dp := globalCounterData.Result.DP
	globalCounterDataEntries := globalCounterData.Result.GlobalCounter.GlobalCountersData.GlobalCounterEntriesData
	for _, entry := range globalCounterDataEntries {
		labelValues := []string{entry.Category, entry.Aspect, entry.Severity, dp, "global_counter"}
		metricName := entry.Name
		metricDesc := fmt.Sprintf("global counter for %s", entry.Desc)
		// NOTE(review): `namespace` below is NOT the constructor
		// parameter (it is never stored on the struct); presumably a
		// package-level variable — confirm it exists elsewhere in the
		// package.
		newGlobalCounterMetric := GlobalCounterMetric{
			desc: prometheus.NewDesc(
				prometheus.BuildFQName(namespace, GlobalCounterSubsystem, metricName),
				metricDesc,
				GlobalCounterLabelNames,
				nil,
			),
		}
		g.metrics[metricName] = newGlobalCounterMetric
		ch <- prometheus.MustNewConstMetric(newGlobalCounterMetric.desc, prometheus.GaugeValue, entry.Value, labelValues...)
	}
	g.collectorScrapeStatus.WithLabelValues("global_counter").Set(float64(1))
}
|
package spider
import (
"github.com/yino/AgentSpider/po"
)
// TimerSyncSpider initialises the database and crawls one listing page
// of the 89ip proxy site using a desktop-Chrome user agent.
func TimerSyncSpider() {
	po.InitDB()
	const userAgent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
	s := NewGetDataSpider("https://www.89ip.cn/index_2.html", userAgent)
	s.GetList(int64(185))
}
|
package main
import (
"fmt"
"log"
"net/http"
)
// myHandler is a stateless http.Handler that greets every request.
type myHandler struct {
}
// NewMyHandler returns a ready-to-use myHandler (no state to set up).
func NewMyHandler() myHandler {
	return myHandler{}
}
// ServeHTTP answers every request with 200 OK and a greeting echoing
// the request method.
func (m myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	method := r.Method
	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, "hello world, request method: %v", method)
}
// main serves myHandler on :8080 and logs fatally if the server stops.
func main() {
	const port = "8080"
	// Timeouts and limits are left at their zero values, exactly as the
	// original set them explicitly; net/http treats zero as "no limit".
	server := &http.Server{
		Addr:    fmt.Sprintf(":%v", port),
		Handler: NewMyHandler(),
	}
	log.Printf("listening to port: %v", port)
	log.Fatal(server.ListenAndServe())
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// CreateEnumStmt models a CREATE TYPE ... AS ENUM statement: TypeName
// holds the type-name parts and Vals the enum labels (both as AST
// lists).
type CreateEnumStmt struct {
	TypeName *ast.List
	Vals     *ast.List
}
// Pos always returns 0: no source position is tracked for this node.
func (n *CreateEnumStmt) Pos() int {
	return 0
}
|
package transport
import "github.com/shijuvar/gokit-examples/services/account"
// Request/response DTOs for the account service transport layer. Each
// response embeds Err so endpoint failures travel in-band.
type (
	// CreateCustomerRequest asks to register the embedded customer.
	CreateCustomerRequest struct {
		Customer account.Customer
	}
	CreateCustomerResponse struct {
		Err error
	}
	// AddMoneyToWalletRequest credits Amount to the customer's wallet.
	AddMoneyToWalletRequest struct {
		CustomerID string
		Amount     float64
	}
	AddMoneyToWalletResponse struct {
		Err error
	}
	// GetWalletBalanceRequest asks for the customer's current balance.
	GetWalletBalanceRequest struct {
		CustomerID string
	}
	GetWalletBalanceResponse struct {
		Amount float64
		Err    error
	}
	// MakePaymentRequest debits Amount from the customer's wallet.
	MakePaymentRequest struct {
		CustomerID string
		Amount     float64
	}
	MakePaymentResponse struct {
		Err error
	}
)
|
package main
import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"strings"
"syscall"
"time"
rpc "github.com/hekmon/transmissionrpc"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"go.uber.org/multierr"
"github.com/danielmmetz/autoplex/pkg/extract"
"github.com/danielmmetz/autoplex/pkg/finder"
)
// sample matches the word "sample" case-insensitively; used to skip
// sample clips when collecting .mkv files.
var sample = regexp.MustCompile("(?i)sample")
// main wires up flags, validates the configuration, connects to
// transmission, then runs the extraction worker once immediately and
// again on every tick until SIGINT/SIGTERM.
func main() {
	host := pflag.String("host", "localhost", "host at which to access transmission")
	frequency := pflag.Duration("frequency", 1*time.Minute, "duration between runs")
	srcs := pflag.StringSlice("src", []string{}, "source directory for downloaded files")
	dests := pflag.StringSlice("dest", []string{}, "destination directory for extracted files")
	modeF := pflag.String("mode", "link", "method by which to create the destination files (copy or link)")
	pflag.Parse()
	_ = viper.BindPFlags(pflag.CommandLine)
	var m mode
	switch *modeF {
	case "link":
		m = link
	case "copy":
		m = copy
	default:
		log.Fatal("configuration error: mode must be either link or copy")
	}
	// Sources and destinations are matched pairwise by position.
	if len(*srcs) != len(*dests) {
		log.Fatal("configuration error: unequal number of sources and destinations")
	}
	pairs := zip(*srcs, *dests)
	log.Print("running with the following parameters:")
	log.Print("\tmode: ", m)
	log.Print("\tfrequency: ", frequency)
	log.Printf("\tpairs: %+v", pairs)
	// unrar is shelled out to later; fail fast when it is missing.
	if err := exec.Command("which", "unrar").Run(); err != nil {
		log.Fatalln("error: could not find unrar")
	}
	tc, err := rpc.New(*host, "rpcuser", "rpcpass", nil)
	if err != nil {
		log.Fatalln("error intiializing tranmission client: ", err)
	}
	ticker := time.NewTicker(*frequency)
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	w := worker{mode: m, tc: tc, srcDestPairs: pairs, retries: 1}
	// One immediate run so the first tick is not a full period away.
	if err := w.work(); err != nil {
		log.Print(err)
	}
	for {
		select {
		case <-ticker.C:
			log.Print("run starting")
			if err := w.work(); err != nil {
				log.Print(err)
			}
			log.Print("run successful")
		case <-sigs:
			log.Print("exiting")
			return
		}
	}
}
// srcDest pairs a torrent download directory with the destination
// directory its extracted files should land in.
type srcDest struct {
	src  string
	dest string
}
// zip pairs srcs[i] with dests[i]; any extra sources beyond len(dests)
// are dropped.
func zip(srcs, dests []string) []srcDest {
	n := len(srcs)
	if len(dests) < n {
		n = len(dests)
	}
	pairs := make([]srcDest, 0, n)
	for i := 0; i < n; i++ {
		pairs = append(pairs, srcDest{src: srcs[i], dest: dests[i]})
	}
	return pairs
}
// worker repeatedly extracts/links finished torrents into their paired
// destinations, remembering per-torrent outcomes across runs.
type worker struct {
	mode         mode        // link or copy
	tc           *rpc.Client // transmission RPC client
	srcDestPairs []srcDest
	retries      int // max attempts per torrent before it is skipped
	resultCache  resultCache
}
// mode selects how extracted files reach the destination directory.
type mode string

// NOTE: declaring `copy` at package level shadows the predeclared
// built-in copy() throughout this package.
const copy mode = "copy"
const link mode = "link"
// resultCache remembers, per torrent name, whether processing succeeded
// and how many attempts have been made; maps are allocated lazily.
type resultCache struct {
	success  map[string]bool
	attempts map[string]int
}
// RecordAttempt notes the outcome of one processing attempt for key,
// lazily allocating the maps so resultCache's zero value is usable.
func (c *resultCache) RecordAttempt(key string, success bool) {
	if c.success == nil {
		c.success = map[string]bool{}
	}
	if c.attempts == nil {
		c.attempts = map[string]int{}
	}
	c.success[key] = success
	c.attempts[key]++
}
// work fetches all finished torrents and tries to extract (rar) or
// link/copy (loose mkv) each one into its paired destination, recording
// per-torrent outcomes so completed or retry-exhausted items are
// skipped on later runs. Individual failures are logged and skipped;
// only the initial torrent fetch is a hard error.
func (w *worker) work() error {
	torrents, err := finder.GetFinishedTorrents(w.tc)
	if err != nil {
		return fmt.Errorf("error getting torrents: %w", err)
	}
	for _, candidate := range torrents {
		// Skip torrents already processed or out of retry budget.
		if w.resultCache.success[*candidate.Name] || w.resultCache.attempts[*candidate.Name] >= w.retries {
			continue
		}
		// Find the destination paired with this torrent's download dir.
		var dest string
		for _, p := range w.srcDestPairs {
			if *candidate.DownloadDir == p.src {
				dest = p.dest
				break
			}
		}
		if dest == "" {
			log.Print("no src-dest pair for processing ", *candidate.DownloadDir)
			w.resultCache.RecordAttempt(*candidate.Name, false)
			continue
		}
		path := filepath.Join(*candidate.DownloadDir, *candidate.Name)
		// Stat directly instead of Open+Stat: the original opened the
		// file and never closed it, leaking a descriptor per torrent
		// per run.
		stat, err := os.Stat(path)
		if err != nil {
			log.Printf("error calling Stat() on filepath %s: %v", path, err)
			w.resultCache.RecordAttempt(*candidate.Name, false)
			continue
		}
		// Only directories are handled (archives or mkv sets inside).
		if !stat.IsDir() {
			w.resultCache.RecordAttempt(*candidate.Name, false)
			continue
		}
		if containsRar, err := processRar(path, dest); err != nil {
			log.Printf("error during processRar(%s): %v", path, err)
			w.resultCache.RecordAttempt(*candidate.Name, false)
			continue
		} else if containsRar { // success. no need to continue trying
			w.resultCache.RecordAttempt(*candidate.Name, true)
			continue
		}
		// No rar archive: fall back to linking/copying loose .mkv files.
		containsMKV, err := w.processMKVS(path, dest)
		if err != nil {
			w.resultCache.RecordAttempt(*candidate.Name, false)
			log.Printf("error during processMKVS(%s): %v", path, err)
			continue
		}
		w.resultCache.RecordAttempt(*candidate.Name, containsMKV)
	}
	return nil
}
// processRar looks in path for .rar files. If present, attempts to find an .mkv
// within the archive and extract it to destDir.
// containsRar reports whether a rar archive was found at all; err is
// nil in the "no rar" and "already extracted" cases (the bare
// `return ..., err` statements below deliberately propagate a nil err).
func processRar(path string, destDir string) (containsRar bool, err error) {
	files, err := ioutil.ReadDir(path)
	if err != nil {
		log.Printf("error listing files in directory %v: %v", path, err)
		return false, err
	}
	rar := extract.FindRar(files)
	if rar == nil {
		// No archive present — not an error; caller falls back to mkv
		// handling.
		// log.Printf("error finding rar in %v: %v", *candidate.Name, err)
		return false, err
	}
	// List rar contents
	rarPath := filepath.Join(path, rar.Name())
	rawContents, err := exec.Command("unrar", "lbare", rarPath).Output()
	if err != nil {
		log.Printf("error listing contents of %s: %v", rarPath, err)
		return true, err
	}
	archiveContents := strings.Split(string(rawContents), "\n")
	// Identify the desired file
	targetMKVName := extract.FindMKV(archiveContents)
	if targetMKVName == "" {
		log.Print("no .mkv found in ", rar.Name())
		return true, err
	}
	// Skip extraction when the target already exists at the destination.
	found, err := finder.Contains(targetMKVName, destDir)
	if err != nil {
		log.Printf("error searching for %s in %s: %v", targetMKVName, destDir, err)
		return true, err
	} else if found {
		// log.Printf("found %v. skipping extraction", targetMKVName)
		return true, err
	}
	// Extract to well known path
	f, err := os.Create(filepath.Join(destDir, targetMKVName))
	if err != nil {
		log.Printf("unable to create file %s: %v", filepath.Join(destDir, targetMKVName), err)
		return true, err
	}
	log.Print("extracting ", targetMKVName)
	// "p -inul" streams the archive member to stdout, which is
	// redirected into the destination file.
	cmd := exec.Command("unrar", "p", "-inul", rarPath, targetMKVName)
	cmd.Stdout = f
	if err := cmd.Run(); err != nil {
		log.Printf("error while extracting %s: %v", targetMKVName, err)
		// Remove the partial output so a retry starts clean.
		_ = os.Remove(f.Name())
		return true, err
	}
	if err := f.Close(); err != nil {
		log.Printf("error closing %s: %v. removing it", targetMKVName, err)
		_ = os.Remove(f.Name())
		return true, err
	}
	log.Print("successfully extracted rar from ", filepath.Base(path))
	return true, nil
}
// processMKVS finds loose .mkv files under path (ignoring "sample"
// clips) and links or copies each into destDir according to w.mode,
// skipping files already present there. containsMKV reports whether any
// non-sample mkv was found; err accumulates per-file failures via
// multierr.
func (w *worker) processMKVS(path string, destDir string) (containsMKV bool, err error) {
	mkvPaths, err := finder.FindMKVS(path)
	if err != nil {
		log.Printf("error finding mkv in directory %v: %v", path, err)
		return false, err
	} else if len(mkvPaths) == 0 {
		return false, nil
	}
	for _, mkvPath := range mkvPaths {
		if sample.MatchString(mkvPath) {
			continue
		}
		found, newErr := finder.Contains(filepath.Base(mkvPath), destDir)
		if newErr != nil {
			// Fixed: the original logged the (possibly nil) named
			// return `err` here instead of the actual failure newErr.
			log.Printf("error searching for %s in %s: %v", filepath.Base(mkvPath), destDir, newErr)
			err = multierr.Append(err, newErr)
			continue
		} else if found {
			// log.Printf("found %v. skipping linking", filepath.Base(mkvPath))
			continue
		}
		switch w.mode {
		case link:
			log.Print("linking ", filepath.Base(mkvPath))
			if newErr := os.Link(mkvPath, filepath.Join(destDir, filepath.Base(mkvPath))); newErr != nil {
				err = multierr.Append(err, newErr)
				continue
			}
			log.Print("successfully linked ", filepath.Base(mkvPath))
		case copy:
			log.Print("copying ", filepath.Base(mkvPath))
			if newErr := copyMKV(mkvPath, destDir); newErr != nil {
				err = multierr.Append(err, newErr)
				continue
			}
			log.Print("successfully copied ", filepath.Base(mkvPath))
		}
	}
	return true, err
}
func copyMKV(mkvPath, destDir string) error {
original, err := os.Open(mkvPath)
if err != nil {
return fmt.Errorf("opening %s: %w", mkvPath, err)
}
defer original.Close()
f, err := os.Create(filepath.Join(destDir, filepath.Base(mkvPath)))
if err != nil {
return fmt.Errorf("creating destination file: %w", err)
}
defer f.Close()
if _, err := io.Copy(f, original); err != nil {
return fmt.Errorf("copying %s: %w", filepath.Base(mkvPath), err)
}
return nil
}
|
package iterations
import (
"fmt"
"strings"
"testing"
)
// TestRepeat compares Repeat against the standard library's
// strings.Repeat for one representative input.
func TestRepeat(t *testing.T) {
	const testChar, repetitionsNumber = "a", 6
	got := Repeat(testChar, repetitionsNumber)
	want := strings.Repeat(testChar, repetitionsNumber)
	if got != want {
		t.Errorf("Expected: %s, but received: %s", want, got)
	}
}
// BenchmarkRepeat measures Repeat("a", 5) using the standard testing
// benchmark loop.
func BenchmarkRepeat(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Repeat("a", 5)
	}
}
// ExampleRepeat demonstrates Repeat; go test verifies the Output line.
func ExampleRepeat() {
	repetitionsNumber := Repeat("A", 7)
	fmt.Println(repetitionsNumber)
	// Output: AAAAAAA
}
|
package main
import (
"flag"
"fmt"
"os"
"github.com/BurntSushi/toml"
"github.com/appconf/appconf"
//注册storage驱动
_ "github.com/appconf/storage/redis"
)
// cfgParser decodes the TOML file at filename into an appconf.Config.
func cfgParser(filename string) (cfg appconf.Config, err error) {
	_, err = toml.DecodeFile(filename, &cfg)
	return
}
// exit reports err on stderr and terminates with a non-zero status.
func exit(err error) {
	fmt.Fprintf(os.Stderr, "error: %v\n", err)
	os.Exit(1)
}
// main parses the -cfg flag, loads the TOML configuration and runs the
// appconf service, exiting non-zero on any failure.
func main() {
	var filename = flag.String("cfg", "config.toml", "the configuration file")
	flag.Parse()
	cfg, err := cfgParser(*filename)
	if err != nil {
		exit(err)
	}
	if err = appconf.Run(cfg); err != nil {
		exit(err)
	}
	os.Exit(0)
}
|
package main
import "fmt"
// ErrNegativeSqrt reports an attempt to take the square root of an
// invalid (negative) input.
type ErrNegativeSqrt struct {
	What float64 // the offending input value
}
// Error satisfies the error interface, reporting the offending value.
func (e *ErrNegativeSqrt) Error() string {
	return "Cannot sqrt negative number : " + fmt.Sprint(e.What)
}
// run wraps x in an ErrNegativeSqrt error value.
func run(x float64) error {
	return &ErrNegativeSqrt{What: x}
}
// main prints Sqrt for one valid and one invalid input to demonstrate
// the custom error type.
func main() {
	fmt.Println(Sqrt(2))
	fmt.Println(Sqrt(-2))
}
// Sqrt computes the square root of x with ten iterations of Newton's
// method, z_{n+1} = z_n - (z_n² - x)/(2·z_n), starting from z = 2.
// Negative x yields the input plus an ErrNegativeSqrt error.
func Sqrt(x float64) (float64, error) {
	if x < 0 {
		// Only genuinely negative values are errors.
		return x, run(x)
	}
	if x == 0 {
		// Fixed: the original rejected 0 as "negative"; 0 has the
		// exact root 0, and the iteration would also divide slowly
		// toward it, so return directly.
		return 0, nil
	}
	z := 2.0
	for i := 0; i < 10; i++ {
		z = computeValue(z, x)
	}
	return z, nil
}
// computeValue performs one Newton iteration toward sqrt(x) from the
// current estimate zn (the numerator helper is inlined here).
func computeValue(zn float64, x float64) (zn1 float64) {
	return zn - ((zn*zn - x) / (2 * zn))
}
// numerator evaluates zn² − x, the numerator of the Newton update step.
func numerator(zn float64, x float64) float64 {
	return zn*zn - x
}
|
package main
import (
"fmt"
"log"
"net/http"
)
func HelloServer(w http.ResponseWriter, req *http.Request) {
fmt.Println("Inside HelloServer hanndler")
fmt.Fprintf(w, "Hello,"+req.URL.Path[1:]) // 去掉前面的斜杠
}
// main registers HelloServer for all paths and serves on :9527.
func main() {
	http.HandleFunc("/", HelloServer)
	log.Fatal(http.ListenAndServe(":9527", nil))
}
|
package main
import "fmt"
// Color indices into each costs[i] / minCosts[i] row.
const (
	RED   = 0
	GREEN = 1
	BLUE  = 2
)
// min2 returns the smaller of a and b.
func min2(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// min3 returns the smallest of a, b and c.
func min3(a, b, c int) int {
	m := a
	if b < m {
		m = b
	}
	if c < m {
		m = c
	}
	return m
}
// calcMinAt fills minCosts[at][withColor]: the cheapest total cost of
// painting houses at..n-1 when house `at` takes withColor, assuming
// minCosts[at+1] is already populated (bottom-up DP).
func calcMinAt(at int, withColor int, costs [][]int, minCosts [][]int) {
	if at >= len(costs) {
		return
	}
	if at == len(costs)-1 {
		// Last house: cost is just its own per-color price.
		minCosts[at][RED] = costs[at][RED]
		minCosts[at][GREEN] = costs[at][GREEN]
		minCosts[at][BLUE] = costs[at][BLUE]
		return
	}
	// Adjacent houses must differ in color, so add the cheaper of the
	// two other colors for the next house.
	switch withColor {
	case RED:
		minCosts[at][RED] = costs[at][RED] + min2(minCosts[at+1][GREEN], minCosts[at+1][BLUE])
	case BLUE:
		minCosts[at][BLUE] = costs[at][BLUE] + min2(minCosts[at+1][GREEN], minCosts[at+1][RED])
	case GREEN:
		minCosts[at][GREEN] = costs[at][GREEN] + min2(minCosts[at+1][BLUE], minCosts[at+1][RED])
	}
}
func minCost(costs [][]int) int {
n := len(costs)
if n == 0 {
return 0
}
minCosts := make([][]int, n)
for i := 0; i < n; i++ {
minCosts[i] = make([]int, 3)
}
for i := n - 1; i >= 0; i-- {
calcMinAt(i, RED, costs, minCosts)
calcMinAt(i, GREEN, costs, minCosts)
calcMinAt(i, BLUE, costs, minCosts)
}
result := min3(minCosts[0][RED], minCosts[0][BLUE], minCosts[0][GREEN])
return result
}
// test1 runs minCost on a three-house example and prints the result.
func test1() {
	test1 := [][]int{
		[]int{17, 2, 17},
		[]int{16, 16, 5},
		[]int{14, 3, 19},
	}
	result := minCost(test1)
	fmt.Printf("result: %d\n", result)
}
// test2 runs minCost on the single-house edge case and prints the
// result (the cheapest of the three colors).
func test2() {
	test1 := [][]int{
		[]int{17, 2, 17},
	}
	result := minCost(test1)
	fmt.Printf("result: %d\n", result)
}
// main runs both ad-hoc demonstrations.
func main() {
	test1()
	test2()
}
|
package utils
import (
"github.com/aws/aws-lambda-go/events"
)
// CreateResponse builds an APIGatewayProxyResponse with the given
// status code and pre-marshalled JSON body, attaching permissive CORS
// headers. The error result is always nil.
//
// Per Cloudflare's CORS guidance: GET, POST, HEAD and OPTIONS are
// supported from any origin and any request header is allowed; these
// headers must be present on all responses to CORS preflight requests,
// which in practice means all responses to OPTIONS requests.
//
// NOTE(review): the guidance above promises "allow any header", yet no
// Access-Control-Allow-Headers header is set — confirm whether one
// should be added.
func CreateResponse(status int, body string) (events.APIGatewayProxyResponse, error) {
	response := events.APIGatewayProxyResponse{
		StatusCode: status,
		Headers: map[string]string{
			"x-custom-header":              "*",
			"Access-Control-Allow-Origin":  "*",
			"Access-Control-Allow-Methods": "GET,HEAD,POST,OPTIONS",
			"Access-Control-Max-Age":       "86400",
		},
		Body: body,
	}
	return response, nil
}
|
package wire
import (
"bytes"
"io"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/internal/protocol"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/qerr"
)
// Ginkgo spec for the CONNECTION_CLOSE frame: parsing and serialization
// are exercised in both the IETF varint encoding and the legacy
// big-endian encoding, including truncated-input (EOF) and oversized
// reason-phrase cases.
var _ = Describe("CONNECTION_CLOSE Frame", func() {
	Context("when parsing", func() {
		Context("in varint encoding", func() {
			It("accepts sample frame", func() {
				data := []byte{0x2, 0x0, 0x19}
				data = append(data, encodeVarInt(0x1b)...) // reason phrase length
				data = append(data, []byte{
					'N', 'o', ' ', 'r', 'e', 'c', 'e', 'n', 't', ' ', 'n', 'e', 't', 'w', 'o', 'r', 'k', ' ', 'a', 'c', 't', 'i', 'v', 'i', 't', 'y', '.',
				}...)
				b := bytes.NewReader(data)
				frame, err := parseConnectionCloseFrame(b, versionIETFFrames)
				Expect(err).ToNot(HaveOccurred())
				Expect(frame.ErrorCode).To(Equal(qerr.ErrorCode(0x19)))
				Expect(frame.ReasonPhrase).To(Equal("No recent network activity."))
				Expect(b.Len()).To(BeZero())
			})
			It("rejects long reason phrases", func() {
				data := []byte{0x2, 0xca, 0xfe}
				data = append(data, encodeVarInt(0xffff)...) // reason phrase length
				b := bytes.NewReader(data)
				_, err := parseConnectionCloseFrame(b, versionIETFFrames)
				Expect(err).To(MatchError(io.EOF))
			})
			It("errors on EOFs", func() {
				data := []byte{0x2, 0x0, 0x19}
				data = append(data, encodeVarInt(0x1b)...) // reason phrase length
				data = append(data, []byte{
					'N', 'o', ' ', 'r', 'e', 'c', 'e', 'n', 't', ' ', 'n', 'e', 't', 'w', 'o', 'r', 'k', ' ', 'a', 'c', 't', 'i', 'v', 'i', 't', 'y', '.',
				}...)
				_, err := parseConnectionCloseFrame(bytes.NewReader(data), versionIETFFrames)
				Expect(err).NotTo(HaveOccurred())
				// Every strict prefix of a valid frame must fail to parse.
				for i := range data {
					_, err := parseConnectionCloseFrame(bytes.NewReader(data[0:i]), versionIETFFrames)
					Expect(err).To(HaveOccurred())
				}
			})
			It("parses a frame without a reason phrase", func() {
				data := []byte{0x2, 0xca, 0xfe}
				data = append(data, encodeVarInt(0)...)
				b := bytes.NewReader(data)
				frame, err := parseConnectionCloseFrame(b, versionIETFFrames)
				Expect(err).ToNot(HaveOccurred())
				Expect(frame.ReasonPhrase).To(BeEmpty())
				Expect(b.Len()).To(BeZero())
			})
		})
		Context("in big endian", func() {
			It("accepts sample frame", func() {
				b := bytes.NewReader([]byte{0x2,
					0x0, 0x0, 0x0, 0x19, // error code
					0x0, 0x1b, // reason phrase length
					'N', 'o', ' ', 'r', 'e', 'c', 'e', 'n', 't', ' ', 'n', 'e', 't', 'w', 'o', 'r', 'k', ' ', 'a', 'c', 't', 'i', 'v', 'i', 't', 'y', '.',
				})
				frame, err := parseConnectionCloseFrame(b, versionBigEndian)
				Expect(err).ToNot(HaveOccurred())
				Expect(frame.ErrorCode).To(Equal(qerr.ErrorCode(0x19)))
				Expect(frame.ReasonPhrase).To(Equal("No recent network activity."))
				Expect(b.Len()).To(BeZero())
			})
			It("rejects long reason phrases", func() {
				b := bytes.NewReader([]byte{0x2,
					0xad, 0xfb, 0xca, 0xde, // error code
					0xff, 0x0, // reason phrase length
				})
				_, err := parseConnectionCloseFrame(b, versionBigEndian)
				Expect(err).To(MatchError(io.EOF))
			})
			It("errors on EOFs", func() {
				data := []byte{0x40,
					0x19, 0x0, 0x0, 0x0, // error code
					0x0, 0x1b, // reason phrase length
					'N', 'o', ' ', 'r', 'e', 'c', 'e', 'n', 't', ' ', 'n', 'e', 't', 'w', 'o', 'r', 'k', ' ', 'a', 'c', 't', 'i', 'v', 'i', 't', 'y', '.',
				}
				_, err := parseConnectionCloseFrame(bytes.NewReader(data), versionBigEndian)
				Expect(err).NotTo(HaveOccurred())
				// Every strict prefix of a valid frame must fail to parse.
				for i := range data {
					_, err := parseConnectionCloseFrame(bytes.NewReader(data[0:i]), versionBigEndian)
					Expect(err).To(HaveOccurred())
				}
			})
			It("parses a frame without a reason phrase", func() {
				b := bytes.NewReader([]byte{0x2,
					0xad, 0xfb, 0xca, 0xde, // error code
					0x0, 0x0, // reason phrase length
				})
				frame, err := parseConnectionCloseFrame(b, versionBigEndian)
				Expect(err).ToNot(HaveOccurred())
				Expect(frame.ReasonPhrase).To(BeEmpty())
				Expect(b.Len()).To(BeZero())
			})
		})
	})
	Context("when writing", func() {
		Context("in varint encoding", func() {
			It("writes a frame without a ReasonPhrase", func() {
				b := &bytes.Buffer{}
				frame := &ConnectionCloseFrame{
					ErrorCode: 0xbeef,
				}
				err := frame.Write(b, versionIETFFrames)
				Expect(err).ToNot(HaveOccurred())
				expected := []byte{0x2, 0xbe, 0xef}
				expected = append(expected, encodeVarInt(0)...)
				Expect(b.Bytes()).To(Equal(expected))
			})
			It("writes a frame with a ReasonPhrase", func() {
				b := &bytes.Buffer{}
				frame := &ConnectionCloseFrame{
					ErrorCode:    0xdead,
					ReasonPhrase: "foobar",
				}
				err := frame.Write(b, versionIETFFrames)
				Expect(err).ToNot(HaveOccurred())
				expected := []byte{0x2, 0xde, 0xad}
				expected = append(expected, encodeVarInt(6)...)
				expected = append(expected, []byte{'f', 'o', 'o', 'b', 'a', 'r'}...)
				Expect(b.Bytes()).To(Equal(expected))
			})
			It("has proper min length", func() {
				// Length() must agree with the bytes actually written.
				b := &bytes.Buffer{}
				f := &ConnectionCloseFrame{
					ErrorCode:    0xcafe,
					ReasonPhrase: "foobar",
				}
				err := f.Write(b, versionIETFFrames)
				Expect(err).ToNot(HaveOccurred())
				Expect(f.Length(versionIETFFrames)).To(Equal(protocol.ByteCount(b.Len())))
			})
		})
		Context("in big endian", func() {
			It("writes a frame without a ReasonPhrase", func() {
				b := &bytes.Buffer{}
				frame := &ConnectionCloseFrame{
					ErrorCode: 0xdeadbeef,
				}
				err := frame.Write(b, versionBigEndian)
				Expect(err).ToNot(HaveOccurred())
				Expect(b.Len()).To(Equal(1 + 2 + 4))
				Expect(b.Bytes()).To(Equal([]byte{0x2,
					0xde, 0xad, 0xbe, 0xef, // error code
					0x0, 0x0, // reason phrase length
				}))
			})
			It("writes a frame with a ReasonPhrase", func() {
				b := &bytes.Buffer{}
				frame := &ConnectionCloseFrame{
					ErrorCode:    0xdeadbeef,
					ReasonPhrase: "foobar",
				}
				err := frame.Write(b, versionBigEndian)
				Expect(err).ToNot(HaveOccurred())
				Expect(b.Len()).To(Equal(1 + 2 + 4 + len(frame.ReasonPhrase)))
				Expect(b.Bytes()).To(Equal([]byte{0x2,
					0xde, 0xad, 0xbe, 0xef, // error code
					0x0, 0x6, // reason phrase length
					'f', 'o', 'o', 'b', 'a', 'r',
				}))
			})
			It("has proper min length", func() {
				// Length() must agree with the bytes actually written.
				b := &bytes.Buffer{}
				f := &ConnectionCloseFrame{
					ErrorCode:    0xcafe,
					ReasonPhrase: "foobar",
				}
				err := f.Write(b, versionBigEndian)
				Expect(err).ToNot(HaveOccurred())
				Expect(f.Length(versionBigEndian)).To(Equal(protocol.ByteCount(b.Len())))
			})
		})
	})
})
|
// This file is a quine: main() reconstructs and prints the program's own
// source by splicing the `quine` string into itself. quote holds a backtick
// (rune 96), needed because a raw string literal cannot contain one.
// NOTE(review): the slice indices 40/41 are tied to the exact byte layout of
// the original file; any edit — including these comments — makes the printed
// output differ from the on-disk source. Treat this block as frozen.
package main
import "fmt"
var quine = `package main
import "fmt"
var quine = 
var quote = string(96)
func main() {
fmt.Println(quine[:40] + quote + quine + quote + "\n" + quine[41:])
}`
var quote = string(96)
func main() {
fmt.Println(quine[:40] + quote + quine + quote + "\n" + quine[41:])
}
|
package actions
import (
"database/sql"
"errors"
"github.com/barrydev/api-3h-shop/src/common/connect"
"github.com/barrydev/api-3h-shop/src/factories"
"github.com/barrydev/api-3h-shop/src/model"
"strings"
)
// UpdateCategory applies the non-nil fields of body to the category with id
// categoryId and returns the updated record. A ParentId of -1 is the
// sentinel for "detach from parent" and is stored as SQL NULL. When no field
// is set, the current category is returned unchanged (or an error when it
// does not exist).
func UpdateCategory(categoryId int64, body *model.BodyCategory) (*model.Category, error) {
	var (
		args []interface{}
		set  []string
	)
	if body.Name != nil {
		set = append(set, " name=?")
		args = append(args, body.Name)
	}
	if body.ImagePath != nil {
		set = append(set, " image_path=?")
		args = append(args, body.ImagePath)
	}
	if body.ParentId != nil {
		// NOTE: validation that the parent category exists was previously
		// sketched here and left disabled.
		set = append(set, " parent_id=?")
		args = append(args, &sql.NullInt64{
			Int64: *body.ParentId,
			Valid: *body.ParentId != -1,
		})
	}
	if len(set) == 0 {
		// Nothing to update: hand back the existing row.
		category, err := factories.FindCategoryById(categoryId)
		if err != nil {
			return nil, err
		}
		if category == nil {
			return nil, errors.New("category does not exists")
		}
		return category, nil
	}
	// Each set entry carries its own leading space, so "SET" concatenates
	// cleanly; the factory supplies the UPDATE prefix.
	queryString := "SET" + strings.Join(set, ",") + "\n" + "WHERE _id=?"
	args = append(args, categoryId)
	rowEffected, err := factories.UpdateCategory(&connect.QueryMySQL{
		QueryString: queryString,
		Args:        args,
	})
	if err != nil {
		return nil, err
	}
	if rowEffected == nil {
		return nil, errors.New("update error")
	}
	return GetCategoryById(categoryId)
}
|
package sync
import (
"bitbucket.org/avanz/anotherPomodoro/common"
"bitbucket.org/avanz/anotherPomodoro/repository"
"bufio"
"encoding/json"
"fmt"
"net"
"strconv"
"strings"
"time"
)
// Listener serves pomodoro state over TCP to peers that want to share time.
type Listener struct {
// repository provides read access to settings and the current timer state.
repository repository.IPomodoroRepository
// sharedAddress/sharedPort come from the "timeShareAddressAndPort" setting;
// defaults are applied in Start when either is unset.
sharedAddress string
sharedPort int
}
// IListener is the minimal contract consumed by callers: start serving.
type IListener interface {
Start()
}
// NewListener builds a Listener whose address and port come from the
// "timeShareAddressAndPort" setting ("host:port"). Read or parse failures
// are reported on common.MainErrorListener; missing pieces are left at their
// zero values and defaulted later by Start.
func NewListener(repository repository.IPomodoroRepository) IListener {
	listener := &Listener{repository: repository}
	var addrAndPort string
	if err := repository.Read("settings", "timeShareAddressAndPort", &addrAndPort); err != nil {
		common.MainErrorListener <- err
	}
	parts := strings.Split(addrAndPort, ":")
	listener.sharedAddress = parts[0]
	if len(parts) > 1 {
		port, err := strconv.Atoi(parts[1])
		if err != nil {
			common.MainErrorListener <- err
		}
		listener.sharedPort = port
	}
	return listener
}
// Start launches the TCP accept loop in its own goroutine, defaulting to
// 127.0.0.1:1234 when no address/port was configured. Listen and Accept
// errors are pushed to common.MainErrorListener and end the loop.
func (l *Listener) Start() {
	countConnection := 0
	go func(lst *Listener) {
		if lst.sharedAddress == "" {
			lst.sharedAddress = "127.0.0.1"
		}
		if lst.sharedPort == 0 {
			lst.sharedPort = 1234
		}
		ln, err := net.Listen("tcp4", fmt.Sprintf("%s:%d", lst.sharedAddress, lst.sharedPort))
		if err != nil {
			common.MainErrorListener <- err
			return
		}
		defer ln.Close()
		for {
			conn, err := ln.Accept()
			if err != nil {
				common.MainErrorListener <- err
				return
			}
			// Each client gets its own goroutine.
			go lst.handleConnection(conn)
			countConnection++
		}
	}(l)
}
func (l *Listener) handleConnection(c net.Conn) {
fmt.Print(".")
for {
netData, err := bufio.NewReader(c).ReadString('\n')
if err != nil {
common.MainErrorListener <- err
}
temp := strings.TrimSpace(netData)
if temp == "STOP" {
break
}
var currentTimerValue string
err = l.repository.Read("current", "timerValue", ¤tTimerValue)
if err != nil {
common.MainErrorListener <- err
}
var timeDuration = int((25 * time.Minute).Seconds())
err = l.repository.Read("settings", "timeDuration", &timeDuration)
if err != nil {
common.MainErrorListener <- err
}
var pauseDuration = int((5 * time.Minute).Seconds())
err = l.repository.Read("settings", "pauseDuration", &pauseDuration)
if err != nil {
common.MainErrorListener <- err
}
currentPomodoro := struct {
TimeDuration int
PauseDuration int
CurrentTimerValue string
}{timeDuration, pauseDuration, currentTimerValue}
currentPomodoroJson, err := json.Marshal(currentPomodoro)
if err != nil {
common.MainErrorListener <- err
}
c.Write([]byte(string(currentPomodoroJson) + "\n"))
}
c.Close()
}
|
package chats
import "websocket_chat/store/message"
// broadcast is the unbuffered fan-out channel for chat messages; a send
// blocks until a receiver takes the message.
// NOTE(review): the consuming goroutine is outside this file — confirm it is
// always running before anything sends here.
var (
broadcast = make(chan message.Message)
)
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggfuncs
import (
"unsafe"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
// All the AggFunc implementations are listed here for navigation.
// Each blank-identifier assignment is a compile-time assertion that the
// named type satisfies the AggFunc interface.
var (
// All the AggFunc implementations for "COUNT" are listed here.
_ AggFunc = (*countPartial)(nil)
_ AggFunc = (*countOriginal4Int)(nil)
_ AggFunc = (*countOriginal4Real)(nil)
_ AggFunc = (*countOriginal4Decimal)(nil)
_ AggFunc = (*countOriginal4Time)(nil)
_ AggFunc = (*countOriginal4Duration)(nil)
_ AggFunc = (*countOriginal4JSON)(nil)
_ AggFunc = (*countOriginal4String)(nil)
_ AggFunc = (*countOriginalWithDistinct4Int)(nil)
_ AggFunc = (*countOriginalWithDistinct4Real)(nil)
_ AggFunc = (*countOriginalWithDistinct4Decimal)(nil)
_ AggFunc = (*countOriginalWithDistinct4Duration)(nil)
_ AggFunc = (*countOriginalWithDistinct4String)(nil)
_ AggFunc = (*countOriginalWithDistinct)(nil)
// All the AggFunc implementations for "APPROX_COUNT_DISTINCT" are listed here.
_ AggFunc = (*approxCountDistinctOriginal)(nil)
_ AggFunc = (*approxCountDistinctPartial1)(nil)
_ AggFunc = (*approxCountDistinctPartial2)(nil)
_ AggFunc = (*approxCountDistinctFinal)(nil)
// All the AggFunc implementations for "APPROX_PERCENTILE" are listed here.
_ AggFunc = (*percentileOriginal4Int)(nil)
_ AggFunc = (*percentileOriginal4Real)(nil)
_ AggFunc = (*percentileOriginal4Decimal)(nil)
// All the AggFunc implementations for "FIRSTROW" are listed here.
_ AggFunc = (*firstRow4Decimal)(nil)
_ AggFunc = (*firstRow4Int)(nil)
_ AggFunc = (*firstRow4Time)(nil)
_ AggFunc = (*firstRow4String)(nil)
_ AggFunc = (*firstRow4Duration)(nil)
_ AggFunc = (*firstRow4Float32)(nil)
_ AggFunc = (*firstRow4Float64)(nil)
_ AggFunc = (*firstRow4JSON)(nil)
_ AggFunc = (*firstRow4Enum)(nil)
_ AggFunc = (*firstRow4Set)(nil)
// All the AggFunc implementations for "MAX"/"MIN" are listed here.
_ AggFunc = (*maxMin4Int)(nil)
_ AggFunc = (*maxMin4Uint)(nil)
_ AggFunc = (*maxMin4Float32)(nil)
_ AggFunc = (*maxMin4Float64)(nil)
_ AggFunc = (*maxMin4Decimal)(nil)
_ AggFunc = (*maxMin4String)(nil)
_ AggFunc = (*maxMin4Duration)(nil)
_ AggFunc = (*maxMin4JSON)(nil)
_ AggFunc = (*maxMin4Enum)(nil)
_ AggFunc = (*maxMin4Set)(nil)
// All the AggFunc implementations for "AVG" are listed here.
_ AggFunc = (*avgOriginal4Decimal)(nil)
_ AggFunc = (*avgOriginal4DistinctDecimal)(nil)
_ AggFunc = (*avgPartial4Decimal)(nil)
_ AggFunc = (*avgOriginal4Float64)(nil)
_ AggFunc = (*avgPartial4Float64)(nil)
_ AggFunc = (*avgOriginal4DistinctFloat64)(nil)
// All the AggFunc implementations for "SUM" are listed here.
_ AggFunc = (*sum4DistinctFloat64)(nil)
_ AggFunc = (*sum4DistinctDecimal)(nil)
_ AggFunc = (*sum4Decimal)(nil)
_ AggFunc = (*sum4Float64)(nil)
// All the AggFunc implementations for "GROUP_CONCAT" are listed here.
_ AggFunc = (*groupConcatDistinct)(nil)
_ AggFunc = (*groupConcat)(nil)
// All the AggFunc implementations for "BIT_OR" are listed here.
_ AggFunc = (*bitOrUint64)(nil)
// All the AggFunc implementations for "BIT_XOR" are listed here.
_ AggFunc = (*bitXorUint64)(nil)
// All the AggFunc implementations for "BIT_AND" are listed here.
_ AggFunc = (*bitAndUint64)(nil)
// All the AggFunc implementations for "JSON_ARRAYAGG" are listed here.
_ AggFunc = (*jsonArrayagg)(nil)
// All the AggFunc implementations for "JSON_OBJECTAGG" are listed here.
_ AggFunc = (*jsonObjectAgg)(nil)
)
// Commonly used type sizes (in bytes), used when accounting the memory
// consumed by partial aggregation results.
const (
// DefUint32Size is the size of uint32
DefUint32Size = int64(unsafe.Sizeof(uint32(0)))
// DefUint64Size is the size of uint64
DefUint64Size = int64(unsafe.Sizeof(uint64(0)))
// DefInt64Size is the size of int64
DefInt64Size = int64(unsafe.Sizeof(int64(0)))
// DefFloat64Size is the size of float64
DefFloat64Size = int64(unsafe.Sizeof(float64(0)))
// DefTimeSize is the size of time
DefTimeSize = int64(unsafe.Sizeof(types.Time{}))
// DefRowSize is the size of row
DefRowSize = int64(unsafe.Sizeof(chunk.Row{}))
// DefBoolSize is the size of bool
DefBoolSize = int64(unsafe.Sizeof(false))
// DefInterfaceSize is the size of an interface value (two machine words).
DefInterfaceSize = int64(16)
// DefMyDecimalSize is the size of MyDecimal
DefMyDecimalSize = int64(unsafe.Sizeof(types.MyDecimal{}))
// DefDurationSize is the size of duration
DefDurationSize = int64(unsafe.Sizeof(types.Duration{}))
)
// PartialResult represents data structure to store the partial result for the
// aggregate functions. Here we use unsafe.Pointer to allow the partial result
// to be any type.
type PartialResult unsafe.Pointer
// AggFunc is the interface to evaluate the aggregate functions.
type AggFunc interface {
// AllocPartialResult allocates a specific data structure to store the
// partial result, initializes it, and converts it to PartialResult to
// return back. The second returned value is the memDelta used to trace
// memory usage. Aggregate operator implementations, whether hash- or
// stream-based, should hold this allocated PartialResult for further
// operations like "ResetPartialResult" and "UpdatePartialResult".
AllocPartialResult() (pr PartialResult, memDelta int64)
// ResetPartialResult resets the partial result to the original state for a
// specific aggregate function. It converts the input PartialResult to the
// specific data structure which stores the partial result and then resets
// every field to the proper original state.
ResetPartialResult(pr PartialResult)
// UpdatePartialResult updates the specific partial result for an aggregate
// function using the input rows which all belong to the same data group.
// It converts the PartialResult to the specific data structure which stores
// the partial result and then iterates on the input rows and updates that
// partial result according to the functionality and the state of the
// aggregate function. The returned value is the memDelta used to trace memory
// usage.
UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error)
// MergePartialResult will be called in the final phase when executing in
// parallel. It converts the PartialResults `src`, `dst` to the same specific
// data structure which stores the partial results, and then evaluates the
// final result using the partial results as input values. The returned value
// is the memDelta used to trace memory usage.
MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) (memDelta int64, err error)
// AppendFinalResult2Chunk finalizes the partial result and appends the
// final result to the input chunk. Like other operations, it converts the
// input PartialResult to the specific data structure which stores the
// partial result, then calculates the final result and appends that
// final result to the chunk provided.
AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error
}
// baseAggFunc carries the state shared by every AggFunc implementation.
type baseAggFunc struct {
// args stores the input arguments for an aggregate function, we should
// call arg.EvalXXX to get the actual input data for this function.
args []expression.Expression
// ordinal stores the ordinal of the columns in the output chunk, which is
// used to append the final result of this function.
ordinal int
// retTp means the target type of the final agg should return.
retTp *types.FieldType
}
// MergePartialResult is a no-op default; implementations that support the
// parallel final phase override it.
func (*baseAggFunc) MergePartialResult(sessionctx.Context, PartialResult, PartialResult) (memDelta int64, err error) {
return 0, nil
}
// SlidingWindowAggFunc is the interface to evaluate the aggregate functions using sliding window.
type SlidingWindowAggFunc interface {
// Slide evaluates the aggregate functions using a sliding window. The input
// lastStart and lastEnd are the interval of the former sliding window;
// shiftStart, shiftEnd mean the sliding window offset. Note that the input
// PartialResult stores the intermediate result which will be used in the next
// sliding window, so be sure to call ResetPartialResult after a frame is
// evaluated completely.
Slide(sctx sessionctx.Context, getRow func(uint64) chunk.Row, lastStart, lastEnd uint64, shiftStart, shiftEnd uint64, pr PartialResult) error
}
// MaxMinSlidingWindowAggFunc is the interface to evaluate the max/min agg function using sliding window
type MaxMinSlidingWindowAggFunc interface {
// SetWindowStart sets the start position of window
SetWindowStart(start uint64)
}
|
package server
import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"machinedetector/mdpb"

	"github.com/garyburd/redigo/redis"
	"github.com/golang/glog"
	"github.com/golang/protobuf/proto"
)
// redisIndex alternates requests between the master and backup redis
// connections (even/odd), wrapping back to 0 past 99999.
// NOTE(review): incremented without synchronization in GetHosts/GetAllIsps;
// concurrent callers race — consider sync/atomic if these run in parallel.
var redisIndex int = 0
// GetHosts fetches the protobuf-encoded host list stored under HostListKey,
// alternating between the master and backup redis connections and falling
// back to the other one when the first GET fails.
// It returns the decoded host names, or an error when both connections fail,
// the reply cannot be converted/parsed, or the payload was written for a
// different data source than dbname.
func GetHosts(mc *RedisConn, bc *RedisConn, dbname string) ([]string, error) {
	hostlistkey := HostListKey
	var redisConns [2]*RedisConn = [2]*RedisConn{mc, bc}
	redisIndex++
	i := redisIndex % 2
	if redisIndex > 99999 {
		redisIndex = 0
	}
	reply, err := redisConns[i].SafeDo("GET", hostlistkey)
	if err != nil {
		reply, err = redisConns[1-i].SafeDo("GET", hostlistkey)
		if err != nil {
			return nil, err
		}
	}
	byteslice, err := redis.Bytes(reply, nil)
	if err != nil {
		// Previously this error was silently overwritten by the Unmarshal
		// assignment below; a nil or oddly-typed reply now fails fast.
		glog.Warningf("redis.Bytes error for [%s]: %v", hostlistkey, err)
		return nil, err
	}
	pPkt := new(mdpb.StringVec)
	err = proto.Unmarshal(byteslice, pPkt)
	if err != nil {
		// (fixed: format string was missing the closing bracket)
		glog.Warningf("ParseFromStringError for [%s]", hostlistkey)
		return nil, err
	}
	glog.Infof("Datasource [%s:%d:%s]", pPkt.GetDbHost(), pPkt.GetDbPort(), pPkt.GetDbName())
	// NOTE(review): dbname is compared against GetDbHost(), not GetDbName();
	// looks intentional given the callers, but worth confirming.
	if pPkt.GetDbHost() != dbname {
		glog.Warningf("[%s] data may be polluted by [%s]", hostlistkey, pPkt.GetLocalhost())
		return nil, fmt.Errorf("[%s] data may be polluted by [%s]", hostlistkey, pPkt.GetLocalhost())
	}
	return pPkt.GetStrs(), nil
}
// GetAllIsps returns the set of inspector hosts currently considered alive,
// keyed by hostname (value always 1). The local hostname is always included.
// Hash entries whose value is not "<unix_ts>_<dbname>" or whose timestamp is
// older than InspectorTolerateOff seconds are skipped. All failures degrade
// to returning whatever map has been built so far; the error result is
// always nil (kept for interface compatibility).
func GetAllIsps(mc *RedisConn, bc *RedisConn, dbname string) (map[string]int, error) {
	m := make(map[string]int)
	hn, err := os.Hostname()
	if err != nil {
		glog.Warningf("os.Hostname() error %v", err)
	} else {
		m[hn] = 1
	}
	inspectorKey := InspectorHashMap
	var redisConns [2]*RedisConn = [2]*RedisConn{mc, bc}
	redisIndex++
	i := redisIndex % 2
	if redisIndex > 99999 {
		redisIndex = 0
	}
	reply, err := redisConns[i].SafeDo("HGETALL", inspectorKey)
	if err != nil {
		reply, err = redisConns[1-i].SafeDo("HGETALL", inspectorKey)
		if err != nil {
			return m, nil
		}
	}
	fields, err := redis.Values(reply, nil)
	if err != nil {
		// Previously ignored: a conversion failure fell through to the
		// length check with a garbage slice. Degrade gracefully like the
		// other failure paths instead.
		glog.Warningf("redis.Values error for [%s]: %v", inspectorKey, err)
		return m, nil
	}
	n := len(fields)
	if n < 2 || n%2 != 0 {
		glog.Warningf("len(fields)=%d", n)
		return m, nil
	}
	// HGETALL replies alternate key, value, key, value, ...
	for i := 0; i < n; i += 2 {
		glog.Infof("%s => %s", fields[i], fields[i+1])
		key, _ := redis.String(fields[i], nil)
		value, _ := redis.String(fields[i+1], nil)
		sps := strings.Split(value, "_")
		if len(sps) != 2 || sps[1] != dbname {
			glog.Warningf("[%s] been Attack? [%s] [%s]", inspectorKey, key, value)
			continue
		}
		ts, err := strconv.Atoi(sps[0])
		if err != nil {
			glog.Warningf("Parse [%s] to int faild, [%s] [%s]", sps[0], key, value)
			continue
		}
		if time.Now().Unix()-int64(ts) > InspectorTolerateOff {
			glog.Infof("[%s] is Offline", key)
			continue
		}
		glog.Infof("[%s] is Ok", key)
		m[key] = 1
	}
	return m, nil
}
// SetHostStats stamps the current unix time for every host in hlist into the
// MaybeDetectedDeadHost hash, on both the master and backup connections.
// The HMSET argument slice is built once up front; see
// https://github.com/garyburd/redigo/issues/21 for the variadic-args idiom.
func SetHostStats(mc *RedisConn, bc *RedisConn, hlist []string) {
	now := time.Now().Unix()
	args := []interface{}{MaybeDetectedDeadHost}
	for _, host := range hlist {
		args = append(args, host, now)
	}
	for _, conn := range []*RedisConn{mc, bc} {
		if _, err := conn.SafeDo("HMSET", args...); err != nil {
			glog.Warningf("SetHostStats Failed, [%s] is down?", conn.HostPort())
		} else {
			glog.Infof("SetHostStats Ok, [%s]", conn.HostPort())
		}
	}
}
// UnlinkOldFile removes regular files directly under dir whose modification
// time is more than ttlSec seconds in the past. Errors are logged, never
// returned.
func UnlinkOldFile(dir string, ttlSec int64) {
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		glog.Warningf("ReadDir [%s] error[%v]", dir, err)
		return
	}
	now := time.Now().Unix()
	for _, f := range files {
		if !f.Mode().IsRegular() {
			glog.Infof("[%s] Not Regular, Continue", f.Name())
			continue
		}
		if now-f.ModTime().Unix() > ttlSec {
			// filepath.Join is correct whether or not dir carries a
			// trailing separator; the old dir+f.Name() concatenation
			// silently produced a wrong path when it did not.
			filename := filepath.Join(dir, f.Name())
			glog.Infof("Try to Remove [%s]", filename)
			if err = os.Remove(filename); err != nil {
				glog.Warningf("Remove [%s] failed [%v]", filename, err)
			}
		}
	}
}
|
package shttp
import (
	"crypto/subtle"
	"log"
	"net/http"
	"sync/atomic"
	"unsafe"
)
// NewAuthMux returns an AuthMux that guards every registered handler with
// HTTP basic auth using the given credentials.
func NewAuthMux(login, pass string) *AuthMux {
	// Construct the embedded ServeMux in place instead of copying
	// *http.NewServeMux(): ServeMux contains a sync.RWMutex, and copying it
	// by value trips `go vet` (copylocks). The zero-value ServeMux is ready
	// to use, which is exactly what NewServeMux returns anyway.
	return &AuthMux{
		login: &login,
		pass:  &pass,
	}
}
// AuthMux is an http.ServeMux whose registered handlers are all protected by
// HTTP basic auth. Credentials are held behind pointers so ChangeCreds can
// swap them atomically while requests are in flight.
type AuthMux struct {
http.ServeMux
login, pass *string
}
// ChangeCreds atomically replaces the stored login/password pointers so that
// each in-flight request observes one consistent *string per field.
// NOTE(review): this scheme is only race-free if readers load these pointers
// atomically as well — verify the read side (HTTPBasicAuth) does so.
func (m *AuthMux) ChangeCreds(login, pass string) {
atomic.SwapPointer((*unsafe.Pointer)((unsafe.Pointer)(&m.login)), unsafe.Pointer(&login))
atomic.SwapPointer((*unsafe.Pointer)((unsafe.Pointer)(&m.pass)), unsafe.Pointer(&pass))
}
// Handle registers handler for pattern on the embedded ServeMux, wrapping it
// so every request must pass HTTP basic auth first.
func (m *AuthMux) Handle(pattern string, handler http.Handler) {
m.ServeMux.Handle(pattern, m.HTTPBasicAuth(handler))
}
// HTTPBasicAuth wraps handler, rejecting (401) any request whose basic-auth
// credentials do not match the mux's current login/password.
func (m *AuthMux) HTTPBasicAuth(handler http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		login, password, _ := r.BasicAuth()
		// Load the credential pointers atomically to pair with the atomic
		// swaps in ChangeCreds; the previous plain reads of m.login/m.pass
		// were a data race against it.
		curLogin := (*string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&m.login))))
		curPass := (*string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&m.pass))))
		// Constant-time comparison avoids leaking credential prefix
		// information to untrusted clients via response timing. Both
		// comparisons always run (no short-circuit before the if).
		loginOK := subtle.ConstantTimeCompare([]byte(*curLogin), []byte(login)) == 1
		passOK := subtle.ConstantTimeCompare([]byte(*curPass), []byte(password)) == 1
		if !loginOK || !passOK {
			log.Println("[ERROR] Unauthorized HTTP access: wrong login/password!")
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		handler.ServeHTTP(w, r)
	}
}
|
package blog
import (
"github.com/jinzhu/gorm"
"mingchuan.me/api"
"mingchuan.me/pkg/morloop"
)
const (
// BlogServiceVersion is the hardcoded version number reported by the service.
BlogServiceVersion = 3
// MaxTitleChars is the default maximum post title length, in characters.
MaxTitleChars = 120
// MaxArticleChars is the default maximum article body length, in characters.
MaxArticleChars = 120000
)
// mRouter aliases the morloop router pointer type for brevity.
type mRouter = *morloop.Router
// BlogValidations holds the tunable content-validation limits.
type BlogValidations struct {
MaxTitleChars uint32
MaxArticleChars uint32
}
// BlogService - blog service main data struct
type BlogService struct {
*gorm.DB
Version uint16
Validations BlogValidations
}
// NewService - new blog service object backed by the given gorm database
// handle, stamped with the hardcoded service version and the default
// validation limits (overridable later via SetValidations).
func NewService(db *gorm.DB) *BlogService {
	svc := &BlogService{DB: db}
	svc.Version = BlogServiceVersion
	svc.Validations = BlogValidations{
		MaxTitleChars:   MaxTitleChars,
		MaxArticleChars: MaxArticleChars,
	}
	return svc
}
// Init - init service before doing any executions: auto-migrates the Article
// and ArticleEventLog schemas, stopping at the first failure.
func (blog *BlogService) Init() error {
	for _, schema := range []interface{}{&Article{}, &ArticleEventLog{}} {
		if err := blog.DB.AutoMigrate(schema).Error; err != nil {
			return err
		}
	}
	return nil
}
// SetValidations - override default blog validation settings
// (replaces the whole struct; callers supply every field).
func (blog *BlogService) SetValidations(validations BlogValidations) {
blog.Validations = validations
}
// RegisterAPI - wires the post controller's admin and public endpoints onto
// the given API instance.
func (blog *BlogService) RegisterAPI(api *api.API) {
pCtrl := CreatePostController(api, blog)
// admin
pCtrl.CreatePost()
pCtrl.GetOnePost()
pCtrl.PublishPost()
pCtrl.UpdatePost()
pCtrl.DeletePost()
pCtrl.ListAllPosts()
// public
pCtrl.GetOnePublicPost()
pCtrl.ListAllPublicPosts()
}
|
package log
import (
"net"
"net/http"
"time"
"github.com/rs/zerolog"
"github.com/pomerium/pomerium/internal/middleware/responsewriter"
"github.com/pomerium/pomerium/internal/telemetry/requestid"
)
// NewHandler injects log into requests context.
func NewHandler(getLogger func() *zerolog.Logger) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			// Copy the logger (including its internal context slice) so
			// later UpdateContext calls cannot race between requests.
			logger := getLogger().With().Logger()
			ctx := logger.WithContext(r.Context())
			next.ServeHTTP(w, r.WithContext(ctx))
		}
		return http.HandlerFunc(fn)
	}
}
// RemoteAddrHandler adds the request's remote address as a field to the context's logger
// using fieldKey as field key.
func RemoteAddrHandler(fieldKey string) func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			host, _, err := net.SplitHostPort(r.RemoteAddr)
			if err == nil {
				zerolog.Ctx(r.Context()).UpdateContext(func(c zerolog.Context) zerolog.Context {
					return c.Str(fieldKey, host)
				})
			}
			next.ServeHTTP(w, r)
		}
		return http.HandlerFunc(fn)
	}
}
// UserAgentHandler adds the request's user-agent as a field to the context's logger
// using fieldKey as field key.
func UserAgentHandler(fieldKey string) func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			userAgent := r.Header.Get("User-Agent")
			if userAgent != "" {
				zerolog.Ctx(r.Context()).UpdateContext(func(c zerolog.Context) zerolog.Context {
					return c.Str(fieldKey, userAgent)
				})
			}
			next.ServeHTTP(w, r)
		}
		return http.HandlerFunc(fn)
	}
}
// RefererHandler adds the request's referer as a field to the context's logger
// using fieldKey as field key.
func RefererHandler(fieldKey string) func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			referer := r.Header.Get("Referer")
			if referer != "" {
				zerolog.Ctx(r.Context()).UpdateContext(func(c zerolog.Context) zerolog.Context {
					return c.Str(fieldKey, referer)
				})
			}
			next.ServeHTTP(w, r)
		}
		return http.HandlerFunc(fn)
	}
}
// RequestIDHandler adds the request's id as a field to the context's logger
// using fieldKey as field key.
func RequestIDHandler(fieldKey string) func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			if id := requestid.FromContext(r.Context()); id != "" {
				zerolog.Ctx(r.Context()).UpdateContext(func(c zerolog.Context) zerolog.Context {
					return c.Str(fieldKey, id)
				})
			}
			next.ServeHTTP(w, r)
		}
		return http.HandlerFunc(fn)
	}
}
// AccessHandler returns a handler that call f after each request.
func AccessHandler(f func(r *http.Request, status, size int, duration time.Duration)) func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			began := time.Now()
			// Wrap the writer so status and byte count can be observed.
			wrapped := responsewriter.NewWrapResponseWriter(w, r.ProtoMajor)
			next.ServeHTTP(wrapped, r)
			f(r, wrapped.Status(), wrapped.BytesWritten(), time.Since(began))
		}
		return http.HandlerFunc(fn)
	}
}
// HeadersHandler adds the provided set of header keys to the log context.
//
// https://tools.ietf.org/html/rfc7239
// https://en.wikipedia.org/wiki/X-Forwarded-For
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For
func HeadersHandler(headers []string) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for _, key := range headers {
// NOTE(review): direct map access requires keys in canonical MIME
// form (e.g. "X-Forwarded-For"); a non-canonical entry in headers
// would silently never match — confirm what callers pass.
if values := r.Header[key]; len(values) != 0 {
log := zerolog.Ctx(r.Context())
log.UpdateContext(func(c zerolog.Context) zerolog.Context {
return c.Strs(key, values)
})
}
}
next.ServeHTTP(w, r)
})
}
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package flexibleengine
import (
"encoding/base64"
"fmt"
"net"
"net/http"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/pengux/check"
filters "github.com/CS-SI/SafeScale/providers/filters/images"
"github.com/CS-SI/SafeScale/providers/model"
"github.com/CS-SI/SafeScale/providers/model/enums/HostProperty"
"github.com/CS-SI/SafeScale/providers/model/enums/HostState"
"github.com/CS-SI/SafeScale/providers/model/enums/IPVersion"
propsv1 "github.com/CS-SI/SafeScale/providers/model/properties/v1"
"github.com/CS-SI/SafeScale/providers/openstack"
"github.com/CS-SI/SafeScale/providers/userdata"
"github.com/CS-SI/SafeScale/utils"
"github.com/CS-SI/SafeScale/utils/retry"
uuid "github.com/satori/go.uuid"
gc "github.com/gophercloud/gophercloud"
nics "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces"
exbfv "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
"github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
"github.com/gophercloud/gophercloud/pagination"
)
// gpuCfg describes the GPU resources attached to a flavor.
type gpuCfg struct {
	GPUNumber int    // number of GPUs exposed by the flavor
	GPUType   string // GPU model name ("UNKNOW" when not identified)
}

// gpuMap associates FlexibleEngine GPU flavor names with their GPU layout.
var gpuMap = map[string]gpuCfg{
	"g1.xlarge":    {GPUNumber: 1, GPUType: "UNKNOW"},
	"g1.2xlarge":   {GPUNumber: 1, GPUType: "UNKNOW"},
	"g1.2xlarge.8": {GPUNumber: 1, GPUType: "NVIDIA 1080 TI"},
}
// blockDevice is one entry of an OpenStack "block_device_mapping_v2" request
// section, serialized via its json tags by gc.BuildRequestBody.
type blockDevice struct {
// SourceType must be one of: "volume", "snapshot", "image", or "blank".
SourceType exbfv.SourceType `json:"source_type" required:"true"`
// UUID is the unique identifier for the existing volume, snapshot, or
// image (see above).
UUID string `json:"uuid,omitempty"`
// BootIndex is the boot index. It defaults to 0.
BootIndex string `json:"boot_index,omitempty"`
// DeleteOnTermination specifies whether or not to delete the attached volume
// when the server is deleted. Defaults to `false`.
DeleteOnTermination bool `json:"delete_on_termination"`
// DestinationType is the type that gets created. Possible values are "volume"
// and "local".
DestinationType exbfv.DestinationType `json:"destination_type,omitempty"`
// GuestFormat specifies the format of the block device.
GuestFormat string `json:"guest_format,omitempty"`
// VolumeSize is the size of the volume to create (in gigabytes). This can be
// omitted for existing volumes.
VolumeSize int `json:"volume_size,omitempty"`
// VolumeType is the type of volume to create.
VolumeType string `json:"volume_type,omitempty"`
}
// bootdiskCreateOptsExt is a structure that extends the server `CreateOpts`
// structure by allowing for a block device mapping, so a host can boot from
// a created volume.
type bootdiskCreateOptsExt struct {
servers.CreateOptsBuilder
BlockDevice []blockDevice `json:"block_device_mapping_v2,omitempty"`
}
// ToServerCreateMap adds the block device mapping option to the base server
// creation options.
func (opts bootdiskCreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) {
	base, err := opts.CreateOptsBuilder.ToServerCreateMap()
	if err != nil {
		return nil, err
	}
	if len(opts.BlockDevice) == 0 {
		missing := gc.ErrMissingInput{}
		missing.Argument = "bootfromvolume.CreateOptsExt.BlockDevice"
		return nil, missing
	}
	// Serialize every block device and attach the list under the "server"
	// section of the request body.
	devices := make([]map[string]interface{}, len(opts.BlockDevice))
	for idx := range opts.BlockDevice {
		body, err := gc.BuildRequestBody(opts.BlockDevice[idx], "")
		if err != nil {
			return nil, err
		}
		devices[idx] = body
	}
	base["server"].(map[string]interface{})["block_device_mapping_v2"] = devices
	return base, nil
}
// serverCreateOpts carries all parameters for building a server-creation
// request body.
// NOTE(review): this appears to mirror gophercloud's servers.CreateOpts with
// local tag tweaks — keep the two in sync when upgrading gophercloud.
type serverCreateOpts struct {
// Name is the name to assign to the newly launched server.
Name string `json:"name" required:"true"`
// ImageRef [optional; required if ImageName is not provided] is the ID or
// full URL to the image that contains the server's OS and initial state.
// Also optional if using the boot-from-volume extension.
ImageRef string `json:"imageRef,omitempty"`
// ImageName [optional; required if ImageRef is not provided] is the name of
// the image that contains the server's OS and initial state.
// Also optional if using the boot-from-volume extension.
ImageName string `json:"-,omitempty"`
// FlavorRef [optional; required if FlavorName is not provided] is the ID or
// full URL to the flavor that describes the server's specs.
FlavorRef string `json:"flavorRef"`
// FlavorName [optional; required if FlavorRef is not provided] is the name of
// the flavor that describes the server's specs.
FlavorName string `json:"-"`
// SecurityGroups lists the names of the security groups to which this server
// should belong.
SecurityGroups []string `json:"-"`
// UserData contains configuration information or scripts to use upon launch.
// Create will base64-encode it for you, if it isn't already.
UserData []byte `json:"-"`
// AvailabilityZone in which to launch the server.
AvailabilityZone string `json:"availability_zone,omitempty"`
// Networks dictates how this server will be attached to available networks.
// By default, the server will be attached to all isolated networks for the
// tenant.
Networks []servers.Network `json:"-"`
// Metadata contains key-value pairs (up to 255 bytes each) to attach to the
// server.
Metadata map[string]string `json:"metadata,omitempty"`
// Personality includes files to inject into the server at launch.
// Create will base64-encode file contents for you.
Personality servers.Personality `json:"personality,omitempty"`
// ConfigDrive enables metadata injection through a configuration drive.
ConfigDrive *bool `json:"config_drive,omitempty"`
// AdminPass sets the root user password. If not set, a randomly-generated
// password will be created and returned in the response.
AdminPass string `json:"adminPass,omitempty"`
// AccessIPv4 specifies an IPv4 address for the instance.
AccessIPv4 string `json:"accessIPv4,omitempty"`
// AccessIPv6 specifies an IPv6 address for the instance.
AccessIPv6 string `json:"accessIPv6,omitempty"`
// ServiceClient will allow calls to be made to retrieve an image or
// flavor ID by name.
ServiceClient *gc.ServiceClient `json:"-"`
}
// ToServerCreateMap assembles a request body based on the contents of a
// CreateOpts.
func (opts serverCreateOpts) ToServerCreateMap() (map[string]interface{}, error) {
// Detach the ServiceClient before serialization so it never leaks into the
// request body; it is only used below to resolve a flavor name to an ID.
sc := opts.ServiceClient
opts.ServiceClient = nil
b, err := gc.BuildRequestBody(opts, "")
if err != nil {
return nil, err
}
if opts.UserData != nil {
var userData string
// Heuristic: if the payload does not already decode as base64, encode
// it; otherwise pass it through untouched.
if _, err := base64.StdEncoding.DecodeString(string(opts.UserData)); err != nil {
userData = base64.StdEncoding.EncodeToString(opts.UserData)
} else {
userData = string(opts.UserData)
}
b["user_data"] = &userData
}
// Security groups are sent as a list of {"name": ...} objects.
if len(opts.SecurityGroups) > 0 {
securityGroups := make([]map[string]interface{}, len(opts.SecurityGroups))
for i, groupName := range opts.SecurityGroups {
securityGroups[i] = map[string]interface{}{"name": groupName}
}
b["security_groups"] = securityGroups
}
// Networks: only the fields that are actually set are serialized.
if len(opts.Networks) > 0 {
networks := make([]map[string]interface{}, len(opts.Networks))
for i, net := range opts.Networks {
networks[i] = make(map[string]interface{})
if net.UUID != "" {
networks[i]["uuid"] = net.UUID
}
if net.Port != "" {
networks[i]["port"] = net.Port
}
if net.FixedIP != "" {
networks[i]["fixed_ip"] = net.FixedIP
}
}
b["networks"] = networks
}
// If FlavorRef isn't provided, use FlavorName to ascertain the flavor ID.
if opts.FlavorRef == "" {
if opts.FlavorName == "" {
err := servers.ErrNeitherFlavorIDNorFlavorNameProvided{}
err.Argument = "FlavorRef/FlavorName"
return nil, err
}
if sc == nil {
err := servers.ErrNoClientProvidedForIDByName{}
err.Argument = "ServiceClient"
return nil, err
}
flavorID, err := flavors.IDFromName(sc, opts.FlavorName)
if err != nil {
return nil, err
}
b["flavorRef"] = flavorID
}
return map[string]interface{}{"server": b}, nil
}
// CreateHost creates a new host from a model.HostRequest and returns a
// *model.Host with its NetworkV1 and SizingV1 properties filled in.
//
// Constraints enforced here:
//   - the request must carry either a DefaultGateway or PublicIP=true,
//     otherwise the host would be unreachable;
//   - by convention, the first entry of request.Networks is the default network.
//
// The creation request is retried for up to 10 minutes; on failure inside an
// attempt, the partially-created server is deleted before retrying.
func (client *Client) CreateHost(request model.HostRequest) (*model.Host, error) {
	//msgFail := "Failed to create Host resource: %s"
	msgSuccess := fmt.Sprintf("Host resource '%s' created successfully", request.ResourceName)

	// A host without gateway and without public IP would be unreachable: reject early.
	if request.DefaultGateway == nil && !request.PublicIP {
		return nil, model.ResourceInvalidRequestError("host creation", "can't create a host without network and without public access (would be unreachable)")
	}

	// Validating name of the host
	if ok, err := validatehostName(request); !ok {
		return nil, fmt.Errorf("name '%s' is invalid for a FlexibleEngine Host: %s", request.ResourceName, openstack.ProviderErrorToString(err))
	}

	// The Default Network is the first of the provided list, by convention
	defaultNetwork := request.Networks[0]
	defaultNetworkID := defaultNetwork.ID
	defaultGateway := request.DefaultGateway
	// NOTE(review): a host is flagged as gateway when it has no gateway itself and
	// is not on the single-host network — confirm this is the intended rule.
	isGateway := defaultGateway == nil && defaultNetwork.Name != model.SingleHostNetworkName
	defaultGatewayID := ""
	defaultGatewayPrivateIP := ""
	if defaultGateway != nil {
		// Resolve the gateway's private IP on the default network from its NetworkV1 property.
		hostNetworkV1 := propsv1.NewHostNetwork()
		err := defaultGateway.Properties.Get(HostProperty.NetworkV1, hostNetworkV1)
		if err != nil {
			return nil, err
		}
		defaultGatewayPrivateIP = hostNetworkV1.IPv4Addresses[defaultNetworkID]
		defaultGatewayID = defaultGateway.ID
	}

	var nets []servers.Network
	// Add private networks
	for _, n := range request.Networks {
		nets = append(nets, servers.Network{
			UUID: n.ID,
		})
	}

	// If no key pair is supplied create one
	if request.KeyPair == nil {
		id, err := uuid.NewV4()
		if err != nil {
			return nil, fmt.Errorf("error creating UID : %v", err)
		}
		name := fmt.Sprintf("%s_%s", request.ResourceName, id)
		request.KeyPair, err = client.CreateKeyPair(name)
		if err != nil {
			// NOTE(review): the failure is only logged; request.KeyPair can still be
			// nil here, which would panic below at request.KeyPair.PrivateKey — confirm.
			msg := fmt.Sprintf("failed to create host key pair: %+v", err)
			log.Debugf(utils.TitleFirst(msg))
		}
	}

	// --- prepares data structures for Provider usage ---

	// Constructs userdata content
	userData, err := userdata.Prepare(client, request, request.KeyPair, defaultNetwork.CIDR)
	if err != nil {
		msg := fmt.Sprintf("failed to prepare user data content: %+v", err)
		log.Debugf(utils.TitleFirst(msg))
		// NOTE(review): non-constant format string — a '%' inside msg would be
		// misinterpreted by fmt.Errorf (flagged by go vet).
		return nil, fmt.Errorf(msg)
	}

	// Determine system disk size based on vcpus count
	template, err := client.GetTemplate(request.TemplateID)
	if err != nil {
		return nil, fmt.Errorf("Failed to get image: %s", openstack.ProviderErrorToString(err))
	}

	// Determines appropriate disk size: template value if set, otherwise scale with cores.
	var diskSize int
	if template.HostSize.DiskSize > 0 {
		diskSize = template.HostSize.DiskSize
	} else if template.HostSize.Cores < 16 {
		diskSize = 100
	} else if template.HostSize.Cores < 32 {
		diskSize = 200
	} else {
		diskSize = 400
	}

	// Select useable availability zone, the first one in the list
	azList, err := client.ListAvailabilityZones(false)
	if err != nil {
		return nil, err
	}
	var az string
	// Go map iteration order is random: this picks an arbitrary AZ, not a stable one.
	for az = range azList {
		break
	}
	log.Debugf("Selected Availability Zone: '%s'", az)

	// Defines boot disk (boot-from-volume on SSD, deleted with the server)
	bootdiskOpts := blockDevice{
		SourceType:          exbfv.SourceImage,
		DestinationType:     exbfv.DestinationVolume,
		BootIndex:           "0",
		DeleteOnTermination: true,
		UUID:                request.ImageID,
		VolumeType:          "SSD",
		VolumeSize:          diskSize,
	}
	// Defines server
	srvOpts := serverCreateOpts{
		Name:             request.ResourceName,
		SecurityGroups:   []string{client.SecurityGroup.Name},
		Networks:         nets,
		FlavorRef:        request.TemplateID,
		UserData:         userData,
		AvailabilityZone: az,
	}
	// Defines host "Extension bootfromvolume" options
	bdOpts := bootdiskCreateOptsExt{
		CreateOptsBuilder: srvOpts,
		BlockDevice:       []blockDevice{bootdiskOpts},
	}
	b, err := bdOpts.ToServerCreateMap()
	if err != nil {
		return nil, fmt.Errorf("failed to build query to create host '%s': %s", request.ResourceName, openstack.ProviderErrorToString(err))
	}

	// --- Initializes model.Host ---

	host := model.NewHost()
	host.PrivateKey = request.KeyPair.PrivateKey // Add PrivateKey to host definition
	hostNetworkV1 := propsv1.NewHostNetwork()
	hostNetworkV1.IsGateway = isGateway
	hostNetworkV1.DefaultNetworkID = defaultNetworkID
	hostNetworkV1.DefaultGatewayID = defaultGatewayID
	hostNetworkV1.DefaultGatewayPrivateIP = defaultGatewayPrivateIP

	// Updates Host property NetworkV1
	err = host.Properties.Set(HostProperty.NetworkV1, hostNetworkV1)
	if err != nil {
		return nil, err
	}

	// Adds Host property SizingV1
	err = host.Properties.Set(HostProperty.SizingV1, &propsv1.HostSizing{
		// Note: from there, no idea what was the RequestedSize; caller will have to complement this information
		Template:      request.TemplateID,
		AllocatedSize: template.HostSize,
	})
	if err != nil {
		return nil, err
	}

	// --- query provider for host creation ---

	// Retry creation until success, for 10 minutes
	var (
		httpResp *http.Response
		r        servers.CreateResult
	)
	retryErr := retry.WhileUnsuccessfulDelay5Seconds(
		func() error {
			httpResp, r.Err = client.osclt.Compute.Post(client.osclt.Compute.ServiceURL("servers"), b, &r.Body, &gc.RequestOpts{
				OkCodes: []int{200, 202},
			})
			server, err := r.Extract()
			if err != nil {
				// Clean up any server the provider may have created despite the error.
				if server != nil {
					servers.Delete(client.osclt.Compute, server.ID)
				}
				return fmt.Errorf("query to create host '%s' failed: %s (HTTP return code: %d)", request.ResourceName, openstack.ProviderErrorToString(err), httpResp.StatusCode)
				// msg := fmt.Sprintf(msgFail, openstack.ProviderErrorToString(err))
				// // TODO Gotcha !!
				// log.Debugf(msg)
				// return fmt.Errorf(msg)
			}
			host.ID = server.ID

			// Deletes the server if the wait below fails; 'err' here is the
			// closure-local variable assigned by WaitHostReady.
			defer func() {
				if err != nil {
					derr := servers.Delete(client.osclt.Compute, server.ID).ExtractErr()
					if derr != nil {
						log.Errorf("Failed to delete host '%s': %v", server.Name, derr)
					}
				}
			}()

			// Wait that host is ready, not just that the build is started
			host, err = client.WaitHostReady(host, time.Minute*5)
			if err != nil {
				switch err.(type) {
				case model.ErrResourceNotAvailable:
					return fmt.Errorf("host '%s' is in ERROR state", request.ResourceName)
				default:
					return fmt.Errorf("timeout waiting host '%s' ready: %s", request.ResourceName, openstack.ProviderErrorToString(err))
					// msg := fmt.Sprintf(msgFail, openstack.ProviderErrorToString(err))
					// // TODO Gotcha !!
					// log.Debugf(msg)
					// return fmt.Errorf(msg)
				}
			}
			return nil
		},
		10*time.Minute,
	)
	if retryErr != nil {
		// NOTE(review): returns outer 'err' (unset by the closure, so possibly nil)
		// instead of retryErr — likely should be 'return nil, retryErr'; confirm.
		return nil, err
	}
	if host == nil {
		return nil, errors.New("unexpected problem creating host")
	}

	// Starting from here, delete host if exiting with error
	// NOTE(review): the block below declares its own 'err' with ':=', so this defer
	// does not observe failures from the PublicIP branch — confirm intended.
	defer func() {
		if err != nil {
			derr := client.DeleteHost(host.ID)
			if derr != nil {
				log.Warnf("Failed to delete host '%s': %v", host.Name, derr)
			}
		}
	}()

	if request.PublicIP {
		fip, err := client.attachFloatingIP(host)
		if err != nil {
			spew.Dump(err)
			return nil, fmt.Errorf("error attaching public IP for host '%s': %s", request.ResourceName, openstack.ProviderErrorToString(err))
		}

		// Starting from here, delete Floating IP if exiting with error
		defer func() {
			if err != nil {
				derr := client.DeleteFloatingIP(fip.ID)
				if derr != nil {
					log.Errorf("Error deleting Floating IP: %v", derr)
				}
			}
		}()

		err = host.Properties.Get(HostProperty.NetworkV1, hostNetworkV1)
		if err != nil {
			return nil, err
		}
		// Record the floating IP under the matching IP version.
		if IPVersion.IPv4.Is(fip.PublicIPAddress) {
			hostNetworkV1.PublicIPv4 = fip.PublicIPAddress
		} else if IPVersion.IPv6.Is(fip.PublicIPAddress) {
			hostNetworkV1.PublicIPv6 = fip.PublicIPAddress
		}

		// Updates Host property NetworkV1 in host instance
		err = host.Properties.Set(HostProperty.NetworkV1, hostNetworkV1)
		if err != nil {
			return nil, err
		}

		// A public host without a gateway on a regular network acts as a gateway itself.
		if defaultGateway == nil && defaultNetwork.Name != model.SingleHostNetworkName {
			err = client.enableHostRouterMode(host)
			if err != nil {
				return nil, fmt.Errorf("error enabling gateway mode of host '%s': %s", request.ResourceName, openstack.ProviderErrorToString(err))
			}
		}
	}

	log.Infoln(msgSuccess)
	return host, nil
}
// validatehostName validates the name of an host based on known FlexibleEngine
// requirements: non-empty, at most 64 characters, limited to letters, digits,
// '_' and '-'.
// Returns (true, nil) on success, (false, error) with the joined validation
// messages otherwise.
func validatehostName(req model.HostRequest) (bool, error) {
	s := check.Struct{
		"ResourceName": check.Composite{
			check.NonEmpty{},
			check.Regex{Constraint: `^[a-zA-Z0-9_-]+$`},
			check.MaxChar{Constraint: 64},
		},
	}

	e := s.Validate(req)
	if e.HasErrors() {
		// Renamed from 'errors' to avoid shadowing the stdlib package name.
		nameErrors, _ := e.GetErrorsByKey("ResourceName")
		var errs []string
		for _, msg := range nameErrors {
			errs = append(errs, msg.Error())
		}
		// BUGFIX: use a literal "%s" verb; the previous fmt.Errorf(strings.Join(...))
		// treated validation messages as a format string (go vet printf, '%' mangling).
		return false, fmt.Errorf("%s", strings.Join(errs, " + "))
	}
	return true, nil
}
// GetHost updates the data inside host with the data from provider.
// hostParam may be a host ID (string) or a *model.Host; any other type panics.
// Retries for up to 15 minutes while the server reports ERROR/CREATING,
// stopping early on 404 (returns a typed "not found" error) or on any
// non-retryable provider error.
func (client *Client) GetHost(hostParam interface{}) (*model.Host, error) {
	var (
		host     *model.Host
		server   *servers.Server
		err      error
		notFound bool
	)

	switch hostParam.(type) {
	case *model.Host:
		host = hostParam.(*model.Host)
	case string:
		host = model.NewHost()
		host.ID = hostParam.(string)
	default:
		panic("hostParam must be a string or a *model.Host!")
	}

	const timeout = time.Minute * 15
	retryErr := retry.WhileUnsuccessful(
		func() error {
			server, err = servers.Get(client.osclt.Compute, host.ID).Extract()
			if err != nil {
				switch err.(type) {
				case gc.ErrDefault404:
					// If error is "resource not found", we want to return GopherCloud error as-is to be able
					// to behave differently in this special case. To do so, stop the retry
					notFound = true
					return nil
				case gc.ErrDefault500:
					// When the response is "Internal Server Error", retries
					log.Println("received 'Internal Server Error', retrying...")
					return err
				}
				// Any other error stops the retry
				err = fmt.Errorf("Error getting host '%s': %s", host.ID, openstack.ProviderErrorToString(err))
				return nil
			}
			// Transient states keep the retry going; anything else is final.
			if server.Status != "ERROR" && server.Status != "CREATING" {
				host.LastState = toHostState(server.Status)
				return nil
			}
			return fmt.Errorf("server not ready yet")
		},
		timeout,
		1*time.Second,
	)
	if retryErr != nil {
		switch retryErr.(type) {
		case retry.ErrTimeout:
			msg := "failed to get host"
			if host != nil {
				msg += fmt.Sprintf(" '%s'", host.Name)
			}
			msg += fmt.Sprintf(" information after %v", timeout)
			if err != nil {
				msg += fmt.Sprintf(": %v", err)
			}
			// BUGFIX: use a literal "%s" verb; fmt.Errorf(msg) would reinterpret
			// any '%' contained in the accumulated message.
			return nil, fmt.Errorf("%s", msg)
		default:
			// NOTE(review): non-timeout retry errors are deliberately(?) dropped here;
			// the last provider error (if any) is returned below — confirm.
		}
	}
	if err != nil {
		return nil, err
	}
	if notFound {
		return nil, model.ResourceNotFoundError("host", host.ID)
	}
	err = client.complementHost(host, server)
	return host, err
}
// complementHost complements Host data with content of server parameter:
// intrinsic identity (ID/Name when missing), last known state, the
// DescriptionV1 (created/updated timestamps), SizingV1 (allocated size from
// the flavor) and NetworkV1 (per-network IPv4/IPv6 addresses) properties.
func (client *Client) complementHost(host *model.Host, server *servers.Server) error {
	networks, addresses, ipv4, ipv6, err := client.collectAddresses(host)
	if err != nil {
		return err
	}

	// Updates intrinsic data of host if needed
	if host.ID == "" {
		host.ID = server.ID
	}
	if host.Name == "" {
		host.Name = server.Name
	}
	host.LastState = toHostState(server.Status)

	// Updates Host Property propsv1.HostDescription
	hostDescriptionV1 := propsv1.NewHostDescription()
	err = host.Properties.Get(HostProperty.DescriptionV1, hostDescriptionV1)
	if err != nil {
		return err
	}
	hostDescriptionV1.Created = server.Created
	hostDescriptionV1.Updated = server.Updated
	err = host.Properties.Set(HostProperty.DescriptionV1, hostDescriptionV1)
	if err != nil {
		return err
	}

	// Updates Host Property propsv1.HostSizing
	hostSizingV1 := propsv1.NewHostSizing()
	err = host.Properties.Get(HostProperty.SizingV1, hostSizingV1)
	if err != nil {
		return err
	}
	hostSizingV1.AllocatedSize = client.toHostSize(server.Flavor)
	err = host.Properties.Set(HostProperty.SizingV1, hostSizingV1)
	if err != nil {
		return err
	}

	// Updates Host Property HostNetwork
	hostNetworkV1 := propsv1.NewHostNetwork()
	err = host.Properties.Get(HostProperty.NetworkV1, hostNetworkV1)
	if err != nil {
		// BUGFIX: was 'return nil', silently swallowing the error; every sibling
		// property access in this function propagates it.
		return err
	}
	// Public addresses are only filled in when not already known.
	if hostNetworkV1.PublicIPv4 == "" {
		hostNetworkV1.PublicIPv4 = ipv4
	}
	if hostNetworkV1.PublicIPv6 == "" {
		hostNetworkV1.PublicIPv6 = ipv6
	}
	if len(hostNetworkV1.NetworksByID) > 0 {
		// Known networks: resolve addresses by network ID, falling back to name.
		ipv4Addresses := map[string]string{}
		ipv6Addresses := map[string]string{}
		for netid, netname := range hostNetworkV1.NetworksByID {
			if ip, ok := addresses[IPVersion.IPv4][netid]; ok {
				ipv4Addresses[netid] = ip
			} else if ip, ok := addresses[IPVersion.IPv4][netname]; ok {
				ipv4Addresses[netid] = ip
			} else {
				ipv4Addresses[netid] = ""
			}
			if ip, ok := addresses[IPVersion.IPv6][netid]; ok {
				ipv6Addresses[netid] = ip
			} else if ip, ok := addresses[IPVersion.IPv6][netname]; ok {
				ipv6Addresses[netid] = ip
			} else {
				ipv6Addresses[netid] = ""
			}
		}
		hostNetworkV1.IPv4Addresses = ipv4Addresses
		hostNetworkV1.IPv6Addresses = ipv6Addresses
	} else {
		// No known networks yet: build the maps from what the provider reported.
		networksByID := map[string]string{}
		ipv4Addresses := map[string]string{}
		ipv6Addresses := map[string]string{}
		for _, netid := range networks {
			networksByID[netid] = ""
			if ip, ok := addresses[IPVersion.IPv4][netid]; ok {
				ipv4Addresses[netid] = ip
			} else {
				ipv4Addresses[netid] = ""
			}
			if ip, ok := addresses[IPVersion.IPv6][netid]; ok {
				ipv6Addresses[netid] = ip
			} else {
				ipv6Addresses[netid] = ""
			}
		}
		hostNetworkV1.NetworksByID = networksByID
		// IPvxAddresses are here indexed by names... At least we have them...
		hostNetworkV1.IPv4Addresses = ipv4Addresses
		hostNetworkV1.IPv6Addresses = ipv6Addresses
	}

	// Updates network name and relationships if needed
	for netid, netname := range hostNetworkV1.NetworksByID {
		if netname == "" {
			net, err := client.GetNetwork(netid)
			if err != nil {
				// Best effort: keep going with the remaining networks.
				log.Errorf("failed to get network '%s'", netid)
				continue
			}
			hostNetworkV1.NetworksByID[netid] = net.Name
			hostNetworkV1.NetworksByName[net.Name] = netid
		}
	}
	return host.Properties.Set(HostProperty.NetworkV1, hostNetworkV1)
}
// collectAddresses converts adresses returned by the OpenStack driver.
// Returns the list of network IDs the host is attached to, a per-IP-version
// map of fixed addresses indexed by network ID, and the public IPv4/IPv6
// addresses (taken from the provider network) if they exist.
func (client *Client) collectAddresses(host *model.Host) ([]string, map[IPVersion.Enum]map[string]string, string, string, error) {
	var (
		networkIDs    = []string{}
		byVersion     = map[IPVersion.Enum]map[string]string{}
		publicV4      string
		publicV6      string
		allInterfaces = []nics.Interface{}
	)

	// Accumulate every interface across all pages.
	collect := func(page pagination.Page) (bool, error) {
		list, err := nics.ExtractInterfaces(page)
		if err != nil {
			return false, err
		}
		allInterfaces = append(allInterfaces, list...)
		return true, nil
	}
	if err := client.listInterfaces(host.ID).EachPage(collect); err != nil {
		return networkIDs, byVersion, "", "", err
	}

	byVersion[IPVersion.IPv4] = map[string]string{}
	byVersion[IPVersion.IPv6] = map[string]string{}
	for _, itf := range allInterfaces {
		networkIDs = append(networkIDs, itf.NetID)
		for _, fixed := range itf.FixedIPs {
			addr := fixed.IPAddress
			isV4 := net.ParseIP(addr).To4() != nil
			if itf.NetID == client.osclt.Cfg.ProviderNetwork {
				// Addresses on the provider network are the host's public addresses.
				if isV4 {
					publicV4 = addr
				} else {
					publicV6 = addr
				}
				continue
			}
			if isV4 {
				byVersion[IPVersion.IPv4][itf.NetID] = addr
			} else {
				byVersion[IPVersion.IPv6][itf.NetID] = addr
			}
		}
	}
	return networkIDs, byVersion, publicV4, publicV6, nil
}
// GetHostByName returns the host named 'name'.
// Delegates to the underlying OpenStack client.
func (client *Client) GetHostByName(name string) (*model.Host, error) {
	return client.osclt.GetHostByName(name)
}
// GetHostState returns the current state of the host identified by hostParam.
// Delegates to the underlying OpenStack client (see its documentation for
// accepted hostParam types).
func (client *Client) GetHostState(hostParam interface{}) (HostState.Enum, error) {
	return client.osclt.GetHostState(hostParam)
}
// ListHosts lists available hosts, complementing each entry with provider data.
// If some hosts were collected before an error occurred, the partial list is
// returned without error.
func (client *Client) ListHosts() ([]*model.Host, error) {
	var result []*model.Host
	pager := servers.List(client.osclt.Compute, servers.ListOpts{})
	err := pager.EachPage(func(page pagination.Page) (bool, error) {
		found, err := servers.ExtractServers(page)
		if err != nil {
			return false, err
		}
		for i := range found {
			h := model.NewHost()
			h.ID = found[i].ID
			if err := client.complementHost(h, &found[i]); err != nil {
				return false, err
			}
			result = append(result, h)
		}
		return true, nil
	})
	// Only fail when nothing at all could be listed.
	if len(result) == 0 && err != nil {
		return nil, fmt.Errorf("error listing hosts: %s", openstack.ProviderErrorToString(err))
	}
	return result, nil
}
// DeleteHost deletes the host identified by id.
// When floating IPs are in use, the host's floating IP is disassociated and
// deleted first. The deletion order is then retried for up to 3 minutes,
// re-submitting while the server lingers in ERROR state; a vanished server
// (404) is treated as success.
func (client *Client) DeleteHost(id string) error {
	_, err := client.GetHost(id)
	if err != nil {
		return err
	}

	if client.osclt.Cfg.UseFloatingIP {
		fip, err := client.getFloatingIPOfHost(id)
		if err == nil {
			if fip != nil {
				err = floatingips.DisassociateInstance(client.osclt.Compute, id, floatingips.DisassociateOpts{
					FloatingIP: fip.IP,
				}).ExtractErr()
				if err != nil {
					return fmt.Errorf("error deleting host %s : %s", id, openstack.ProviderErrorToString(err))
				}
				err = floatingips.Delete(client.osclt.Compute, fip.ID).ExtractErr()
				if err != nil {
					return fmt.Errorf("error deleting host %s : %s", id, openstack.ProviderErrorToString(err))
				}
			}
		}
	}

	// Try to remove host for 3 minutes
	outerRetryErr := retry.WhileUnsuccessful(
		func() error {
			resourcePresent := true
			// 1st, send delete host order
			err = servers.Delete(client.osclt.Compute, id).ExtractErr()
			if err != nil {
				switch err.(type) {
				case gc.ErrDefault404:
					// Resource not found, consider deletion succeeded (if the entry doesn't exist at all,
					// metadata deletion will return an error)
					return nil
				default:
					return fmt.Errorf("failed to submit host '%s' deletion: %s", id, openstack.ProviderErrorToString(err))
				}
			}
			// 2nd, check host status every 5 seconds until check failed.
			// If check succeeds but state is Error, retry the deletion.
			// If check fails and error isn't 'resource not found', retry
			var host *servers.Server
			innerRetryErr := retry.WhileUnsuccessfulDelay5Seconds(
				func() error {
					host, err = servers.Get(client.osclt.Compute, id).Extract()
					if err == nil {
						if toHostState(host.Status) == HostState.ERROR {
							// ERROR state: stop polling, the outer loop re-submits deletion.
							return nil
						}
						return fmt.Errorf("host '%s' state is '%s'", host.Name, host.Status)
					}
					switch err.(type) {
					case gc.ErrDefault404:
						// Gone: deletion completed.
						resourcePresent = false
						return nil
					}
					return err
				},
				1*time.Minute,
			)
			if innerRetryErr != nil {
				switch innerRetryErr.(type) {
				case retry.ErrTimeout:
					// retry deletion...
					return fmt.Errorf("host '%s' not deleted after %v", id, 1*time.Minute)
				default:
					return innerRetryErr
				}
			}
			if !resourcePresent {
				return nil
			}
			return fmt.Errorf("host '%s' in state 'ERROR', retrying to delete", id)
		},
		0,
		3*time.Minute,
	)
	if outerRetryErr != nil {
		log.Printf("failed to remove host '%s': %s", id, outerRetryErr.Error())
		// BUGFIX: previously returned 'err' unconditionally; 'err' can be nil here
		// (e.g. the last inner check succeeded), which reported success despite the
		// retry loop having failed. Fall back to the retry error in that case.
		if err != nil {
			return err
		}
		return outerRetryErr
	}
	return nil
}
// getFloatingIPOfHost returns the floating IP associated with the host
// identified by hostID. By convention only one floating IP is allocated to an
// host; finding more than one is reported as a configuration error.
func (client *Client) getFloatingIPOfHost(hostID string) (*floatingips.FloatingIP, error) {
	var attached []floatingips.FloatingIP
	err := floatingips.List(client.osclt.Compute).EachPage(func(page pagination.Page) (bool, error) {
		all, err := floatingips.ExtractFloatingIPs(page)
		if err != nil {
			return false, err
		}
		// Keep only the IPs bound to this host.
		for _, candidate := range all {
			if candidate.InstanceID == hostID {
				attached = append(attached, candidate)
			}
		}
		return true, nil
	})
	switch {
	case len(attached) == 0 && err != nil:
		return nil, fmt.Errorf("no floating IP found for host '%s': %s", hostID, openstack.ProviderErrorToString(err))
	case len(attached) == 0:
		return nil, fmt.Errorf("no floating IP found for host '%s'", hostID)
	case len(attached) > 1:
		return nil, fmt.Errorf("Configuration error, more than one Floating IP associated to host '%s'", hostID)
	}
	return &attached[0], nil
}
// attachFloatingIP creates a Floating IP and attaches it to an host.
// On association failure the freshly created IP is deleted (best effort)
// before the error is returned.
func (client *Client) attachFloatingIP(host *model.Host) (*FloatingIP, error) {
	fip, err := client.CreateFloatingIP()
	if err != nil {
		return nil, fmt.Errorf("failed to attach Floating IP on host '%s': %s", host.Name, openstack.ProviderErrorToString(err))
	}

	if err = client.AssociateFloatingIP(host, fip.ID); err != nil {
		// Best-effort cleanup of the now-orphaned floating IP.
		if nerr := client.DeleteFloatingIP(fip.ID); nerr != nil {
			log.Warnf("Error deleting floating ip: %v", nerr)
		}
		return nil, fmt.Errorf("failed to attach Floating IP to host '%s': %s", host.Name, openstack.ProviderErrorToString(err))
	}
	return fip, nil
}
// enableHostRouterMode enables the host to act as a router/gateway by setting
// an allowed-address-pair covering every address on its OpenStack port.
func (client *Client) enableHostRouterMode(host *model.Host) error {
	portID, err := client.getOpenstackPortID(host)
	if err != nil {
		return fmt.Errorf("failed to enable Router Mode on host '%s': %s", host.Name, openstack.ProviderErrorToString(err))
	}
	if portID == nil {
		return fmt.Errorf("failed to enable Router Mode on host '%s': failed to find OpenStack port", host.Name)
	}

	// The /0 mask makes this pair match any IP, letting the port carry traffic
	// for addresses other than its own — required for gateway operation.
	pairs := []ports.AddressPair{
		{
			IPAddress: "1.1.1.1/0",
		},
	}
	opts := ports.UpdateOpts{AllowedAddressPairs: &pairs}
	_, err = ports.Update(client.osclt.Network, *portID, opts).Extract()
	if err != nil {
		// Lowercased ("Failed" -> "failed") for consistency with the other error
		// messages of this method and Go error-string convention.
		return fmt.Errorf("failed to enable Router Mode on host '%s': %s", host.Name, openstack.ProviderErrorToString(err))
	}
	return nil
}
// disableHostRouterMode disables the host to act as a router/gateway by
// clearing the allowed-address-pairs of its OpenStack port.
func (client *Client) disableHostRouterMode(host *model.Host) error {
	portID, err := client.getOpenstackPortID(host)
	if err != nil {
		return fmt.Errorf("failed to disable Router Mode on host '%s': %s", host.Name, openstack.ProviderErrorToString(err))
	}
	// BUGFIX: getOpenstackPortID returns (nil, nil) when no port matches;
	// dereferencing portID below without this guard panicked. Mirrors the
	// check done in enableHostRouterMode.
	if portID == nil {
		return fmt.Errorf("failed to disable Router Mode on host '%s': failed to find OpenStack port", host.Name)
	}

	opts := ports.UpdateOpts{AllowedAddressPairs: nil}
	_, err = ports.Update(client.osclt.Network, *portID, opts).Extract()
	if err != nil {
		return fmt.Errorf("failed to disable Router Mode on host '%s': %s", host.Name, openstack.ProviderErrorToString(err))
	}
	return nil
}
// listInterfaces returns a pager over the network interfaces attached to the
// host identified by 'hostID' (Compute "os-interface" endpoint).
func (client *Client) listInterfaces(hostID string) pagination.Pager {
	asPage := func(r pagination.PageResult) pagination.Page {
		return nics.InterfacePage{SinglePageBase: pagination.SinglePageBase(r)}
	}
	endpoint := client.osclt.Compute.ServiceURL("servers", hostID, "os-interface")
	return pagination.NewPager(client.osclt.Compute, endpoint, asPage)
}
// getOpenstackPortID returns the port ID corresponding to the first private IP
// address of the host.
// Returns (nil, nil) when no matching port is found.
func (client *Client) getOpenstackPortID(host *model.Host) (*string, error) {
	privateIP := host.GetPrivateIP()
	var (
		matched bool
		itf     nics.Interface
	)
	err := client.listInterfaces(host.ID).EachPage(func(page pagination.Page) (bool, error) {
		list, err := nics.ExtractInterfaces(page)
		if err != nil {
			return false, err
		}
		for _, candidate := range list {
			for _, fixed := range candidate.FixedIPs {
				if fixed.IPAddress == privateIP {
					// Found: stop paging.
					matched = true
					itf = candidate
					return false, nil
				}
			}
		}
		return true, nil
	})
	if err != nil {
		return nil, fmt.Errorf("error browsing Openstack Interfaces of host '%s': %s", host.Name, openstack.ProviderErrorToString(err))
	}
	if !matched {
		return nil, nil
	}
	return &itf.PortID, nil
}
// toHostSize converts flavor attributes returned by OpenStack driver into a
// *propsv1.HostSize. When the flavor carries an "id", the matching template's
// size is preferred; otherwise the size is built from the "vcpus"/"disk"/"ram"
// attributes, skipping any attribute that is absent or of an unexpected type.
func (client *Client) toHostSize(flavor map[string]interface{}) *propsv1.HostSize {
	if i, ok := flavor["id"]; ok {
		if fid, ok := i.(string); ok {
			// BUGFIX: the GetTemplate error was previously discarded and tpl.HostSize
			// dereferenced unconditionally, panicking when the lookup failed.
			if tpl, err := client.GetTemplate(fid); err == nil && tpl != nil {
				return tpl.HostSize
			}
		}
	}
	hostSize := propsv1.NewHostSize()
	// BUGFIX: comma-ok assertions instead of bare ones, which panicked on
	// unexpected attribute types.
	// NOTE(review): JSON-decoded flavors yield float64, not int/float32 — confirm
	// where this map comes from.
	if cores, ok := flavor["vcpus"].(int); ok {
		hostSize.Cores = cores
	}
	if disk, ok := flavor["disk"].(int); ok {
		hostSize.DiskSize = disk
	}
	if ram, ok := flavor["ram"].(float32); ok {
		hostSize.RAMSize = ram / 1000.0
	}
	return hostSize
}
// toHostState converts host status returned by FlexibleEngine driver into
// HostState enum. The comparison is case-insensitive (generalizes the previous
// exact lower/upper matching to also accept mixed-case statuses such as
// "Active"); unknown statuses map to HostState.ERROR.
func toHostState(status string) HostState.Enum {
	switch strings.ToLower(status) {
	case "build", "building":
		return HostState.STARTING
	case "active":
		return HostState.STARTED
	case "rescued":
		return HostState.STOPPING
	case "stopped", "shutoff":
		return HostState.STOPPED
	default:
		return HostState.ERROR
	}
}
// // convertAdresses converts adresses returned by the FlexibleEngine driver and arranges them by version in a map
// func (client *Client) convertAdresses(addresses map[string]interface{}) map[IPVersion.Enum][]string {
// addrs := make(map[IPVersion.Enum][]string)
// for _, obj := range addresses {
// for _, networkAddresses := range obj.([]interface{}) {
// address := networkAddresses.(map[string]interface{})
// version := address["version"].(float64)
// fixedIP := address["addr"].(string)
// switch version {
// case 4:
// addrs[IPVersion.IPv4] = append(addrs[IPVersion.IPv4], fixedIP)
// case 6:
// addrs[IPVersion.IPv6] = append(addrs[IPVersion.IPv4], fixedIP)
// }
// }
// }
// return addrs
// }
// // toHost converts a FlexibleEngine (almost OpenStack...) server into api host
// func (client *Client) toHost(server *servers.Server) *model.Host {
// // adresses, ipv4, ipv6 := client.convertAdresses(server.Addresses)
// adresses := client.convertAdresses(server.Addresses)
// host := model.Host{
// ID: server.ID,
// Name: server.Name,
// AccessIPv4: server.AccessIPv4,
// AccessIPv6: server.AccessIPv6,
// LastState: toHostState(server.Status),
// }
// networkV1 := &model.HostExtensionNetworkV1{
// PrivateIPsV4: adresses[IPVersion.IPv4],
// PrivateIPsV6: adresses[IPVersion.IPv6],
// }
// err := host.Properties.Set(HostProperty.NetworkV1, networkV1)
// if err != nil {
// log.Errorf(err.Error())
// }
// sizingV1 := model.HostExtensionSizingV1{
// AllocatedSize: client.toHostSize(server.Flavor),
// }
// err = host.Properties.Set(HostProperty.SizingV1, sizingV1)
// if err != nil {
// log.Errorf(err.Error())
// }
// if server.AccessIPv4 != "" {
// host.AccessIPv4 = server.AccessIPv4
// }
// if server.AccessIPv6 == "" {
// host.AccessIPv6 = server.AccessIPv6
// }
// return &host
// }
// CreateKeyPair creates and imports a key pair named 'name'.
// Delegates to the underlying OpenStack client.
func (client *Client) CreateKeyPair(name string) (*model.KeyPair, error) {
	return client.osclt.CreateKeyPair(name)
}
// GetKeyPair returns the key pair identified by id.
// Delegates to the underlying OpenStack client.
func (client *Client) GetKeyPair(id string) (*model.KeyPair, error) {
	return client.osclt.GetKeyPair(id)
}
// ListKeyPairs lists available key pairs.
// Delegates to the underlying OpenStack client.
func (client *Client) ListKeyPairs() ([]model.KeyPair, error) {
	return client.osclt.ListKeyPairs()
}
// DeleteKeyPair deletes the key pair identified by id.
// Delegates to the underlying OpenStack client.
func (client *Client) DeleteKeyPair(id string) error {
	return client.osclt.DeleteKeyPair(id)
}
// ListAvailabilityZones lists the usable Availability Zones; 'all' widens the
// selection (exact semantics defined by the underlying OpenStack client).
func (client *Client) ListAvailabilityZones(all bool) (map[string]bool, error) {
	return client.osclt.ListAvailabilityZones(all)
}
// GetImage returns the Image referenced by id.
// Delegates to the underlying OpenStack client.
func (client *Client) GetImage(id string) (*model.Image, error) {
	return client.osclt.GetImage(id)
}
// isWindowsImage reports whether the image name contains "windows"
// (case-insensitive).
func isWindowsImage(image model.Image) bool {
	lowered := strings.ToLower(image.Name)
	return strings.Contains(lowered, "windows")
}
// isBMSImage reports whether the image name starts with "OBS-BMS" or "OBS_BMS"
// (case-insensitive), i.e. a bare-metal-server image.
func isBMSImage(image model.Image) bool {
	upper := strings.ToUpper(image.Name)
	return strings.HasPrefix(upper, "OBS-BMS") || strings.HasPrefix(upper, "OBS_BMS")
}
// ListImages lists available OS images.
// With all=false, Windows and bare-metal (BMS) images are filtered out.
func (client *Client) ListImages(all bool) ([]model.Image, error) {
	images, err := client.osclt.ListImages(all)
	if err != nil {
		return nil, err
	}
	if all {
		return images, nil
	}
	notWindows := filters.NewFilter(isWindowsImage).Not()
	notBMS := filters.NewFilter(isBMSImage).Not()
	return filters.FilterImages(images, notWindows.And(notBMS)), nil
}
// addGPUCfg copies the GPU configuration from gpuMap into the template when an
// entry exists for the template's name; otherwise the template is untouched.
func addGPUCfg(tpl *model.HostTemplate) {
	cfg, found := gpuMap[tpl.Name]
	if !found {
		return
	}
	tpl.GPUNumber = cfg.GPUNumber
	tpl.GPUType = cfg.GPUType
}
// GetTemplate returns the Template referenced by id, enriched with GPU
// configuration when known.
func (client *Client) GetTemplate(id string) (*model.HostTemplate, error) {
	tpl, err := client.osclt.GetTemplate(id)
	if tpl == nil {
		return nil, err
	}
	addGPUCfg(tpl)
	return tpl, err
}
// ListTemplates lists available host templates, each enriched with GPU
// configuration when known.
// Host templates are sorted using Dominant Resource Fairness Algorithm.
func (client *Client) ListTemplates(all bool) ([]model.HostTemplate, error) {
	allTemplates, err := client.osclt.ListTemplates(all)
	if err != nil {
		return nil, err
	}
	var enriched []model.HostTemplate
	for i := range allTemplates {
		t := allTemplates[i]
		addGPUCfg(&t)
		enriched = append(enriched, t)
	}
	return enriched, nil
}
// StopHost stops the host identified by id.
// Delegates to the underlying OpenStack client.
func (client *Client) StopHost(id string) error {
	return client.osclt.StopHost(id)
}
// StartHost starts the host identified by id.
// Delegates to the underlying OpenStack client.
func (client *Client) StartHost(id string) error {
	return client.osclt.StartHost(id)
}
// RebootHost reboots the host identified by id.
// Delegates to the underlying OpenStack client.
func (client *Client) RebootHost(id string) error {
	return client.osclt.RebootHost(id)
}
// WaitHostReady waits for a host to reach ready (STARTED) state, polling every
// 2 seconds up to 'timeout'.
// hostParam can be an ID of host (string), or an instance of *model.Host; any
// other type will panic (the panic originates in GetHost).
// A host observed in ERROR state stops the wait immediately and is reported as
// a model "resource not available" error.
func (client *Client) WaitHostReady(hostParam interface{}, timeout time.Duration) (*model.Host, error) {
	var (
		host        *model.Host
		hostInError bool
		err         error
	)

	retryErr := retry.WhileUnsuccessful(
		func() error {
			host, err = client.GetHost(hostParam)
			if err != nil {
				return err
			}
			if host.LastState == HostState.ERROR {
				// ERROR will not recover by waiting: stop the retry loop.
				hostInError = true
				return nil
			}
			if host.LastState != HostState.STARTED {
				return fmt.Errorf("not in ready state (current state: %s)", host.LastState.String())
			}
			return nil
		},
		2*time.Second,
		timeout,
	)
	if retryErr != nil {
		switch retryErr.(type) {
		case retry.ErrTimeout:
			msg := "timeout waiting to get host"
			if host != nil {
				msg += fmt.Sprintf(" '%s'", host.Name)
			}
			// BUGFIX: restored the missing leading space (previously produced
			// "...host 'x'information after...").
			msg += fmt.Sprintf(" information after %v", timeout)
			if err != nil {
				msg += fmt.Sprintf(": %v", err)
			}
			// BUGFIX: literal "%s" verb so '%' inside msg is not reinterpreted
			// (was fmt.Errorf(msg), flagged by go vet).
			return nil, fmt.Errorf("%s", msg)
		default:
			return nil, retryErr
		}
	}

	// If host state is ERROR, returns the error
	if hostInError {
		return nil, model.ResourceNotAvailableError("host", "")
	}
	return host, err
}
|
package config
import (
"encoding/json"
"fmt"
"os"
"path"
"path/filepath"
"strings"
)
// ExchangeURLEnvvarName is the name of the environment variable holding the
// exchange URL (HZN_EXCHANGE_URL).
const ExchangeURLEnvvarName = "HZN_EXCHANGE_URL"

// FileSyncServiceCSSURLEnvvarName is the name of the environment variable
// holding the file sync service CSS URL (HZN_FSS_CSSURL).
const FileSyncServiceCSSURLEnvvarName = "HZN_FSS_CSSURL"
// HorizonConfig is the top-level configuration, aggregating the configuration
// of the different Anax component flavors plus shared helpers.
type HorizonConfig struct {
	Edge          Config        // configuration for the Edge component flavor of Anax
	AgreementBot  AGConfig      // configuration for the Agreement Bot flavor of Anax
	Collaborators Collaborators // shared collaborator objects (type declared elsewhere in this package)
	ArchSynonyms  ArchSynonyms  // architecture name synonym mapping (type declared elsewhere in this package)
}
// Config holds the configuration options for the Edge component flavor of Anax.
type Config struct {
	ServiceStorage                   string // The base storage directory where the service can write or get the data.
	TorrentDir                       string
	APIListen                        string
	DBPath                           string
	DockerEndpoint                   string
	DockerCredFilePath               string
	DefaultCPUSet                    string
	DefaultServiceRegistrationRAM    int64
	StaticWebContent                 string
	PublicKeyPath                    string
	TrustSystemCACerts               bool   // If equal to true, the HTTP client factory will set up clients that trust CA certs provided by a Linux distribution (see https://golang.org/pkg/crypto/x509/#SystemCertPool and https://golang.org/src/crypto/x509/root_linux.go)
	CACertsPath                      string // Path to a file containing PEM-encoded x509 certs HTTP clients in Anax will trust (additive to the configuration option "TrustSystemCACerts")
	ExchangeURL                      string
	DefaultHTTPClientTimeoutS        uint
	PolicyPath                       string
	ExchangeHeartbeat                int    // Seconds between heartbeats
	ExchangeVersionCheckIntervalM    int64  // Exchange version check interval in minutes. The default is 720.
	AgreementTimeoutS                uint64 // Number of seconds to wait before declaring agreement not finalized in blockchain
	DVPrefix                         string // When passing agreement ids into a workload container, add this prefix to the agreement id
	RegistrationDelayS               uint64 // The number of seconds to wait after blockchain init before registering with the exchange. This is for testing initialization ONLY.
	ExchangeMessageTTL               int    // The number of seconds the exchange will keep this message before automatically deleting it
	TorrentListenAddr                string // Override the torrent listen address just in case there are conflicts, syntax is "host:port"
	UserPublicKeyPath                string // The location to store user keys uploaded through the REST API
	ReportDeviceStatus               bool   // whether to report the device status to the exchange or not.
	TrustCertUpdatesFromOrg          bool   // whether to trust the certs provided by the organization on the exchange or not.
	TrustDockerAuthFromOrg           bool   // whether to trust the docker auths provided by the organization on the exchange or not.
	ServiceUpgradeCheckIntervalS     int64  // service upgrade check interval in seconds. The default is 300 seconds.
	MultipleAnaxInstances            bool   // multiple anax instances running on the same machine
	DefaultServiceRetryCount         int    // the default service retry count if retries are not specified by the policy file. The default value is 2.
	DefaultServiceRetryDuration      uint64 // the default retry duration in seconds. The next retry cycle occurs after the duration. The default value is 600
	ServiceConfigStateCheckIntervalS int    // the service configuration state check interval. The default is 30 seconds.
	FileSyncService                  FSSConfig // The config for the embedded ESS sync service.

	// these Ids could be provided in config or discovered after startup by the system
	BlockchainAccountId        string
	BlockchainDirectoryAddress string
}
// AGConfig holds the configuration options for the Agreement bot flavor of Anax.
type AGConfig struct {
TxLostDelayTolerationSeconds int
AgreementWorkers int
DBPath string // Path of the bolt DB file; non-empty means bolt is configured (see IsBoltDBConfigured).
Postgresql PostgresqlConfig // The Postgresql config if it is being used
PartitionStale uint64 // Number of seconds to wait before declaring a partition to be stale (i.e. the previous owner has unexpectedly terminated).
ProtocolTimeoutS uint64 // Number of seconds to wait before declaring proposal response is lost
AgreementTimeoutS uint64 // Number of seconds to wait before declaring agreement not finalized in blockchain
NoDataIntervalS uint64 // default should be 15 mins == 15*60 == 900. Ignored if the policy has data verification disabled.
ActiveAgreementsURL string // This field is used when policy files indicate they want data verification but they don't specify a URL
ActiveAgreementsUser string // This is the userid the agbot uses to authenticate to the data verification API
ActiveAgreementsPW string // This is the password for the ActiveAgreementsUser
PolicyPath string // The directory where policy files are kept, default /etc/provider-tremor/policy/
NewContractIntervalS uint64 // default should be 1
ProcessGovernanceIntervalS uint64 // How long the gov sleeps before general gov checks (new payloads, interval payments, etc).
IgnoreContractWithAttribs string // A comma separated list of contract attributes. If set, the contracts that contain one or more of the attributes will be ignored. The default is "ethereum_account".
ExchangeURL string // The URL of the Horizon exchange. If not configured, the exchange will not be used.
ExchangeHeartbeat int // Seconds between heartbeats to the exchange
ExchangeVersionCheckIntervalM int64 // Exchange version check interval in minutes. The default is 5. 0 means no periodic checking.
ExchangeId string // The id of the agbot, not the userid of the exchange user. Must be org qualified.
ExchangeToken string // The agbot's authentication token
DVPrefix string // When looking for agreement ids in the data verification API response, look for agreement ids with this prefix.
ActiveDeviceTimeoutS int // The amount of time a device can go without heartbeating and still be considered active for the purposes of search
ExchangeMessageTTL int // The number of seconds the exchange will keep this message before automatically deleting it
MessageKeyPath string // The path to the location of messaging keys
DefaultWorkloadPW string // The default workload password if none is specified in the policy file
APIListen string // Host and port for the API to listen on
PurgeArchivedAgreementHours int // Number of hours to leave an archived agreement in the database before automatically deleting it
CheckUpdatedPolicyS int // The number of seconds to wait between checks for an updated policy file. Zero means auto checking is turned off.
}
// UserPublicKeyPath returns the directory used to store user keys uploaded
// through the REST API. When the configured path is empty it is derived from
// the HZN_VAR_BASE environment variable and cached on the config; when that
// variable is unset as well, the compiled-in default base path is returned
// without being cached.
func (c *HorizonConfig) UserPublicKeyPath() string {
	if c.Edge.UserPublicKeyPath != "" {
		return c.Edge.UserPublicKeyPath
	}
	base := os.Getenv("HZN_VAR_BASE")
	if base == "" {
		// NOTE(review): the default is returned without appending USERKEYDIR —
		// presumably intentional, but worth confirming against callers.
		return HZN_VAR_BASE_DEFAULT
	}
	c.Edge.UserPublicKeyPath = path.Join(base, USERKEYDIR)
	return c.Edge.UserPublicKeyPath
}
// IsBoltDBConfigured reports whether a bolt DB path has been configured for the agbot.
func (c *HorizonConfig) IsBoltDBConfigured() bool {
	return c.AgreementBot.DBPath != ""
}
// IsPostgresqlConfigured reports whether a non-empty Postgresql config is
// present and a partition-stale interval is in effect.
func (c *HorizonConfig) IsPostgresqlConfigured() bool {
	hasPostgres := c.AgreementBot.Postgresql != (PostgresqlConfig{})
	return hasPostgres && c.GetPartitionStale() != 0
}
// GetPartitionStale returns the configured partition-stale interval in
// seconds, defaulting to 60 when unset.
func (c *HorizonConfig) GetPartitionStale() uint64 {
	if stale := c.AgreementBot.PartitionStale; stale != 0 {
		return stale
	}
	return 60
}
// getDefaultBase returns the Horizon var base directory: the HZN_VAR_BASE
// environment variable when set, otherwise the compiled-in default.
func getDefaultBase() string {
	if base := os.Getenv("HZN_VAR_BASE"); base != "" {
		return base
	}
	return HZN_VAR_BASE_DEFAULT
}
// enrichFromEnvvars overlays configuration values taken from well-known
// environment variables onto the given config object. It currently never
// fails, but returns an error to allow stricter validation later.
func enrichFromEnvvars(config *HorizonConfig) error {
	exchangeURL := os.Getenv(ExchangeURLEnvvarName)
	if exchangeURL != "" {
		config.Edge.ExchangeURL = exchangeURL
		config.AgreementBot.ExchangeURL = exchangeURL
	}
	// TODO: once the envvar is required, return an error when it is unset.
	if cssURL := os.Getenv(FileSyncServiceCSSURLEnvvarName); cssURL != "" {
		config.Edge.FileSyncService.CSSURL = cssURL
	}
	return nil
}
// Read loads a HorizonConfig from the named JSON file, overlays values from
// well-known environment variables, and fills in defaults for any settings
// the user did not provide. It returns an error when the file is missing,
// unreadable, or not valid JSON.
func Read(file string) (*HorizonConfig, error) {
	if _, err := os.Stat(file); err != nil {
		return nil, fmt.Errorf("Config file not found: %s. Error: %v", file, err)
	}
	// Attempt to parse config file. (Renamed from `path` to `f` so the
	// variable no longer shadows the imported `path` package.)
	f, err := os.Open(filepath.Clean(file))
	if err != nil {
		return nil, fmt.Errorf("Unable to read config file: %s. Error: %v", file, err)
	}
	// The file handle was previously never closed; make sure it is released.
	defer f.Close()
	// Instantiate mostly empty; values here are defaults that can be
	// overridden by the user.
	config := HorizonConfig{
		Edge: Config{
			DefaultHTTPClientTimeoutS: 20,
		},
	}
	if err := json.NewDecoder(f).Decode(&config); err != nil {
		return nil, fmt.Errorf("Unable to decode content of config file: %v", err)
	}
	if err := enrichFromEnvvars(&config); err != nil {
		return nil, fmt.Errorf("Unable to enrich content of config file with envvars: %v", err)
	}
	// Set the defaults here in case the attributes are not setup by the user.
	if config.Edge.ExchangeVersionCheckIntervalM == 0 {
		config.Edge.ExchangeVersionCheckIntervalM = 720
	}
	if config.AgreementBot.ExchangeVersionCheckIntervalM == 0 {
		config.AgreementBot.ExchangeVersionCheckIntervalM = 5
	}
	if config.Edge.ServiceUpgradeCheckIntervalS == 0 {
		config.Edge.ServiceUpgradeCheckIntervalS = 300
	}
	if config.Edge.ServiceConfigStateCheckIntervalS == 0 {
		config.Edge.ServiceConfigStateCheckIntervalS = 30
	}
	// Default retry parameters: a count of 2 means two tries including the
	// original attempt, i.e. one actual retry.
	if config.Edge.DefaultServiceRetryCount == 0 {
		config.Edge.DefaultServiceRetryCount = 2
	}
	if config.Edge.DefaultServiceRetryDuration == 0 {
		config.Edge.DefaultServiceRetryDuration = 600
	}
	// Normalize exchange URLs to end with exactly one trailing slash.
	if config.Edge.ExchangeURL != "" {
		config.Edge.ExchangeURL = strings.TrimRight(config.Edge.ExchangeURL, "/") + "/"
	}
	if config.AgreementBot.ExchangeURL != "" {
		config.AgreementBot.ExchangeURL = strings.TrimRight(config.AgreementBot.ExchangeURL, "/") + "/"
	}
	// Now make the collaborators instance and assign it to this config.
	collaborators, err := NewCollaborators(config)
	if err != nil {
		return nil, err
	}
	config.Collaborators = *collaborators
	if config.ArchSynonyms == nil {
		config.ArchSynonyms = NewArchSynonyms()
	}
	return &config, nil
}
|
package datastruct
import (
"errors"
"github.com/MintegralTech/juno/document"
)
// Slice is an ordered collection of key/value Elements kept sorted by key
// in ascending order (see Add).
type Slice []*Element
// Iterator returns a new iterator positioned at the start of the slice.
// NOTE(review): the receiver is a value, so &s points at the method's copy of
// the slice header, not at the caller's variable — confirm this is intended.
func (s Slice) Iterator() *SliceIterator {
	return &SliceIterator{index: 0, data: &s}
}
// Len reports the number of elements currently stored in the slice.
func (s Slice) Len() int {
	count := len(s)
	return count
}
// NewSlice allocates an empty Slice ready for use.
func NewSlice() *Slice {
	s := make(Slice, 0)
	return &s
}
// Add inserts value under id while keeping the slice sorted by key in
// ascending order. If id already exists, its value is replaced in place.
func (s *Slice) Add(id document.DocId, value interface{}) {
	if s.Len() == 0 {
		*s = append(*s, &Element{key: id, value: value})
	} else {
		for i := 0; i < s.Len(); i++ {
			if (*s)[i].key == id {
				// Key already present: update the value in place.
				(*s)[i].value = value
				break
			} else if (*s)[i].key > id {
				// First key greater than id: insert before it. The tail is
				// copied into tmp first because the append below truncates
				// and rewrites the backing array in place.
				tmp := append([]*Element{}, (*s)[i:]...)
				*s = append((*s)[0:i], &Element{key: id, value: value})
				*s = append(*s, tmp...)
				break
			}
		}
		// id is larger than every existing key: append at the end.
		if (*s)[s.Len()-1].key < id {
			*s = append(*s, &Element{key: id, value: value})
		}
	}
}
// Get returns the element stored under id, or an error when no such key exists.
func (s *Slice) Get(id document.DocId) (*Element, error) {
	for i := range *s {
		if elem := (*s)[i]; elem.key == id {
			return elem, nil
		}
	}
	return nil, errors.New("not found")
}
|
package create
import (
"github.com/spf13/cobra"
"github.com/makkes/gitlab-cli/api"
"github.com/makkes/gitlab-cli/cmd/create/accesstoken"
createproj "github.com/makkes/gitlab-cli/cmd/create/project"
"github.com/makkes/gitlab-cli/cmd/create/vars"
"github.com/makkes/gitlab-cli/config"
)
// NewCommand builds the "create" parent command and wires up its
// subcommands (project, variable, access token). The persistent --project
// flag is shared with the vars subcommand.
func NewCommand(client api.Client, cfg config.Config) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create a resource such as a project or a variable",
	}
	project := cmd.PersistentFlags().StringP("project", "p", "", "If present, the project scope for this CLI request")
	subcommands := []*cobra.Command{
		createproj.NewCommand(client),
		vars.NewCommand(client, project),
		accesstoken.NewCommand(client),
	}
	for _, sub := range subcommands {
		cmd.AddCommand(sub)
	}
	return cmd
}
|
// +build linux
package extract
// DocToTxt extracts plain text from the .doc file at filePath.
// The Linux build is a stub and always yields the empty string.
func DocToTxt(filePath string) string {
	var text string
	return text
}
|
package kcpNetwork
import (
"context"
"fmt"
"github.com/xtaci/kcp-go/v5"
"github.com/yaice-rx/yaice/log"
"github.com/yaice-rx/yaice/network"
"go.uber.org/zap"
"time"
)
// KCPClient manages a single KCP client connection with automatic redial.
type KCPClient struct {
	type_ network.ServeType // role marker; always network.Serve_Client for this type
	dialRetriesCount int32 // consecutive failed dial attempts so far; reset on success
	address string // remote "host:port" to dial
	conn network.IConn // current connection; nil until Connect succeeds
	packet network.IPacket // packet codec handed to the connection
	opt network.IOptions // options, e.g. the maximum dial retry count
	ctx context.Context // lifecycle context shared with the connection
	cancel context.CancelFunc // cancels ctx; invoked by Close
	callFunc func(conn network.IConn, err error) // invoked on Close with the final error
}
// NewClient constructs a KCP client for the given address. The returned
// client is not connected until Connect is called; callFunc is invoked when
// the client is closed.
func NewClient(packet network.IPacket, address string, opt network.IOptions, callFunc func(conn network.IConn, err error)) network.IClient {
	ctx, cancel := context.WithCancel(context.Background())
	return &KCPClient{
		type_:            network.Serve_Client,
		address:          address,
		packet:           packet,
		opt:              opt,
		dialRetriesCount: 0,
		callFunc:         callFunc,
		ctx:              ctx,
		cancel:           cancel,
	}
}
// Connect dials the configured KCP address, retrying every 3 seconds until
// the configured retry budget is exhausted. On success the retry counter is
// reset, a connection wrapper is created, its read loop is started in the
// background, and the connection is returned; on final failure nil is returned.
func (c *KCPClient) Connect() network.IConn {
	for {
		kcpConn, err := kcp.DialWithOptions(c.address, nil, 0, 0)
		if err == nil {
			// Reset the retry counter once the dial succeeds.
			c.dialRetriesCount = 0
			c.conn = NewConn(c, kcpConn, c.packet, c.opt, network.Serve_Client, c.ctx, c.cancel)
			// Read network channel data in the background.
			go c.conn.Start()
			return c.conn
		}
		// Fail fast once the retry budget is exhausted; the original slept
		// for 3 seconds before performing this check, delaying the failure.
		if c.opt.GetMaxRetires() < c.dialRetriesCount {
			log.AppLogger.Error("网络重连失败:"+err.Error(), zap.String("function", "network.tcp.Client.Connect"))
			return nil
		}
		log.AppLogger.Warn(fmt.Sprintf("第{%d}网络重连中", c.dialRetriesCount))
		c.dialRetriesCount += 1
		time.Sleep(3 * time.Second)
	}
}
// ReConnect re-dials the server using the same retry policy as Connect.
func (c *KCPClient) ReConnect() network.IConn {
	return c.Connect()
}
// Close cancels the client's context, reports the terminal state to the
// registered callback, and closes the underlying connection if one exists.
func (c *KCPClient) Close(err error) {
	c.cancel()
	// Report the client's final state to the registered callback.
	c.callFunc(c.conn, err)
	// Connect can fail and leave c.conn nil; guard against a nil dereference.
	if c.conn != nil {
		c.conn.Close()
	}
}
|
package main
import (
"fmt"
"time"
)
// Timers represent a single event in the future: you tell the timer how long
// to wait, and it provides a channel (C) that is notified at that time.
func main() {
	timer1 := time.NewTimer(2 * time.Second)
	// Block until timer1's channel (C) delivers the expiry time.
	<-timer1.C
	fmt.Println("Timer 1 fired")
	// One reason a timer may be useful is that you can cancel it before it fires.
	timer2 := time.NewTimer(2 * time.Second)
	go func() {
		<-timer2.C
		fmt.Println("Timer 2 fired")
	}()
	stopped := timer2.Stop()
	if stopped {
		fmt.Println("Timer 2 stopped. Thats good thing")
	}
	// Sleep long enough to prove timer2 really was stopped (it never fires).
	time.Sleep(2 * time.Second)
}
|
package controller
import (
"encoding/json"
"log"
"net/http"
"ocg-be/models"
"ocg-be/repositories"
"ocg-be/util"
"strconv"
"github.com/gorilla/mux"
)
// productStorage is the shared product repository used by these handlers.
// NOTE(review): it is never assigned in this file — presumably initialized
// elsewhere in the package; if not, the handlers below will nil-deref.
var productStorage *repositories.ProductStorage
// productCollection receives the decoded body of collection queries
// (see GetProductCollection).
var productCollection *repositories.RequestGetProductByCollectionId
// GetProducts handles a paged product listing request. Query parameters:
// page (default 1), limit (default 9), sort (default "asc"), order
// (default "id") and a free-text search term.
func GetProducts(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	query := r.URL.Query()
	page, _ := strconv.Atoi(query.Get("page"))
	limit, _ := strconv.Atoi(query.Get("limit"))
	sort := query.Get("sort")
	orderBy := query.Get("order")
	// Apply defaults for anything the caller left unset.
	if page == 0 {
		page = 1
	}
	if limit == 0 {
		limit = 9
	}
	if sort == "" {
		sort = "asc"
	}
	if orderBy == "" {
		orderBy = "id"
	}
	// Wrap the search term for a SQL LIKE match.
	search := "%" + r.FormValue("search") + "%"
	products := repositories.Pageniate(&repositories.ProductStorage{}, page, limit, orderBy, sort, search)
	respon, _ := json.Marshal(products)
	w.WriteHeader(http.StatusOK)
	w.Write(respon)
}
// CreateProduct decodes a product from the request body, derives its URL
// handle from the name, stores it, and echoes the stored product as JSON.
func CreateProduct(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	var product *models.Product
	if err := util.BodyParser(&product, w, r); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	// Persist the product; the handle is derived from its name.
	product.Handle = util.ParseNametoHandle(product.Name)
	var err error
	product.Id, err = productStorage.Add(product)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	// Serialize the stored product for the response.
	respon, err := json.Marshal(product)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write(respon)
}
// GetProductByHandle looks up a single product by its URL handle (a path
// variable) and writes it back as JSON.
func GetProductByHandle(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	handle := mux.Vars(r)["handle"]
	dbResult, err := productStorage.GetByHandle(handle)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	respon, _ := json.Marshal(dbResult)
	w.WriteHeader(http.StatusOK)
	w.Write(respon)
}
// DeleteProductById removes a product (path variable "id") together with its
// images. Image deletion happens first so no orphaned images remain.
func DeleteProductById(w http.ResponseWriter, r *http.Request) {
	id, _ := strconv.Atoi(mux.Vars(r)["id"])
	imageStorage.DeleteImageByProductId(id)
	if err := productStorage.DeleteById(id); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("delete success"))
}
// UpdateProduct decodes a product from the request body, updates it in the
// store and returns the updated row as JSON.
func UpdateProduct(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	var product *models.Product
	err := util.BodyParser(&product, w, r)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		// Fix: the handler previously fell through after reporting the error,
		// calling UpdateProduct with a nil product and writing a second
		// (200) status header.
		return
	}
	dbResult, err := productStorage.UpdateProduct(product)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	log.Println(dbResult)
	respon, _ := json.Marshal(dbResult)
	w.WriteHeader(http.StatusOK)
	w.Write(respon)
}
// GetProductCollection serves a paged product listing scoped to a collection.
// The paging/sorting parameters arrive in the request body.
func GetProductCollection(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	err := util.BodyParser(&productCollection, w, r)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		// Fix: previously execution continued with a stale/nil request after
		// reporting the error and then wrote a second (200) status header.
		return
	}
	page, limit, orderBy, sort, search := productCollection.Page, productCollection.Limit, productCollection.Order, productCollection.Sort, productCollection.Search
	// Apply defaults for anything the caller left unset.
	if page == 0 {
		page = 1
	}
	if limit == 0 {
		limit = 9
	}
	if sort == "" {
		sort = "asc"
	}
	if orderBy == "" {
		orderBy = "id"
	}
	// Wrap the search term for a SQL LIKE match.
	search = "%" + search + "%"
	products := repositories.Pageniate(productCollection, page, limit, orderBy, sort, search)
	respon, _ := json.Marshal(products)
	w.WriteHeader(http.StatusOK)
	w.Write(respon)
}
|
package cache
import (
"testing"
"fmt"
"hub000.xindong.com/rookie/rookie-framework/protobuf"
)
// TestCacheHandler is an integration test that drives the cache module
// end-to-end against a live Redis instance: connection check, set, get,
// delete, and a final get that should miss the deleted key.
// NOTE(review): the Redis address and password are hardcoded, so this test
// only runs against that one environment; the credentials should be moved
// out of source control and the test skipped when Redis is unreachable.
func TestCacheHandler(t *testing.T) {
	OnInit()
	cache := NewRedisCache(RedisConfig{RedisAddr:"172.26.163.124:6379" , RedisPassword:"ztjztj120" , RedisDB:0})
	cache.StartConnection()
	Module.RegistCache("base" , cache)
	//Test the test connection.
	resp , err := Module.Call(Communicator , protobuf.CacheTestConnectionReq{"base"})
	if (err != nil){
		t.Error(err)
	}
	if resp.GetProtocolNum() == protobuf.CacheTestConnectionRespNum{
		fmt.Println("resp 1:")
		fmt.Println(resp.GetProtocolNum())
		resp := resp.(protobuf.CacheTestConnectionResp)
		fmt.Println(resp.Status)
	}
	//Test the set.
	resp , err = Module.Call(Communicator , protobuf.CacheSetReq{"base" , "test" , "test_value" , 0})
	if (err != nil){
		t.Error(err)
	}
	if resp.GetProtocolNum() == protobuf.CacheSetRespNum{
		fmt.Println("resp 2:")
		fmt.Println(resp.GetProtocolNum())
		resp := resp.(protobuf.CacheSetResp)
		fmt.Println(resp.Status)
	}
	//Test the get.
	resp , err = Module.Call(Communicator , protobuf.CacheGetReq{"base" , "test" })
	if (err != nil){
		t.Error(err)
	}
	if resp.GetProtocolNum() == protobuf.CacheGetRespNum{
		fmt.Println("resp 3:")
		fmt.Println(resp.GetProtocolNum())
		resp := resp.(protobuf.CacheGetResp)
		fmt.Println(resp.Value)
		fmt.Println(resp.Status)
	}
	//Test the delete.
	resp , err = Module.Call(Communicator , protobuf.CacheDeleteReq{"base" , "test" })
	if (err != nil){
		t.Error(err)
	}
	if resp.GetProtocolNum() == protobuf.CacheDeleteRespNum{
		fmt.Println("resp 4:")
		fmt.Println(resp.GetProtocolNum())
		resp := resp.(protobuf.CacheDeleteResp)
		fmt.Println(resp.Status)
	}
	// Get again after the delete; the key should now be gone.
	resp , err = Module.Call(Communicator , protobuf.CacheGetReq{"base" , "test" })
	if (err != nil){
		t.Error(err)
	}
	if resp.GetProtocolNum() == protobuf.CacheGetRespNum{
		fmt.Println("resp 5:")
		fmt.Println(resp.GetProtocolNum())
		resp := resp.(protobuf.CacheGetResp)
		fmt.Println(resp.Value)
		fmt.Println(resp.Status)
	}
}
package sys
import (
"os"
"syscall"
"testing"
"github.com/cilium/ebpf/internal/unix"
qt "github.com/frankban/quicktest"
)
// init frees up fd 0 for TestFD: stdin is duplicated onto a descriptor >= 1,
// os.Stdin is repointed at the duplicate, and the original fd 0 is closed.
// reserveFdZero then re-occupies fd 0 so stray use of it is detectable.
func init() {
	// Free up fd 0 for TestFD.
	stdin, err := unix.FcntlInt(os.Stdin.Fd(), unix.F_DUPFD_CLOEXEC, 1)
	if err != nil {
		panic(err)
	}
	old := os.Stdin
	os.Stdin = os.NewFile(uintptr(stdin), "stdin")
	old.Close()
	reserveFdZero()
}
// reserveFdZero occupies file descriptor 0 by opening /dev/null read-only,
// so that later tests can rely on fd 0 being taken. It panics if fd 0 was
// not actually free.
func reserveFdZero() {
	fd, err := unix.Open(os.DevNull, syscall.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	if fd != 0 {
		// Fix: this branch previously did panic(err) with a nil err,
		// producing a useless "panic: nil"; report what actually happened.
		panic("reserveFdZero: fd 0 was not free, open returned a different fd")
	}
}
// TestFD verifies NewFD's contract: negative fds are rejected, and an fd of 0
// is remapped to a non-zero descriptor with the original fd 0 closed.
func TestFD(t *testing.T) {
	_, err := NewFD(-1)
	qt.Assert(t, err, qt.IsNotNil, qt.Commentf("negative fd should be rejected"))
	fd, err := NewFD(0)
	qt.Assert(t, err, qt.IsNil)
	qt.Assert(t, fd.Int(), qt.Not(qt.Equals), 0, qt.Commentf("fd value should not be zero"))
	var stat unix.Stat_t
	err = unix.Fstat(0, &stat)
	qt.Assert(t, err, qt.ErrorIs, unix.EBADF, qt.Commentf("zero fd should be closed"))
	// Re-occupy fd 0 for any tests that run after this one.
	reserveFdZero()
}
// TestFDFile verifies that FD.File transfers ownership: after a successful
// File call the FD is considered closed, so further File or Dup calls fail.
func TestFDFile(t *testing.T) {
	fd := newFD(openFd(t))
	file := fd.File("test")
	qt.Assert(t, file, qt.IsNotNil)
	qt.Assert(t, file.Close(), qt.IsNil)
	// The FD gave up its descriptor to the *os.File above.
	qt.Assert(t, fd.File("closed"), qt.IsNil)
	_, err := fd.Dup()
	qt.Assert(t, err, qt.ErrorIs, ErrClosedFd)
}
// openFd opens /dev/null read-only and returns the resulting descriptor,
// failing the test immediately on error.
func openFd(tb testing.TB) int {
	fd, err := unix.Open(os.DevNull, syscall.O_RDONLY, 0)
	qt.Assert(tb, err, qt.IsNil)
	return fd
}
|
package main
import (
"fmt"
"github.com/priyendra/golang-euler/common"
)
// pow returns a raised to the non-negative integer power b (a**0 == 1).
func pow(a, b int) int {
	result := 1
	for ; b > 0; b-- {
		result *= a
	}
	return result
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// allPrimesUpTo returns the primes from 2 up to and including n, in
// increasing order. (For n < 2 it still returns [2], matching the original.)
func allPrimesUpTo(n int) []int {
	primes := []int{2}
	for candidate := 3; candidate <= n; candidate++ {
		if common.IsPrime(int64(candidate)) {
			primes = append(primes, candidate)
		}
	}
	return primes
}
// main computes the smallest positive number evenly divisible by all of
// 1..20 (Project Euler #5), assembled as a product of prime powers.
func main() {
	N := 20
	primes := allPrimesUpTo(N)
	powers := make([]int, len(primes))
	for i := 4; i <= N; i++ {
		for j := 0; j < len(primes); j++ {
			if i%primes[j] == 0 {
				power := 0
				// Counts how many integer divisions by primes[j] it takes to
				// reach 0, i.e. floor(log_p(i)) + 1 — NOT the exact
				// multiplicity of primes[j] in i.
				// NOTE(review): power-1 over-counts the multiplicity for most
				// i, but its maximum over all i <= N equals floor(log_p(N)),
				// which is the correct LCM exponent — confirm this was
				// intentional rather than accidental.
				for i2 := i; i2 != 0; i2 /= primes[j] {
					power++
				}
				powers[j] = max(powers[j], power-1)
			}
		}
	}
	answer := 1
	for i := 0; i < len(primes); i++ {
		answer *= pow(primes[i], powers[i])
	}
	fmt.Println(answer)
}
|
package orm
import "testing"
// TestBuildUpdate exercises BuildUpdate's error paths (empty table, nil or
// empty column list) and the SQL it generates for valid inputs.
func TestBuildUpdate(t *testing.T) {
	_, got := BuildUpdate("", nil)
	want := "table can't be empty"
	// Fix: the original guard (got != nil && ...) silently passed when no
	// error was returned at all; a nil error must also fail the test.
	if got == nil || got.Error() != want {
		t.Errorf("got %q; want %q", got, want)
	}
	_, got2 := BuildUpdate("user", nil)
	want2 := "columns can't be nil"
	if got2 == nil || got2.Error() != want2 {
		t.Errorf("got %q; want %q", got2, want2)
	}
	_, got3 := BuildUpdate("user", []string{})
	want3 := "columns can't be empty"
	if got3 == nil || got3.Error() != want3 {
		t.Errorf("got %q; want %q", got3, want3)
	}
	got4, _ := BuildUpdate("user", []string{"id"})
	want4 := "UPDATE user SET id = ?"
	if got4 != want4 {
		t.Errorf("got %q; want %q", got4, want4)
	}
	got5, _ := BuildUpdate("user", []string{"id", "name"})
	want5 := "UPDATE user SET id = ?, name = ?"
	if got5 != want5 {
		t.Errorf("got %q; want %q", got5, want5)
	}
}
|
package basic
import "fmt"
/*
new([]int) 之后的 list 是一个 *[]int 类型的指针,
不能对指针执行 append 操作。可以使用 make() 初始化之后再用。
同样的,map 和 channel 建议使用 make() 或字面量的方式初始化,不要用 new() 。
所以下面代码不能编译通过
func list1(){
list := new([]int)
list = append(list, 1)
fmt.Println(list)
}
*/
// array demonstrates arrays vs. slices: slicing an array, passing arrays by
// value and by pointer, passing a slice, and growing a slice with append.
func array() {
	arr := [...]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	/*
		Taking a range of an array yields a slice. Slices can be created by
		slicing an array, with a slice literal, or — most commonly — with make().
	*/
	s1 := arr[2:6]
	fmt.Println(s1)
	/*
		updateSlice(s []int) takes a slice; passing an array there would not
		compile. updateArray takes an array, and the argument's length must
		match the parameter's declared length exactly.
	*/
	updateArray(arr)
	/*
		fmt.Println(arr) and fmt.Printf("%v", arr) print the same thing:
		[0 1 2 3 4 5 6 7 8 9]
	*/
	fmt.Printf("%v", arr)
	updateArraypointer(&arr)
	fmt.Println(arr)
	updateSlice(s1)
	fmt.Println(s1, "len=", len(s1), "cap=", cap(s1))
	/*
		append is the only way to grow a slice — one task, one mechanism,
		in keeping with Go's style.
	*/
	s1 = append(s1, 100)
	fmt.Println(s1, "len=", len(s1), "cap=", cap(s1))
}
/*
	An array parameter without a pointer receives a copy, so writes do not
	affect the caller's array; pass an array pointer to mutate it.
*/
func updateArray(s [10]int) {
	s[0] = 888
}
/*
	Only when the parameter is explicitly an array pointer can the function
	change the elements of the caller's array.
*/
func updateArraypointer(s *[10]int) {
	s[0] = 888
}
/*
	Declared this way the argument must be a slice, never an array.
	Function arguments are passed by value: the function receives a copy of
	the slice header (pointer to the backing array's start, length, and
	capacity), so element writes through it are visible to the caller.
*/
func updateSlice(s []int) {
	s[0] = 666
}
// rangeBak demonstrates sparse slice literals and the classic for-range
// loop-variable-address pitfall.
func rangeBak() {
	// A slice literal may initialize just one specific index:
	s := []int{4: 1}
	fmt.Println(len(s), cap(s)) // prints: 5 5
	fmt.Println(s) // prints: [0 0 0 0 1]
	slice := []int{0, 1, 2, 3}
	m := make(map[int]*int)
	/* TODO understand range's per-element copy.
		The for-range loop reuses a single variable val for every element,
		so m[key] = &val stores the same address each time; after the loop
		every map entry points at val's final value, and all outputs are 3.
		NOTE(review): under Go 1.22+ each iteration gets a fresh variable and
		the output differs — confirm which toolchain this lesson targets.
	*/
	for key, val := range slice {
		m[key] = &val
	}
	for k, v := range m {
		fmt.Println(k, "->", *v)
	}
}
// slice1 shows that append adds after the existing length, not into it.
func slice1() {
	s := make([]int, 5)
	/*
		append only operates on slices; it extends past the current length.
	*/
	s = append(s, 1, 2, 3)
	fmt.Println(s)
	/*
		prints:
		[0 0 0 0 0 1 2 3]
	*/
}
// slice2 shows appending to a zero-length slice.
func slice2() {
	s := make([]int, 0)
	s = append(s, 1, 2, 3, 4)
	fmt.Println(s)
	/*
		prints:
		[1 2 3 4]
	*/
}
/*
	Variadic function: inside the body num is a []int. When a caller expands
	a slice with ..., num shares that slice's backing array, so this write
	is visible to the caller.
*/
func multiPara(num ...int) {
	num[0] = 18
}
// testmultiPara demonstrates calling a variadic function with an expanded
// slice versus individual arguments, and that arrays cannot be expanded.
func testmultiPara() {
	i := []int{5, 6, 7}
	/*
		Passing the slice directly — multiPara(i) — would not compile;
		i... expands the slice into the variadic parameter.
	*/
	multiPara(i...)
	multiPara(1, 2)
	/*
		array := [2]int{1,2}
		Only slices can be expanded with ...; arrays cannot be passed to a
		variadic parameter, so both of these fail to compile:
		multiPara(array...)
		multiPara(array)
	*/
	fmt.Println(i[0])
	/*
		Expanding a slice shares its backing array, so multiPara's write to
		num[0] is visible here. An array still cannot be expanded, e.g. this
		does not compile:
		j := [5]int{1, 2, 3}
		multiPara(j...)
	*/
}
// addValue appends 3 to the caller's slice via a slice pointer.
func addValue(s *[]int) {
	/*
		append may return a slice header different from the one passed in
		(when the backing array is reallocated). Taking a *[]int parameter
		lets the new header be written back to the caller's variable.
	*/
	*s = append(*s, 3)
	fmt.Printf("In addValue: s is %v\n", s)
}
// addValuetest drives addValue and shows the caller observes the append.
func addValuetest() {
	s := []int{1, 2}
	fmt.Printf("In main, before addValue: s is %v\n", s)
	addValue(&s)
	fmt.Printf("In main, after addValue: s is %v\n", s)
	/*
		Output:
		In main, before addValue: s is [1 2]
		In addValue: s is &[1 2 3]
		In main, after addValue: s is [1 2 3]
	*/
}
// sliceExtract demonstrates the three-index slice operator [i:j:k].
func sliceExtract() {
	a := [5]int{1, 2, 3, 4, 5}
	/*
		The [i:j] operator slices an array (or slice) from index i up to but
		not including j. Both indices are optional: i defaults to 0, j to the
		length; neither may exceed it. For a backing array of size k the
		result has length j-i and capacity k-i. A third index, [i:j:k], caps
		the new slice's capacity at k-i (k may not exceed the backing array's
		size). Here t is [4], with length and capacity both 1.
	*/
	t := a[3:4:4]
	fmt.Println(t[0])
}
/*
func arrayLenth() {
a := [2]int{5, 6}
b := [3]int{5, 6}
数组的长度也是数组类型的组成部分,
所以 a 和 b 是不同的类型,是不能比较的,所以编译错
a == b 这个会编译报错
if a == b {
fmt.Println("equal")
} else {
fmt.Println("not equal")
}
}
*/
/*
	cap() does not apply to maps.
*/
func makeNon() {
	s := make(map[string]int)
	/*
		Deleting a missing key from a map is a no-op, and reading a missing
		key yields the value type's zero value — so this prints 0.
	*/
	delete(s, "h")
	fmt.Println(s["h"])
}
/*
	nil slice vs. empty slice: a nil slice compares equal to nil and usually
	means "no slice at all"; an empty slice literal ([]int{}) is not nil and
	represents an empty collection.
*/
func nil1() {
	//var s1 []int
	var s2 = []int{}
	if s2 == nil {
		fmt.Println("yes nil")
	} else {
		fmt.Println("no nil")
	}
}
/*
对于make slice而言,有两个概念需要搞清楚:长度跟容量。
容量表示底层数组的大小,长度是你可以使用的大小。
容量的用处在哪?在与当你用 append扩展长度时,
如果新的长度小于容量,不会更换底层数组,否则,go 会新申请一个底层数组,
拷贝这边的值过去,把原来的数组丢掉。也就是说,
容量的用途是:在数据拷贝和内存申请的消耗与内存占用之间提供一个权衡。
而长度的用途,则是为了帮助你限制切片可用成员的数量,提供边界查询的。
所以用 make 申请好空间后,需要注意不要越界【越 len 】
*/
/*
数组或切片的截取操作。截取操作有带 2 个或者 3 个参数,形如:[i:j] 和 [i:j:k],
从数组里取出胡切片和原数组共用,
原数组的容量决定了切片的容量不会大于原数组的容量, 除非切片apped扩容了, 有了的校报的地址
容量是总的容量, 不是剩余的容量
容量用在, append时, 是否要申请新的连续内存
假设截取对象的基础数组长度为 l。
对于操作符 [i:j]: 截取后的元素, 包括i, 包括j-1, 不包括j
如果 i 省略,i 就默认 0,
如果 j 省略,j 就默认为底层数组的长度,
截取得到的切片长度和容量计算方法是 j-i、l-i。
对于操作符 [i:j:k],
k用来限制切片的容量,但是不能大于数组的长度 l,
截取得到的切片长度和容量计算方法是 j-i、k-i。
*/
// sliceLenCap shows how length and capacity are computed when slicing an
// array, and that sub-slices share the array's storage.
func sliceLenCap() {
	s := [3]int{5, 6, 7}
	a := s[:0]
	fmt.Println("a=", a, ", len(a)=", len(a), ", cap(a)=", cap(a))
	b := s[:2] // indexes 0 and 1, i.e. elements 5 and 6
	fmt.Println("b=", b, ", len(b)=", len(b), ", cap(b)=", cap(b))
	c := s[1:2:cap(s)]
	fmt.Println("c=", c, ", len(c)=", len(c), ", cap(c)=", cap(c))
	// b shares s's storage, so this write is visible through s.
	b[0] = 10
	fmt.Println("s=", s)
	/*
		Output:
		a= [] , len(a)= 0 , cap(a)= 3
		b= [5 6] , len(b)= 2 , cap(b)= 3
		c= [6] , len(c)= 1 , cap(c)= 2
		s= [10 6 7]
	*/
}
// ArrraySlice runs every demo in this file in sequence.
// NOTE(review): the name is misspelled ("Arrray") but exported, so it is
// kept for compatibility with existing callers.
func ArrraySlice() {
	fmt.Println("<-------------------------ArrraySlice begin -------------------> ")
	array()
	rangeBak()
	slice1()
	slice2()
	testmultiPara()
	addValuetest()
	makeNon()
	sliceExtract()
	nil1()
	sliceLenCap()
	fmt.Println("<-------------------------ArrraySlice end -------------------> ")
}
|
/**
* Copyright (c) 2018 ZTE Corporation.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and the Apache License 2.0 which both accompany this distribution,
* and are available at http://www.eclipse.org/legal/epl-v10.html
* and http://www.apache.org/licenses/LICENSE-2.0
*
* Contributors:
* ZTE - initial Project
*/
package log
import (
"msb2pilot/util"
"os"
"strings"
"testing"
)
// TestCheckLogDir verifies that checkLogDir creates the parent directory of
// a log path whenever the path actually contains a directory component.
func TestCheckLogDir(t *testing.T) {
	cases := []struct {
		path string
		exist bool // whether the parent directory should exist afterwards
	}{
		{
			path: ``,
			exist: false,
		},
		{
			path: `test.log`,
			exist: false,
		},
		{
			path: `.` + util.PathSep + `test` + util.PathSep + `test.log`,
			exist: true,
		},
	}
	for _, cas := range cases {
		checkLogDir(cas.path)
		index := strings.LastIndex(cas.path, util.PathSep)
		if cas.exist && !util.FileExists(cas.path[0:index]) {
			t.Errorf("checkLogDir() => dir not exist, want %s", cas.path)
		}
	}
	// clear: remove the directory created by the last case
	os.RemoveAll("test")
}
// TestLoadCustom drives loadCustom with a valid config path, an empty path,
// and a non-config file.
// NOTE(review): the failure condition flags res == nil for the "success"
// case, which implies loadCustom returns a non-nil value on success —
// confirm loadCustom's return convention, otherwise this check is inverted.
func TestLoadCustom(t *testing.T) {
	cases := []struct {
		path string
		want string
	}{
		{
			path: `..` + util.PathSep + "conf" + util.PathSep + cfgFileName,
			want: "success",
		},
		{
			path: ``,
			want: "read file error",
		},
		{
			path: `log_test.go`,
			want: "parse config file error",
		},
	}
	for _, cas := range cases {
		res := loadCustom(cas.path)
		if (res == nil && cas.want == "success") || (res != nil && cas.want != "success") {
			t.Errorf("loadCustom() => want %s, got %v", cas.want, res)
		}
	}
}
|
package constants
// Response is a generic API result envelope: a numeric status code plus an
// arbitrary message payload.
type Response struct {
	Response int // numeric status/result code
	Message interface{} // payload; may be any serializable value
}
|
package local
import (
"testing"
)
// TestCalculateShard checks the start offset and element count computed for
// several (size, shard, shards) combinations, including degenerate cases
// where there are more shards than elements.
func TestCalculateShard(t *testing.T) {
	tests := []struct {
		size int // total number of elements
		shard int // 1-based index of the shard under test
		shards int // total number of shards
		start int // expected start offset
		count int // expected element count for this shard
	}{
		{22, 1, 10, 0, 3},
		{22, 2, 10, 3, 3},
		{22, 3, 10, 6, 2},
		{29, 10, 10, 27, 2},
		{2, 1, 6, 0, 1}, // more shards than elements
		{2, 2, 6, 1, 1}, // more shards than elements
		{2, 3, 6, 2, 0}, // more shards than elements
	}
	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			if gotStart, gotCount := calculateShard(tt.size, tt.shard, tt.shards); gotStart != tt.start || gotCount != tt.count {
				t.Errorf("calculateShard() = %v, %v, want %v, %v", gotStart, gotCount, tt.start, tt.count)
			}
		})
	}
}
|
// Copyright © 2017 Aeneas Rekkas <aeneas+oss@aeneas.io>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"testing"
"github.com/julienschmidt/httprouter"
"github.com/ory/hydra/config"
)
// TestStart exercises Handler.registerRoutes against a fresh router with an
// in-memory database config; it passes as long as registration does not panic.
func TestStart(t *testing.T) {
	router := httprouter.New()
	h := &Handler{
		Config: &config.Config{
			DatabaseURL: "memory",
		},
	}
	h.registerRoutes(router)
}
|
// This program demonstrates how to attach an eBPF program to a uretprobe.
// The program will be attached to the 'readline' symbol in the binary '/bin/bash' and print out
// the line which 'readline' functions returns to the caller.
//go:build amd64
package main
import (
"bytes"
"encoding/binary"
"errors"
"log"
"os"
"os/signal"
"syscall"
"github.com/cilium/ebpf/link"
"github.com/cilium/ebpf/perf"
"github.com/cilium/ebpf/rlimit"
"golang.org/x/sys/unix"
)
// $BPF_CLANG and $BPF_CFLAGS are set by the Makefile.
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64 -type event bpf uretprobe.c -- -I../headers
const (
// The path to the ELF binary containing the function to trace.
// On some distributions, the 'readline' function is provided by a
// dynamically-linked library, so the path of the library will need
// to be specified instead, e.g. /usr/lib/libreadline.so.8.
// Use `ldd /bin/bash` to find these paths.
binPath = "/bin/bash"
symbol = "readline"
)
// main attaches the pre-compiled uretprobe program to bash's readline symbol
// and streams captured lines from the perf ring buffer until interrupted.
func main() {
	stopper := make(chan os.Signal, 1)
	signal.Notify(stopper, os.Interrupt, syscall.SIGTERM)
	// Allow the current process to lock memory for eBPF resources.
	if err := rlimit.RemoveMemlock(); err != nil {
		log.Fatal(err)
	}
	// Load pre-compiled programs and maps into the kernel.
	objs := bpfObjects{}
	if err := loadBpfObjects(&objs, nil); err != nil {
		log.Fatalf("loading objects: %s", err)
	}
	defer objs.Close()
	// Open an ELF binary and read its symbols.
	ex, err := link.OpenExecutable(binPath)
	if err != nil {
		log.Fatalf("opening executable: %s", err)
	}
	// Open a Uretprobe at the exit point of the symbol and attach
	// the pre-compiled eBPF program to it.
	up, err := ex.Uretprobe(symbol, objs.UretprobeBashReadline, nil)
	if err != nil {
		log.Fatalf("creating uretprobe: %s", err)
	}
	defer up.Close()
	// Open a perf event reader from userspace on the PERF_EVENT_ARRAY map
	// described in the eBPF C program.
	rd, err := perf.NewReader(objs.Events, os.Getpagesize())
	if err != nil {
		log.Fatalf("creating perf event reader: %s", err)
	}
	defer rd.Close()
	go func() {
		// Wait for a signal and close the perf reader,
		// which will interrupt rd.Read() and make the program exit.
		<-stopper
		log.Println("Received signal, exiting program..")
		if err := rd.Close(); err != nil {
			log.Fatalf("closing perf event reader: %s", err)
		}
	}()
	log.Printf("Listening for events..")
	// bpfEvent is generated by bpf2go.
	var event bpfEvent
	for {
		record, err := rd.Read()
		if err != nil {
			// perf.ErrClosed means the signal handler closed the reader;
			// exit cleanly in that case, otherwise keep reading.
			if errors.Is(err, perf.ErrClosed) {
				return
			}
			log.Printf("reading from perf event reader: %s", err)
			continue
		}
		if record.LostSamples != 0 {
			log.Printf("perf event ring buffer full, dropped %d samples", record.LostSamples)
			continue
		}
		// Parse the perf event entry into a bpfEvent structure.
		if err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &event); err != nil {
			log.Printf("parsing perf event: %s", err)
			continue
		}
		log.Printf("%s:%s return value: %s", binPath, symbol, unix.ByteSliceToString(event.Line[:]))
	}
}
|
package proto
// Wire message type codes. Every protocol frame is tagged with one of these
// single-byte identifiers.
const (
	MSG_PUB_BATCH = 'a'
	MSG_PUB_ONE = 'b'
	MSG_PUB_TIMER = 'c'
	MSG_PUB_TIMER_ACK = 'd'
	MSG_PUB_RESTORE = 'e'
	MSG_SUB = 'f'
	MSG_SUBACK = 'g'
	MSG_UNSUB = 'h'
	MSG_PING = 'i'
	MSG_PONG = 'j'
	MSG_COUNT = 'k'
	MSG_PULL = 'l'
	MSG_CONNECT = 'm'
	MSG_CONNECT_OK = 'n'
	MSG_BROADCAST = 'o'
	MSG_REDUCE_COUNT = 'p'
	MSG_MARK_READ = 'r'
	MSG_JOIN_CHAT = 's'
	MSG_LEAVE_CHAT = 't'
	MSG_PRESENCE_ONLINE = 'u'
	MSG_PRESENCE_OFFLINE = 'v'
	MSG_PRESENCE_ALL = 'q'
	MSG_ALL_CHAT_USERS = 'w'
	MSG_RETRIEVE = 'x'
)
// Message categories: immediate vs. timer-scheduled messages.
const (
	NORMAL_MSG = 0
	TIMER_MSG = 1
)
// Quality-of-service levels.
const (
	QOS0 = 0
	QOS1 = 1
)
// Default topic/offset values.
var (
	DEFAULT_QUEUE = []byte("meq.io")
	MSG_NEWEST_OFFSET = []byte("0")
)
// Operational limits and tuning knobs.
const (
	MAX_PULL_COUNT = 100
	CacheFlushLen = 200
	REDUCE_ALL_COUNT = 0
	MAX_IDLE_TIME = 120
	NeverExpires = 0
	MSG_ID_LENGTH = 19
)
|
package server
import (
"net/http"
"log"
)
// Start registers the crawl and wallpager handlers on the default mux and
// blocks serving HTTP on addr; it exits the process if the server fails.
func Start(addr string) {
	routes := map[string]func(http.ResponseWriter, *http.Request){
		"/crawl":     handleCrawl,
		"/wallpager": handleWallPager,
	}
	for pattern, handler := range routes {
		http.HandleFunc(pattern, handler)
	}
	err := http.ListenAndServe(addr, nil)
	if err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
//"github.com/gin-gonic/gin"
//"io"
"io/ioutil"
//"github.com/go-martini/martini"
"encoding/json"
"log"
"net/http"
)
// Results mirrors the top-level JSON payload returned by the Cobertura API.
// Fields must be exported for encoding/json to populate them; the original
// lowercase fields were silently left at their zero values by Unmarshal.
type Results struct {
	Elements []Elements `json:"elements"`
}

// Elements is one coverage element: a named numerator/denominator ratio.
type Elements struct {
	Denominator int32   `json:"denominator"`
	Numerator   int32   `json:"numerator"`
	Name        string  `json:"name"`
	Ratio       float32 `json:"ratio"`
}
// getJSON fetches Cobertura coverage JSON from the CI server, decodes it
// into a Results value and logs the decoded structure.
func getJSON() {
	resp, err := http.Get("http://controller.c3.mtv/job/Live-Usher-Cobertura/345/cobertura/api/json?depth=2")
	if err != nil {
		// Fix: this error was previously ignored, which nil-derefs resp below.
		log.Fatal(err)
	}
	// Fix: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	var r Results
	if err := json.Unmarshal(body, &r); err != nil {
		log.Fatal(err)
	}
	log.Printf("%+v", r)
}
// main simply runs the coverage fetch; the commented-out gin server below is
// kept as a reference for a possible HTTP frontend.
func main() {
	/*
		router := gin.Default()
		router.GET("/", func(c *gin.Context) {
			c.String(200, "hello world")
		})
		router.Run(":8080")
	*/
	getJSON()
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//407. Trapping Rain Water II
//Given an m x n matrix of positive integers representing the height of each unit cell in a 2D elevation map, compute the volume of water it is able to trap after raining.
//Note:
//Both m and n are less than 110. The height of each unit cell is greater than 0 and is less than 20,000.
//Example:
//Given the following 3x6 height map:
//[
// [1,4,3,1,3,2],
// [3,2,1,3,2,4],
// [2,3,3,2,3,1]
//]
//Return 4.
//The above image represents the elevation map [[1,4,3,1,3,2],[3,2,1,3,2,4],[2,3,3,2,3,1]] before the rain.
//After the rain, water is trapped between the blocks. The total volume of water trapped is 4.
//func trapRainWater(heightMap [][]int) int {
//}
// Time Is Money |
package controllers
import (
"github.com/astaxie/beego"
"nepliteApi/models"
"github.com/astaxie/beego/logs"
"encoding/json"
"nepliteApi/comm"
)
// SomeNewsController serves the news endpoints; it embeds the base beego
// controller for request/response plumbing.
type SomeNewsController struct {
	beego.Controller
}
// GetAll returns the full news list as JSON. On a model error the result
// carries the error string and a human-readable message instead of data.
func (someNewObj *SomeNewsController) GetAll() {
	result := comm.Result{Ret: map[string]interface{}{"err": "aaa", "num": 0, "result": ""}}
	logs.Info("someNewObj : %s", result.Ret)
	if _l_snlist, num, err := models.GetNews(); err != nil {
		logs.Info("_l_snlist : %s, err %s", _l_snlist, err.Error())
		result.SetValue(err.Error(), num, "获得最新的公告数据出现了错误")
	} else {
		result.SetResult(_l_snlist)
	}
	someNewObj.Data["json"] = result.Get()
	someNewObj.ServeJSON()
}
// Add decodes a SomeNews record from the request body and inserts it.
// Responds with an error payload on decode or insert failure.
func (someNewObj *SomeNewsController) Add() {
	result := comm.Result{Ret: map[string]interface{}{"err": "aaa", "num": 0, "result": ""}}
	var _l_temp_somenews models.SomeNews
	logs.Info("请求数据是: %s", someNewObj.Ctx.Input.RequestBody)
	err := json.Unmarshal(someNewObj.Ctx.Input.RequestBody, &_l_temp_somenews)
	if err != nil {
		result.SetValue("-1", 0, err.Error())
		someNewObj.Data["json"] = result.Get()
		someNewObj.ServeJSON()
		// BUG FIX: must stop here — the original fell through and inserted
		// a zero-value record (and served the response twice).
		return
	}
	if id, err := models.AddNews(_l_temp_somenews); err != nil {
		result.SetValue("-2", 0, err.Error())
		logs.Info("反馈的id是: %d", id)
	} else {
		result.SetResult("")
	}
	someNewObj.Data["json"] = result.Get()
	someNewObj.ServeJSON()
}
|
package main
import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"strings"
"time"
"github.com/goburrow/modbus"
"github.com/nsqio/go-nsq"
"github.com/olebedev/config"
)
// allData accumulates the latest value per variable name across polls
// (NOTE: accessed only from the ticker goroutine — not safe for concurrent
// use as written). config_nsq holds the shared NSQ producer configuration.
var (
	allData    = make(map[string]float64)
	config_nsq = nsq.NewConfig()
)
// set records the latest reading for key in the package-level allData map.
func set(key string, value float64) {
	allData[key] = value
}
// periodicFunc reads the modbus input registers [start_bit, stop_bit),
// scales each reading by 1/10, records them under the names in pl, and
// publishes the full data map to NSQ as JSON.
// BUG FIX: the register result was used before the error check, so a read
// error dereferenced a short/nil slice before Fatalf could fire. Also the
// producer created on every tick is now stopped to avoid leaking
// connections.
func periodicFunc(tick time.Time, client modbus.Client, nsq_prod string, pl []string, start_bit int, stop_bit int) {
	nums := [100]float64{}
	for i := start_bit; i < stop_bit; i++ {
		result, err := client.ReadInputRegisters(uint16(i), 4)
		if err != nil {
			log.Fatalf("%v", err)
		}
		value := binary.BigEndian.Uint16(result)
		nums[i] = float64(value) / 10
	}
	data := nums[start_bit:stop_bit]
	for i := range pl {
		set(pl[i], data[i])
	}
	p, err := nsq.NewProducer(nsq_prod, config_nsq)
	if err != nil {
		log.Panic(err)
	}
	defer p.Stop()
	payload, err := json.Marshal(allData)
	if err != nil {
		log.Println(err)
	}
	err = p.Publish("My_NSQ_Topic", payload)
	if err != nil {
		log.Panic(err)
	}
}
// main loads config.yml, connects a modbus TCP client, and polls the
// configured register range once per second, publishing readings to NSQ.
// BUG FIX: every configuration lookup error was silently discarded; a
// missing key previously produced empty/zero values and confusing failures
// downstream.
func main() {
	file, err := ioutil.ReadFile("config.yml")
	if err != nil {
		panic(err)
	}
	cfg, err := config.ParseYaml(string(file))
	if err != nil {
		panic(err)
	}
	ip, err := cfg.String("production.weintek.ip")
	if err != nil {
		panic(err)
	}
	var client = modbus.TCPClient(ip)
	nsq_prod, err := cfg.String("production.nsq.producer")
	if err != nil {
		panic(err)
	}
	plant_data, err := cfg.String("production.weintek.variables")
	if err != nil {
		panic(err)
	}
	pl := strings.Split(plant_data, ",")
	start_bit, err := cfg.Int("production.weintek.start_bit")
	if err != nil {
		panic(err)
	}
	stop_bit, err := cfg.Int("production.weintek.stop_bit")
	if err != nil {
		panic(err)
	}
	fmt.Println(start_bit, stop_bit)
	for t := range time.NewTicker(1 * time.Second).C {
		periodicFunc(t, client, nsq_prod, pl, start_bit, stop_bit)
	}
}
|
package heap
import "testing"
// TestHeap exercises Push/Top/Pop on a fresh heap and logs the top after
// each mutation for manual inspection (no assertions in the original).
func TestHeap(t *testing.T) {
	h := NewHeap()
	for _, v := range []int{3, 4, 2} {
		h.Push(v)
		t.Log(h.Top())
	}
	h.Pop()
	t.Log(h.Top())
	h.Pop()
	t.Log(h.Top())
}
|
package pacit
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
)
// IANA protocol numbers carried in the IPv4 Protocol header field.
const (
	IP_ICMP     = 0x01
	IP_TCP      = 0x06
	IP_UDP      = 0x11
	IP_IPv6     = 0x29
	IP_IPv6ICMP = 0x3a
)
// IPv4 models an IPv4 packet header plus payload. Sub-byte fields note
// their on-wire width; they are packed/unpacked by Read/ReadFrom/Write.
type IPv4 struct {
	Version        uint8  //4-bits
	IHL            uint8  //4-bits, header length in 32-bit words
	DSCP           uint8  //6-bits
	ECN            uint8  //2-bits
	Length         uint16 // total packet length (header + payload), bytes
	ID             uint16
	Flags          uint16 //3-bits
	FragmentOffset uint16 //13-bits
	TTL            uint8
	Protocol       uint8 // one of the IP_* constants above
	Checksum       uint16
	NWSrc          net.IP
	NWDst          net.IP
	Options        []byte // present when IHL > 5; 4*(IHL-5) bytes
	Data           ReadWriteMeasurer // decoded payload (ICMP/UDP) or nil
}
// Len reports the packet length in bytes: the header (IHL in 32-bit words)
// plus the payload's own length when a payload is attached.
func (i *IPv4) Len() (n uint16) {
	header := uint16(i.IHL * 4)
	if i.Data == nil {
		return header
	}
	return header + i.Data.Len()
}
// Read serializes the header (and payload, if any) into b, returning the
// byte count and io.EOF once everything was produced in this single call.
// NOTE(review): binary.Write errors are ignored (writes to bytes.Buffer
// cannot fail for fixed-size values), and the inner n/err shadow the named
// results deliberately — the ReadFrom branch returns the payload's count.
func (i *IPv4) Read(b []byte) (n int, err error) {
	buf := new(bytes.Buffer)
	// Re-pack the sub-byte fields into their on-wire bytes.
	var verIhl uint8 = (i.Version << 4) + i.IHL
	binary.Write(buf, binary.BigEndian, verIhl)
	var dscpEcn uint8 = (i.DSCP << 2) + i.ECN
	binary.Write(buf, binary.BigEndian, dscpEcn)
	binary.Write(buf, binary.BigEndian, i.Length)
	binary.Write(buf, binary.BigEndian, i.ID)
	var flagsFrag uint16 = (i.Flags << 13) + i.FragmentOffset
	binary.Write(buf, binary.BigEndian, flagsFrag)
	binary.Write(buf, binary.BigEndian, i.TTL)
	binary.Write(buf, binary.BigEndian, i.Protocol)
	binary.Write(buf, binary.BigEndian, i.Checksum)
	binary.Write(buf, binary.BigEndian, i.NWSrc)
	binary.Write(buf, binary.BigEndian, i.NWDst)
	binary.Write(buf, binary.BigEndian, i.Options)
	if i.Data != nil {
		// Append the serialized payload; bail out if it produced nothing.
		if n, err := buf.ReadFrom(i.Data); n == 0 {
			return int(n), err
		}
	}
	if n, err = buf.Read(b); n == 0 {
		return
	}
	// Everything fit in one call, so signal completion alongside the count.
	return n, io.EOF
}
// ReadFrom parses one IPv4 packet from r, filling in every header field,
// options (when IHL > 5), and — for ICMP/UDP — a decoded payload in i.Data.
// Unknown protocols have their payload consumed and discarded. On success n
// is set to the header's Length field rather than the running byte count.
// NOTE(review): the payload-length reads trust i.Length and assume a
// 20-byte header (Length-20), ignoring options — confirm for IHL > 5.
func (i *IPv4) ReadFrom(r io.Reader) (n int64, err error) {
	var verIhl uint8 = 0
	if err = binary.Read(r, binary.BigEndian, &verIhl); err != nil {
		return
	}
	n += 1
	i.Version = verIhl >> 4
	i.IHL = verIhl & 0x0f
	var dscpEcn uint8 = 0
	if err = binary.Read(r, binary.BigEndian, &dscpEcn); err != nil {
		return
	}
	n += 1
	i.DSCP = dscpEcn >> 2
	i.ECN = dscpEcn & 0x03
	if err = binary.Read(r, binary.BigEndian, &i.Length); err != nil {
		return
	}
	n += 2
	if err = binary.Read(r, binary.BigEndian, &i.ID); err != nil {
		return
	}
	n += 2
	var flagsFrag uint16 = 0
	if err = binary.Read(r, binary.BigEndian, &flagsFrag); err != nil {
		return
	}
	n += 2
	i.Flags = flagsFrag >> 13
	i.FragmentOffset = flagsFrag & 0x1fff
	if err = binary.Read(r, binary.BigEndian, &i.TTL); err != nil {
		return
	}
	n += 1
	if err = binary.Read(r, binary.BigEndian, &i.Protocol); err != nil {
		return
	}
	n += 1
	if err = binary.Read(r, binary.BigEndian, &i.Checksum); err != nil {
		return
	}
	n += 2
	i.NWSrc = make([]byte, 4)
	if err = binary.Read(r, binary.BigEndian, &i.NWSrc); err != nil {
		return
	}
	n += 4
	i.NWDst = make([]byte, 4)
	if err = binary.Read(r, binary.BigEndian, &i.NWDst); err != nil {
		return
	}
	n += 4
	// Options occupy the header words beyond the fixed 5.
	if int(i.IHL) > 5 {
		i.Options = make([]byte, 4*(int(i.IHL)-5))
		if err = binary.Read(r, binary.BigEndian, &i.Options); err != nil {
			return
		}
		n += int64(len(i.Options))
	}
	switch i.Protocol {
	case IP_ICMP:
		trash := make([]byte, int(i.Length-20))
		binary.Read(r, binary.BigEndian, &trash)
		i.Data = new(ICMP)
		if n, err := i.Data.Read(trash); err != nil {
			return int64(n), err
		}
	case IP_UDP:
		i.Data = new(UDP)
		data := make([]byte, int(i.Length-20))
		binary.Read(r, binary.BigEndian, &data)
		if n, err := i.Data.Read(data); err != nil {
			return int64(n), err
		}
	default:
		// Unknown protocol: drain the payload so r is positioned correctly.
		trash := make([]byte, int(i.Length-20))
		binary.Read(r, binary.BigEndian, &trash)
	}
	// Report the packet length from the header, not the tallied count.
	n = int64(i.Length)
	return
}
// Write deserializes an IPv4 packet from b into i (despite the io.Writer
// signature, data flows from b into the struct), returning the bytes
// consumed. ICMP and UDP payloads are decoded; any other protocol panics.
// BUG FIX: the default case contained unreachable statements after panic
// (dead code); the redundant make calls before slice reassignments are
// also removed. NOTE: NWSrc/NWDst/Options alias b — they share b's backing
// array rather than owning a copy.
func (i *IPv4) Write(b []byte) (n int, err error) {
	verIhl := b[0]
	n += 1
	i.Version = verIhl >> 4
	i.IHL = verIhl & 0x0f
	dscpEcn := b[1]
	n += 1
	i.DSCP = dscpEcn >> 2
	i.ECN = dscpEcn & 0x03
	i.Length = binary.BigEndian.Uint16(b[2:4])
	n += 2
	i.ID = binary.BigEndian.Uint16(b[4:6])
	n += 2
	flagsFrag := binary.BigEndian.Uint16(b[6:8])
	n += 2
	i.Flags = flagsFrag >> 13
	i.FragmentOffset = flagsFrag & 0x1fff
	i.TTL = b[8]
	n += 1
	i.Protocol = b[9]
	n += 1
	i.Checksum = binary.BigEndian.Uint16(b[10:12])
	n += 2
	i.NWSrc = b[12:16]
	n += 4
	i.NWDst = b[16:20]
	n += 4
	if int(i.IHL) > 5 {
		optLen := 4 * (int(i.IHL) - 5)
		i.Options = b[20 : 20+optLen]
		n += optLen
	}
	switch i.Protocol {
	case IP_ICMP:
		i.Data = new(ICMP)
		m, err := i.Data.Write(b[n:])
		if err != nil {
			return m, err
		}
		n += m
	case IP_UDP:
		i.Data = new(UDP)
		m, err := i.Data.Write(b[n:])
		if err != nil {
			return m, err
		}
		n += m
	default:
		// Unsupported payload protocols are a programming error here.
		panic(fmt.Sprintf("%0x\n", i.Protocol))
	}
	return
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package membuf
import (
"crypto/rand"
"testing"
"github.com/stretchr/testify/require"
)
// testAllocator is a counting allocator: it tracks how many Alloc and Free
// calls the pool issues so tests can assert block reuse behavior.
type testAllocator struct {
	allocs int
	frees  int
}

// Alloc counts the call and returns a fresh zeroed buffer of n bytes.
func (t *testAllocator) Alloc(n int) []byte {
	t.allocs++
	return make([]byte, n)
}

// Free counts the call; the buffer itself is simply left to the GC.
func (t *testAllocator) Free(_ []byte) {
	t.frees++
}
// TestBufferPool verifies the pool's block-allocation accounting: small
// requests pack into 1024-byte blocks, requests over the 512-byte large-
// alloc threshold bypass the allocator, and destroying a buffer returns at
// most poolSize blocks to the pool (freeing the rest).
func TestBufferPool(t *testing.T) {
	allocator := &testAllocator{}
	pool := NewPool(
		WithPoolSize(2),
		WithAllocator(allocator),
		WithBlockSize(1024),
		WithLargeAllocThreshold(512),
	)
	defer pool.Destroy()
	bytesBuf := pool.NewBuffer()
	bytesBuf.AllocBytes(256)
	require.Equal(t, 1, allocator.allocs)
	bytesBuf.AllocBytes(512)
	require.Equal(t, 1, allocator.allocs) // 256+512 still fit in block 1
	bytesBuf.AllocBytes(257)
	require.Equal(t, 2, allocator.allocs) // block 1 full -> new block
	bytesBuf.AllocBytes(767)
	require.Equal(t, 2, allocator.allocs) // 257+767 fill block 2 exactly
	largeBytes := bytesBuf.AllocBytes(513)
	require.Equal(t, 513, len(largeBytes))
	require.Equal(t, 2, allocator.allocs) // >threshold: not from allocator
	require.Equal(t, 0, allocator.frees)
	bytesBuf.Destroy()
	require.Equal(t, 0, allocator.frees) // both blocks recycled into pool
	bytesBuf = pool.NewBuffer()
	for i := 0; i < 6; i++ {
		bytesBuf.AllocBytes(512)
	}
	bytesBuf.Destroy()
	require.Equal(t, 3, allocator.allocs) // 2 reused + 1 fresh block
	require.Equal(t, 1, allocator.frees)  // pool holds 2; the 3rd is freed
}
// TestBufferIsolation checks that consecutive AllocBytes slices do not
// alias: each is handed out at full capacity (len == cap), so appending to
// one cannot grow into — and overwrite — its neighbor's bytes.
func TestBufferIsolation(t *testing.T) {
	pool := NewPool(WithBlockSize(1024))
	defer pool.Destroy()
	bytesBuf := pool.NewBuffer()
	defer bytesBuf.Destroy()
	b1 := bytesBuf.AllocBytes(16)
	b2 := bytesBuf.AllocBytes(16)
	require.Len(t, b1, cap(b1))
	require.Len(t, b2, cap(b2))
	_, err := rand.Read(b2)
	require.NoError(t, err)
	b3 := append([]byte(nil), b2...)
	// Appending past b1's capacity must reallocate, not spill into b2.
	b1 = append(b1, 0, 1, 2, 3)
	require.Equal(t, b3, b2)
	require.NotEqual(t, b2, b1)
}
|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package dao
import (
"errors"
"fmt"
"gorm.io/gorm"
"bscp.io/pkg/dal/gen"
"bscp.io/pkg/dal/table"
"bscp.io/pkg/kit"
"bscp.io/pkg/tools"
"bscp.io/pkg/types"
)
// AppTemplateBinding supplies all the app template binding related
// operations (CRUD over table.AppTemplateBinding, with audit records
// written alongside each mutation).
type AppTemplateBinding interface {
	// Create one app template binding instance, returning the new ID.
	Create(kit *kit.Kit, atb *table.AppTemplateBinding) (uint32, error)
	// Update one app template binding's info.
	Update(kit *kit.Kit, atb *table.AppTemplateBinding) error
	// UpdateWithTx Update one app template binding's info with transaction.
	UpdateWithTx(kit *kit.Kit, tx *gen.QueryTx, atb *table.AppTemplateBinding) error
	// List app template bindings with options; returns rows and total count.
	List(kit *kit.Kit, bizID, appID uint32, opt *types.BasePage) ([]*table.AppTemplateBinding, int64, error)
	// Delete one app template binding instance.
	Delete(kit *kit.Kit, atb *table.AppTemplateBinding) error
}
// Compile-time check that appTemplateBindingDao satisfies the interface.
var _ AppTemplateBinding = new(appTemplateBindingDao)

// appTemplateBindingDao implements AppTemplateBinding on top of the gen
// query layer, with ID generation and audit logging injected.
type appTemplateBindingDao struct {
	genQ     *gen.Query
	idGen    IDGenInterface
	auditDao AuditDao
}
// Create one app template binding instance.
// It validates the record and its attached app, generates a new ID, then
// inserts the row and its audit entry inside a single transaction.
func (dao *appTemplateBindingDao) Create(kit *kit.Kit, g *table.AppTemplateBinding) (uint32, error) {
	if err := g.ValidateCreate(); err != nil {
		return 0, err
	}
	if err := dao.validateAttachmentExist(kit, g.Attachment); err != nil {
		return 0, err
	}
	// generate a app template binding id and update to app template binding.
	id, err := dao.idGen.One(kit, table.Name(g.TableName()))
	if err != nil {
		return 0, err
	}
	g.ID = id
	ad := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareCreate(g)
	// Run the insert and the audit write atomically in one transaction.
	createTx := func(tx *gen.Query) error {
		q := tx.AppTemplateBinding.WithContext(kit.Ctx)
		if err := q.Create(g); err != nil {
			return err
		}
		if err := ad.Do(tx); err != nil {
			return err
		}
		return nil
	}
	if err = dao.genQ.Transaction(createTx); err != nil {
		return 0, err
	}
	return g.ID, nil
}
// Update one app template binding instance.
// The current row is fetched first so the audit entry can record the old
// state; the column-restricted update and the audit write then run inside
// one transaction.
func (dao *appTemplateBindingDao) Update(kit *kit.Kit, g *table.AppTemplateBinding) error {
	if err := g.ValidateUpdate(); err != nil {
		return err
	}
	if err := dao.validateAttachmentExist(kit, g.Attachment); err != nil {
		return err
	}
	// Fetch the pre-update row for the audit diff.
	m := dao.genQ.AppTemplateBinding
	q := dao.genQ.AppTemplateBinding.WithContext(kit.Ctx)
	oldOne, err := q.Where(m.ID.Eq(g.ID), m.BizID.Eq(g.Attachment.BizID)).Take()
	if err != nil {
		return err
	}
	ad := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareUpdate(g, oldOne)
	// Run the update and the audit write atomically in one transaction.
	updateTx := func(tx *gen.Query) error {
		q = tx.AppTemplateBinding.WithContext(kit.Ctx)
		if _, err = q.Where(m.BizID.Eq(g.Attachment.BizID), m.ID.Eq(g.ID)).
			Select(m.Bindings, m.TemplateSpaceIDs, m.TemplateSetIDs, m.TemplateIDs, m.TemplateRevisionIDs,
				m.LatestTemplateIDs, m.Creator, m.Reviser, m.UpdatedAt).
			Updates(g); err != nil {
			return err
		}
		if err = ad.Do(tx); err != nil {
			return err
		}
		return nil
	}
	if err = dao.genQ.Transaction(updateTx); err != nil {
		return err
	}
	return nil
}
// UpdateWithTx Update one app template binding's info with transaction.
// Unlike Update, the caller owns the transaction; the audit entry is
// written before the row update, both on the supplied tx.
func (dao *appTemplateBindingDao) UpdateWithTx(kit *kit.Kit, tx *gen.QueryTx,
	g *table.AppTemplateBinding) error {
	if err := g.ValidateUpdate(); err != nil {
		return err
	}
	if err := dao.validateAttachmentExist(kit, g.Attachment); err != nil {
		return err
	}
	// Fetch the pre-update row (within tx) for the audit diff.
	m := tx.AppTemplateBinding
	q := tx.AppTemplateBinding.WithContext(kit.Ctx)
	oldOne, err := q.Where(m.ID.Eq(g.ID), m.BizID.Eq(g.Attachment.BizID)).Take()
	if err != nil {
		return err
	}
	ad := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareUpdate(g, oldOne)
	if err := ad.Do(tx.Query); err != nil {
		return err
	}
	q = tx.AppTemplateBinding.WithContext(kit.Ctx)
	if _, err = q.Where(m.BizID.Eq(g.Attachment.BizID), m.ID.Eq(g.ID)).
		Select(m.Bindings, m.TemplateSpaceIDs, m.TemplateSetIDs, m.TemplateIDs, m.TemplateRevisionIDs,
			m.LatestTemplateIDs, m.Creator, m.Reviser, m.UpdatedAt).
		Updates(g); err != nil {
		return err
	}
	return nil
}
// List app template bindings with options.
// With opt.All set, every matching row is returned and the count is the
// result length; otherwise a paged query returns rows plus the total count.
func (dao *appTemplateBindingDao) List(kit *kit.Kit, bizID, appID uint32,
	opt *types.BasePage) ([]*table.AppTemplateBinding, int64, error) {
	m := dao.genQ.AppTemplateBinding
	q := dao.genQ.AppTemplateBinding.WithContext(kit.Ctx)
	d := q.Where(m.BizID.Eq(bizID), m.AppID.Eq(appID))
	if opt.All {
		result, err := d.Find()
		if err != nil {
			return nil, 0, err
		}
		return result, int64(len(result)), err
	}
	return d.FindByPage(opt.Offset(), opt.LimitInt())
}
// Delete one app template binding instance.
// The current row is fetched for the audit record, then the delete and the
// audit write run in one transaction.
// NOTE(review): the transactional delete filters only on BizID plus
// whatever conditions gen derives from g — verify the primary-key (ID)
// condition is applied, otherwise this could remove more rows than
// intended (compare with the ID.Eq filter used by Update/Take above).
func (dao *appTemplateBindingDao) Delete(kit *kit.Kit, g *table.AppTemplateBinding) error {
	// Validate the delete request.
	if err := g.ValidateDelete(); err != nil {
		return err
	}
	// Fetch the row being deleted for the audit record.
	m := dao.genQ.AppTemplateBinding
	q := dao.genQ.AppTemplateBinding.WithContext(kit.Ctx)
	oldOne, err := q.Where(m.ID.Eq(g.ID), m.BizID.Eq(g.Attachment.BizID)).Take()
	if err != nil {
		return err
	}
	ad := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareDelete(oldOne)
	// Run the delete and the audit write atomically in one transaction.
	deleteTx := func(tx *gen.Query) error {
		q = tx.AppTemplateBinding.WithContext(kit.Ctx)
		if _, err := q.Where(m.BizID.Eq(g.Attachment.BizID)).Delete(g); err != nil {
			return err
		}
		if err := ad.Do(tx); err != nil {
			return err
		}
		return nil
	}
	if err := dao.genQ.Transaction(deleteTx); err != nil {
		return err
	}
	return nil
}
// validateAttachmentExist validate if attachment resource exists before operating template.
// It looks up the attached app by ID and distinguishes "not found" from
// other query failures in the returned error message.
func (dao *appTemplateBindingDao) validateAttachmentExist(kit *kit.Kit, am *table.AppTemplateBindingAttachment) error {
	m := dao.genQ.App
	q := dao.genQ.App.WithContext(kit.Ctx)
	if _, err := q.Where(m.ID.Eq(am.AppID)).Take(); err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return fmt.Errorf("template attached app %d is not exist", am.AppID)
		}
		return fmt.Errorf("get template attached app failed, err: %v", err)
	}
	return nil
}
// validateTemplateSetsExist validate if all app template bindings resource
// exists before operating app template binding.
// It plucks the IDs that do exist and reports the difference, so the error
// names every missing template set at once.
func (dao *appTemplateBindingDao) validateTemplateSetsExist(kit *kit.Kit, templateSetIDs []uint32) error {
	m := dao.genQ.TemplateSet
	q := dao.genQ.TemplateSet.WithContext(kit.Ctx)
	var existIDs []uint32
	if err := q.Where(m.ID.In(templateSetIDs...)).Pluck(m.ID, &existIDs); err != nil {
		return fmt.Errorf("validate template sets exist failed, err: %v", err)
	}
	diffIDs := tools.SliceDiff(templateSetIDs, existIDs)
	if len(diffIDs) > 0 {
		return fmt.Errorf("template set id in %v is not exist", diffIDs)
	}
	return nil
}
|
package controllers
import (
"github.com/astaxie/beego"
)
// DefaultController serves the root endpoint; it embeds the base beego
// controller for request/response plumbing.
type DefaultController struct {
	beego.Controller
}
// Get responds to GET / with a plain-text hello message.
// Receiver renamed from `this` to `c` per Go convention (receivers are
// short identifiers, never `this`/`self`).
func (c *DefaultController) Get() {
	c.Ctx.WriteString("GoGameServer: hello world")
}
|
package parser
import (
"unicode"
"unicode/utf8"
)
// lexReference consumes an identifier-like run (letters, digits,
// underscores, plus a trailing '!') and emits the matching keyword token
// when the run is a reserved word; otherwise it emits a reference token,
// with capitalized references (constants/classes) getting their own type.
func lexReference(l *StatefulRubyLexer) stateFn {
	l.acceptRun(alphaNumericUnderscore + "!")
	word := l.input[l.start:l.pos]
	switch word {
	case "def": l.emit(tokenTypeDEF)
	case "do": l.emit(tokenTypeDO)
	case "end": l.emit(tokenTypeEND)
	case "if": l.emit(tokenTypeIF)
	case "else": l.emit(tokenTypeELSE)
	case "elsif": l.emit(tokenTypeELSIF)
	case "unless": l.emit(tokenTypeUNLESS)
	case "class": l.emit(tokenTypeCLASS)
	case "module": l.emit(tokenTypeMODULE)
	case "true": l.emit(tokenTypeTRUE)
	case "false": l.emit(tokenTypeFALSE)
	case "__FILE__": l.emit(tokenType__FILE__)
	case "__LINE__": l.emit(tokenType__LINE__)
	case "__ENCODING__": l.emit(tokenType__ENCODING__)
	case "for": l.emit(tokenTypeFOR)
	case "while": l.emit(tokenTypeWHILE)
	case "until": l.emit(tokenTypeUNTIL)
	case "begin": l.emit(tokenTypeBEGIN)
	case "rescue": l.emit(tokenTypeRESCUE)
	case "ensure": l.emit(tokenTypeENSURE)
	case "break": l.emit(tokenTypeBREAK)
	case "redo": l.emit(tokenTypeREDO)
	case "retry": l.emit(tokenTypeRETRY)
	case "return": l.emit(tokenTypeRETURN)
	case "yield": l.emit(tokenTypeYIELD)
	default:
		// Not a keyword: classify by the case of the first rune.
		firstRune, _ := utf8.DecodeRuneInString(l.input[l.start:])
		if unicode.IsUpper(firstRune) {
			l.emit(tokenTypeCapitalizedReference)
		} else {
			l.emit(tokenTypeReference)
		}
	}
	return lexAnything
}
|
package main
import (
"sync"
)
// ThreadPool limits the number of concurrently running tasks to a fixed
// size using a token channel, and lets callers wait for all submitted
// tasks to finish.
type ThreadPool struct {
	available chan struct{}
	size      int
	group     sync.WaitGroup
}

// NewThreadPool returns a pool that runs at most size tasks concurrently.
func NewThreadPool(size int) *ThreadPool {
	tokens := make(chan struct{}, size)
	// Pre-fill the channel: each token grants one running task.
	for i := 0; i < size; i++ {
		tokens <- struct{}{}
	}
	return &ThreadPool{
		available: tokens,
		size:      size,
	}
}

// Do blocks until a slot is free, then runs task on its own goroutine.
// The slot and the wait-group entry are released when the task returns.
func (pool *ThreadPool) Do(task func()) {
	<-pool.available
	pool.group.Add(1)
	go func() {
		defer func() {
			pool.group.Done()
			pool.available <- struct{}{}
		}()
		task()
	}()
}

// Wait blocks until every task started via Do has completed.
func (pool *ThreadPool) Wait() {
	pool.group.Wait()
}
|
// Copyright 2014 Gyepi Sam. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package redux
// NullDb is a blackhole database, used for source files outside the redo
// project directory structure. It never fails: all writes disappear and
// all reads come back empty.
type NullDb struct{}

// NullDbOpen matches the DB-opener signature; the project root argument is
// unused.
func NullDbOpen(ignored string) (DB, error) {
	return &NullDb{}, nil
}

// IsNull reports that this is the blackhole database.
func (db *NullDb) IsNull() bool { return true }

// Put discards the value for key.
func (db *NullDb) Put(key string, value []byte) error {
	return nil
}

// Get reports that no value exists under key (found is always false).
func (db *NullDb) Get(key string) ([]byte, bool, error) {
	return []byte{}, false, nil
}

// Delete is a no-op; absent keys are not an error.
func (db *NullDb) Delete(key string) error {
	return nil
}

// GetKeys returns no keys for any prefix.
func (db *NullDb) GetKeys(prefix string) ([]string, error) {
	return []string{}, nil
}

// GetValues returns no values for any prefix.
func (db *NullDb) GetValues(prefix string) ([][]byte, error) {
	return [][]byte{}, nil
}

// GetRecords returns no records for any prefix.
func (db *NullDb) GetRecords(prefix string) ([]Record, error) {
	return []Record{}, nil
}

// Close is a no-op.
func (db *NullDb) Close() error {
	return nil
}
|
package _020_10_20
// permute returns every permutation of nums via in-place swap backtracking.
// The input slice is left unmodified (a private copy is permuted).
// Fixes: uses the built-in copy instead of a manual element loop, and adds
// the early return in the base case (the follow-on loop was a no-op there,
// but the explicit return makes the recursion boundary clear).
func permute(nums []int) [][]int {
	n := len(nums)
	res := make([][]int, 0)
	out := make([]int, n)
	copy(out, nums)
	backtrack(0, n, out, &res)
	return res
}

// backtrack fixes positions [0, first) of out and permutes the suffix
// [first, n), appending a snapshot of out to res once every position is
// fixed. Each swap is undone after the recursive call so out is restored.
func backtrack(first int, n int, out []int, res *[][]int) {
	if first == n {
		*res = append(*res, append([]int{}, out...))
		return
	}
	for i := first; i < n; i++ {
		out[first], out[i] = out[i], out[first]
		backtrack(first+1, n, out, res)
		out[first], out[i] = out[i], out[first]
	}
}
|
package controller
import (
"context"
"encoding/json"
"strings"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"github.com/shijunLee/docker-secret-tools/pkg/utils"
)
// WorkloadReconciler watches one workload kind (Object's GVK) and injects
// imagePullSecrets for images whose registries match configured docker
// secrets.
type WorkloadReconciler struct {
	client.Client
	Log logr.Logger
	// Object fixes the group/version/kind this reconciler watches.
	Object client.Object
	// NotManagerOwners lists owner APIVersions whose children are skipped.
	NotManagerOwners []string
	// DockerSecretNames names the docker-registry secrets to consult.
	DockerSecretNames []string
}
// Reconcile fetches the changed workload, finds registry secrets matching
// its container images, creates any that are missing in the workload's
// namespace, and patches imagePullSecrets onto the object spec.
// BUG FIX: on NotFound the original returned the error (requeueing deleted
// objects forever); both branches of the if/else returned err identically.
// Marshal failures now also stop the reconcile instead of proceeding with
// nil data.
func (w *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var object = &unstructured.Unstructured{}
	object.SetGroupVersionKind(w.Object.GetObjectKind().GroupVersionKind())
	err := w.Client.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, object)
	if err != nil {
		// A NotFound object has been deleted; nothing to reconcile.
		if k8serrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}
	jsonData, err := object.MarshalJSON()
	if err != nil {
		w.Log.Error(err, "get json data error")
		return ctrl.Result{}, nil
	}
	imageList, err := utils.GetImageFromJSON(ctx, string(jsonData))
	if err != nil {
		w.Log.Error(err, "get image from data error")
		return ctrl.Result{}, nil
	}
	if len(imageList) == 0 {
		return ctrl.Result{}, nil
	}
	imageSecrets := w.getImagesSecrets(ctx, imageList)
	var replaceImageSecrets []string
	for _, item := range imageSecrets {
		// Create the secret in the workload's namespace if it is missing.
		var secret = &corev1.Secret{}
		err = w.Client.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: item.Name}, secret)
		if err != nil && k8serrors.IsNotFound(err) {
			item.Namespace = req.Namespace
			err = w.Client.Create(ctx, &item)
			if err != nil {
				w.Log.Error(err, "create secret error", "SecretName", item.Name)
			} else {
				replaceImageSecrets = append(replaceImageSecrets, item.Name)
			}
		}
	}
	if len(replaceImageSecrets) > 0 {
		var secretListKV []map[string]string
		for _, secret := range replaceImageSecrets {
			secretListKV = append(secretListKV, map[string]string{"name": secret})
		}
		// Pods carry imagePullSecrets at spec level; every other workload
		// kind nests them under spec.template.spec.
		var secretMaps map[string]interface{}
		switch object.GetKind() {
		case "Pod":
			secretMaps = map[string]interface{}{
				"spec": map[string]interface{}{
					"imagePullSecrets": secretListKV,
				},
			}
		default:
			secretMaps = map[string]interface{}{
				"spec": map[string]interface{}{
					"template": map[string]interface{}{
						"spec": map[string]interface{}{
							"imagePullSecrets": secretListKV,
						},
					},
				},
			}
		}
		mergePatch, err := json.Marshal(secretMaps)
		if err != nil {
			w.Log.Error(err, "convert secret to json error")
			return ctrl.Result{}, nil
		}
		err = w.Patch(ctx, object, client.RawPatch(types.StrategicMergePatchType, mergePatch))
		if err != nil {
			w.Log.Error(err, "patch object secret error", "Group", object.GroupVersionKind().Group,
				"Version", object.GroupVersionKind().Version, "Kind", object.GroupVersionKind().Kind, "Name", object.GetName(),
				"Namespace", object.GetNamespace())
			return ctrl.Result{}, err
		}
	}
	return ctrl.Result{}, nil
}
// SetupWithManager registers the reconciler for the watched workload kind.
// Only create events pass the filter (and only when filterEventObject
// approves the owner references); updates and deletes are ignored.
func (w *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(w.Object).WithEventFilter(predicate.Funcs{
		CreateFunc: func(event event.CreateEvent) bool {
			return w.filterEventObject(event.Object)
		},
		UpdateFunc: func(updateEvent event.UpdateEvent) bool {
			return false
		},
		DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
			return false
		},
	}).Complete(w)
}
// getImagesSecrets returns every configured docker secret whose registry
// host equals the host part of one of the given image references.
// NOTE(review): the host is taken as everything before the first "/" of
// the part before the first ":" — refs like "host:5000/img" split on the
// port colon first; confirm this matches the expected image format.
// Cleanup: removed dead len(Split)==0 guards (strings.Split never returns
// an empty slice) and replaced the manual append loop with append(...).
func (w *WorkloadReconciler) getImagesSecrets(ctx context.Context, images []string) []corev1.Secret {
	registrySecrets := w.getSecretAuthRegistry(ctx)
	var result = []corev1.Secret{}
	for host, secrets := range registrySecrets {
		for _, image := range images {
			ref := strings.Split(image, ":")[0]
			imageHost := strings.Split(ref, "/")[0]
			if imageHost == host {
				result = append(result, secrets...)
			}
		}
	}
	return result
}
// getSecretAuthRegistry indexes the configured docker secrets by every
// registry host found in their .dockerconfigjson auth entries.
// Cleanup: early continue instead of nested if/else, `for key := range`
// instead of the vet-flagged `for key, _ :=`, and a single append (append
// to a missing map key handles the nil-slice case).
func (w *WorkloadReconciler) getSecretAuthRegistry(ctx context.Context) map[string][]corev1.Secret {
	var result = map[string][]corev1.Secret{}
	var secrets = utils.GetDockerSecrets(ctx, w.Client, w.Log, w.DockerSecretNames)
	for _, item := range secrets {
		configData, ok := item.Data[".dockerconfigjson"]
		if !ok {
			continue
		}
		var dockerSecrets = &utils.DockerSecrets{}
		if err := json.Unmarshal(configData, dockerSecrets); err != nil {
			w.Log.Error(err, "unmarshal docker secret to docker config error")
			continue
		}
		for key := range dockerSecrets.Auths {
			result[key] = append(result[key], *item)
		}
	}
	return result
}
// filterEventObject reports whether object should be reconciled: objects
// owned by an apps/v1 controller or by any of the configured
// NotManagerOwners APIVersions are skipped (their parent is watched
// instead). Cleanup: the `!= nil && len > 0` guard was redundant — ranging
// over a nil slice iterates zero times.
func (w *WorkloadReconciler) filterEventObject(object client.Object) bool {
	for _, owner := range object.GetOwnerReferences() {
		if owner.APIVersion == "apps/v1" {
			return false
		}
		for _, notManagerOwner := range w.NotManagerOwners {
			if owner.APIVersion == notManagerOwner {
				return false
			}
		}
	}
	return true
}
|
// subarraySum counts the contiguous subarrays of nums whose elements sum
// to k, using the prefix-sum + hashmap technique: a subarray ending here
// sums to k exactly when an earlier prefix equals (current prefix - k).
// Time O(n), space O(n).
func subarraySum(nums []int, k int) int {
	prefixCount := map[int]int{0: 1}
	count, prefix := 0, 0
	for _, v := range nums {
		prefix += v
		// A missing key reads as zero, contributing nothing.
		count += prefixCount[prefix-k]
		prefixCount[prefix]++
	}
	return count
}
|
package parser
import (
"github.com/robfig/cron/v3"
)
// Parser is a cron parser. It wraps the robfig/cron parser to add the
// descriptor rewrites performed in Parse below.
type Parser struct {
	parser cron.Parser
}
// NewParser creates a Parser instance configured for six fields
// (second, minute, hour, day-of-month, month, day-of-week) plus
// @-descriptor support.
func NewParser() cron.ScheduleParser {
	return Parser{cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)}
}
// Parse parses a cron schedule specification. It accepts the cron spec with
// mandatory seconds parameter.
//
// @-descriptors are rewritten to explicit specs before delegating to the
// wrapped parser. NOTE(review): most rewrites here have 7 fields while
// NewParser configures 6 (second..dow), and "@minutely" has 6 — verify
// that each rewritten spec actually parses with the configured field set.
func (p Parser) Parse(spec string) (cron.Schedule, error) {
	switch spec {
	case "@yearly", "@annually":
		spec = "0 0 0 1 1 * *"
	case "@monthly":
		spec = "0 0 0 1 * * *"
	case "@weekly":
		spec = "0 0 0 * * 0 *"
	case "@daily":
		spec = "0 0 0 * * * *"
	case "@hourly":
		spec = "0 0 * * * * *"
	case "@minutely":
		spec = "0 * * * * *"
	}
	return p.parser.Parse(spec)
}
// standaloneParser backs the package-level Parse convenience function.
var standaloneParser = NewParser()

// Parse parses a cron schedule using the shared standalone parser.
func Parse(spec string) (cron.Schedule, error) {
	return standaloneParser.Parse(spec)
}
|
package main
import "fmt"
// main0901 demonstrates slice capacity: re-slicing from index 2 keeps the
// same backing array, so the new slice's cap shrinks by the dropped prefix
// while len follows high-low.
func main0901() {
	s := []int{10, 20, 30, 40, 50}
	fmt.Println(cap(s))
	// s[low:high:max]: len = high-low, cap = max-low.
	slice := s[2:]
	fmt.Println(slice)
	fmt.Println(len(slice))
	fmt.Println(cap(slice))
}
// main shows that a sub-slice aliases the original backing array: writing
// through s1 is visible in s, and both headers print the same base address.
func main() {
	s := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	s1 := s[2:5]
	s1[2] = 999
	fmt.Println(s1)
	fmt.Println(s)
	fmt.Printf("%p\n", s)
	fmt.Printf("%p\n", s1)
}
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package globalconfigsync
import (
"context"
"github.com/pingcap/tidb/util/logutil"
pd "github.com/tikv/pd/client"
"go.uber.org/zap"
)
// GlobalConfigSyncer is used to sync pd global config.
// NotifyCh buffers items queued via Notify for later storage in PD.
type GlobalConfigSyncer struct {
	pd       pd.Client
	NotifyCh chan pd.GlobalConfigItem
}
// NewGlobalConfigSyncer creates a GlobalConfigSyncer with a notification
// channel buffered to 8 items. p may be nil, in which case stores are
// no-ops.
func NewGlobalConfigSyncer(p pd.Client) *GlobalConfigSyncer {
	return &GlobalConfigSyncer{
		pd:       p,
		NotifyCh: make(chan pd.GlobalConfigItem, 8),
	}
}
// StoreGlobalConfig writes item into PD's global config store and logs the
// stored name/value pair. It is a silent no-op when no PD client is set.
func (s *GlobalConfigSyncer) StoreGlobalConfig(ctx context.Context, item pd.GlobalConfigItem) error {
	if s.pd == nil {
		return nil
	}
	if err := s.pd.StoreGlobalConfig(ctx, "", []pd.GlobalConfigItem{item}); err != nil {
		return err
	}
	logutil.BgLogger().Info("store global config", zap.String("name", item.Name), zap.String("value", item.Value))
	return nil
}
// Notify pushes global config to internal channel and will be sync into
// pd's GlobalConfig. Blocks when NotifyCh's buffer (8) is full until a
// consumer drains it.
func (s *GlobalConfigSyncer) Notify(globalConfigItem pd.GlobalConfigItem) {
	s.NotifyCh <- globalConfigItem
}
|
package leetcode
/*A valid parentheses string is either empty (""), "(" + A + ")", or A + B,
where A and B are valid parentheses strings, and + represents string concatenation.
For example, "", "()", "(())()", and "(()(()))" are all valid parentheses strings.
A valid parentheses string S is primitive if it is nonempty,
and there does not exist a way to split it into S = A+B, with A and B nonempty valid parentheses strings.
Given a valid parentheses string S, consider its primitive decomposition: S = P_1 + P_2 + ... + P_k,
where P_i are primitive valid parentheses strings.
Return S after removing the outermost parentheses of every primitive string in the primitive decomposition of S.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/remove-outermost-parentheses
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
import "strings"
// removeOuterParentheses strips the outermost parentheses of every
// primitive component of the valid parentheses string S: a '(' is kept
// only when it opens inside an already-open group, and a ')' only when it
// closes one. Single O(n) pass with a builder.
// Replaces the original two-phase blank-out-then-Split/Join approach,
// whose ')' case also contained a dead cnt==0 branch (unreachable for
// valid input, which the problem guarantees).
func removeOuterParentheses(S string) string {
	var b strings.Builder
	depth := 0
	for _, c := range S {
		switch c {
		case '(':
			if depth > 0 {
				b.WriteRune(c)
			}
			depth++
		case ')':
			depth--
			if depth > 0 {
				b.WriteRune(c)
			}
		}
	}
	return b.String()
}
package modules
import (
"sort"
)
// SearchResult accumulates search hits and can hand them back sorted.
type SearchResult struct {
	Results []string
}

// Sorted sorts the collected results ascending — in place — and returns
// the same underlying slice.
func (sr *SearchResult) Sorted() []string {
	sort.Strings(sr.Results)
	return sr.Results
}

// Append adds one result to the collection.
func (sr *SearchResult) Append(result string) {
	sr.Results = append(sr.Results, result)
}

// Len reports how many results have been collected.
func (sr *SearchResult) Len() int {
	return len(sr.Results)
}
|
//go:generate reform
package front
import "github.com/empirefox/reform"
//reform:cc_cart
// CartItem is one row of the cc_cart table (see the reform directive
// above): a product snapshot placed in a user's cart.
type CartItem struct {
	ID        uint   `reform:"id,pk"`
	CreatedAt int64  `reform:"created_at"`
	UserID    uint   `reform:"user_id" json:"-"` // owner; hidden from JSON
	Name      string `reform:"name"`
	Img       string `reform:"img"`
	Type      string `reform:"type"`
	Price     uint   `reform:"price"`
	Quantity  uint   `reform:"quantity"`
	SkuID     uint   `reform:"sku_id"`
}
// SaveToCartPayload is the client-supplied body for adding or updating a
// cart line. It mirrors CartItem's user-editable fields; note there is no
// UserID here — presumably the server derives the user from the session
// rather than trusting the payload (confirm against the handler).
type SaveToCartPayload struct {
	ID       uint
	Img      string
	Name     string
	Type     string
	Price    uint
	Quantity uint
	SkuID    uint
}
// CartResponse bundles everything the client needs to render a cart. Each
// slice holds reform.Struct values of the concrete type named in the
// trailing comment.
type CartResponse struct {
	Items    []reform.Struct // CartItem
	Skus     []reform.Struct // Sku
	Products []reform.Struct // Product
}
|
package main
import "github.com/urfave/cli"
// application builds the top-level CLI app: its metadata, the global debug
// and config-path flags, and the run action (defined elsewhere in this
// package).
func application() *cli.App {
	a := &cli.App{
		Name:        "Discord AniHouse server Bot",
		Description: "Discord AniHouse server Bot",
		Flags: []cli.Flag{
			&cli.BoolFlag{
				// NOTE(review): the combined "short, long" Name syntax is a
				// urfave/cli v1 convention; v2 expects Name: "debug" plus
				// Aliases: []string{"d"}. Confirm which major version go.mod
				// pins before changing.
				Name:  "d, debug",
				Usage: "Enable debug mode",
			},
			&cli.StringFlag{
				Name:  "c, config",
				Value: "./config/work/config.json",
				Usage: "Path to the config file",
			},
		},
		Action: run,
	}
	// Allow combined short options such as -dc.
	a.UseShortOptionHandling = true
	return a
}
|
package sudoku
import (
"container/heap"
"fmt"
"runtime"
"strconv"
"sync"
)
/*
* A CompoundSolveStep is a series of 0 or more PrecursorSteps that are cull
* steps (as opposed to fill steps), terminated with a single, non-optional
* fill step. This organization reflects the observation that cull steps are
* only useful if they help advance the grid to a state where a FillStep is
* obvious in the short-term.
*
* The process of finding a HumanSolution to a puzzle reduces down to an
* iterative search for a series of CompoundSolveSteps that, when applied in
* order, will cause the puzzle to be solved. Hint is effectively the same,
* except only searching for one CompoundSolveStep.
*
* When trying to discover which CompoundSolveStep to return, we need to
* generate a number of options and pick the best one. humanSolveSearcher is
* the struct that contains the information about the search for the current
 * CompoundSolveStep. It keeps track of the valid CompoundSolveSteps that have
* already been found, and the in-progress CompoundSolveSteps that are not yet
* complete (that is, that have not yet found their terminating fill step).
*
* Each possible CompoundSolveStep that is being considered (both incomplete
* and complete ones) is represented by a humanSolveItem. Each humanSolveItem
* has a parent humanSolveItem, except for the special initial item. Most
* properties about a humanSolveItem are fixed at creation time. Each
* humanSolveItem has one SolveStep representing the last SolveStep in their
* chain.
*
* Each humanSolveItem has a Goodness() score, reflecting how good of an option
* it is. Lower scores are better. This score is a function of the Twiddles
* applied to this item and the twiddles applied up through its ancestor chain.
* Twiddles between 0.0 and 1.0 make a humanSolveItem more good; values between
* 1.0 and Infinity make it less good. Normally, the longer the chain of Steps,
* the higher (worse) the Goodness score.
*
* humanSolveSearcher maintains a list of completedItems that it has found--
* that is, humanSolveItems whose chain of steps represents a valid
* CompoundSolveStep (0 or more cull steps followed by a single fill step). As
* soon as a humanSolveItem is found, if it is a valid CompoundSolveStep, it is
* added to the completedItems list. As soon as the completedItems list is
* greater than options.NumOptionsToCalculate, we cease looking for more items
* and move onto step selection phase.
*
* humanSolveSearcher maintains a heap of humanSolveItems sorted by their
* Goodness. It will explore each item in order. When it explores each item, it
* derives a grid representing the current grid state mutated by all of the
* steps so far in this humanSolveItem's ancestor chain. It then searches for
* all SolveSteps that can be found at this grid state and creates
* humanSolveItems for each one, with this humanSolveItem as their parent. As
* these items are created they are either put in completedItems or
* itemsToExplore, depending on if they are complete or not. Once a
* humanSolveItem is explored it is not put back in the itemsToExplore heap.
 * (Completed items later have their Goodness inverted and are picked from.)
*
* Once humanSolveSearcher has found options.NumOptionsToCalculate
* completedItems, it goes into selection phase. It creates a
* ProbabilityDistribution with each item's Goodness() score. Then it inverts
* that distribution and uses it to pick which CompoundSolveStep to return.
*/
//humanSolveSearcherHeap is what we will use for the heap implementation in
//searcher. We put it as a separate type to avoid having to have
//heap.Interface methods on searcher itself, since for proper use you're not
//supposed to call those directly. So putting them on a sub-struct helps hide
//them a bit.
type humanSolveSearcherHeap []*humanSolveItem
//humanSolveSearcher keeps track of the search for a single new
//CompoundSolveStep. It keeps track of the humanSolveItems that are in-
//progress (itemsToExplore) and the items that are fully complete (that is,
//that are terminated by a FillStep and valid to return as an option).
type humanSolveSearcher struct {
	//itemsToExplore is a heap of in-progress items ordered by Goodness
	//(lower is better; see humanSolveSearcherHeap.Less).
	itemsToExplore humanSolveSearcherHeap
	//completedItems are items whose step chain is a valid CompoundSolveStep.
	completedItems []*humanSolveItem
	//The number of straightforward completed items. That is,
	//CompoundSolveSteps with no precursorSteps that are not Guesses. We keep
	//track of this to figure out when we can early bail if we have enough of
	//them.
	straightforwardItemsCount int
	//TODO: keep track of stats: how big the frontier was at the end of each
	//CompoundSolveStep. Then provide max/mean/median.
	//done will be closed when DoneSearching will return true. A convenient
	//way for people to check DoneSearching without checking in a tight loop.
	done chan bool
	//channelClosed guards against double-closing done; see signalDone.
	channelClosed bool
	//itemsLock controls access to itemsToExplore, completedItems,
	//straightforwardItemsCount, etc.
	//TODO: consider having more fine-grained locks for performance.
	itemsLock sync.Mutex
	//Various options frozen in at creation time that various methods need
	//access to.
	grid                  Grid
	options               *HumanSolveOptions
	previousCompoundSteps []*CompoundSolveStep
}
//twiddleRecord is a key/value pair in twiddles. We want to preserve ordering,
//so we can't use a map. name is a human-readable reason for the tweak, used
//by explainGoodness.
type twiddleRecord struct {
	name  string
	value probabilityTweak
}
//humanSolveItem keeps track of in-progress CompoundSolveSteps that we're
//currently building and considering. It also maintains various metadata about
//how this item fits in the searcher. Many things about the item are frozen at
//the time of creation; many of the properties of the humanSolveItem are
//derived recursively from the parents.
type humanSolveItem struct {
	//All humanSolveItem, except the initial in a searcher, must have a parent.
	parent *humanSolveItem
	//step is the single last SolveStep in this item's chain.
	step *SolveStep
	//twiddles are the goodness adjustments applied to this item, in order.
	twiddles []twiddleRecord
	//heapIndex is this item's position in the searcher's heap, or -1 when
	//not currently in the heap (maintained by the heap methods).
	heapIndex int
	searcher  *humanSolveSearcher
	//cachedGrid memoizes Grid(); see Grid.
	cachedGrid Grid
	//cachedGoodness memoizes Goodness() once doneTwiddling is set.
	cachedGoodness float64
	//The index of the next technique to return
	techniqueIndex int
	//added is set once this item has been handed to searcher.AddItem.
	added bool
	//doneTwiddling, once set, makes further Twiddle calls no-ops and allows
	//Goodness caching.
	doneTwiddling bool
}
//humanSolveWorkItem represents a unit of work that should be done during the
//search: run one technique against one grid state, reporting results through
//the coordinator.
type humanSolveWorkItem struct {
	grid        Grid
	technique   SolveTechnique
	coordinator findCoordinator
}
//channelFindCoordinator implements the findCoordinator interface. It's a
//simple wrapper around the basic channel logic currently used: results are
//delivered on results, and a closed done channel signals early exit.
type channelFindCoordinator struct {
	results chan *SolveStep
	done    chan bool
}
//synchronousFindCoordinator implements the findCoordinator interface. It's
//basically just a thin wrapper around humanSolveSearcher. Designed for use in
//NewSearch. Found steps are attached as children of baseItem.
type synchronousFindCoordinator struct {
	searcher *humanSolveSearcher
	baseItem *humanSolveItem
}
//humanSolveHelper does most of the basic set up for both HumanSolve and
//Hint. When endConditionSolved is true it searches until the puzzle is
//solved; otherwise it searches for just one CompoundSolveStep. Returns nil
//when the puzzle has multiple solutions, is already solved, or no steps
//could be found.
func humanSolveHelper(grid Grid, options *HumanSolveOptions, previousSteps []*CompoundSolveStep, endConditionSolved bool) *SolveDirections {
	//Puzzles with multiple solutions can't be meaningfully human-solved.
	if grid.HasMultipleSolutions() {
		return nil
	}
	if options == nil {
		options = DefaultHumanSolveOptions()
	}
	//Quickly bail if there's nothing to solve.
	if grid.Solved() {
		return nil
	}
	options.validate()
	//Remember the starting state; the search below may mutate grid.
	snapshot := grid.Copy()
	var steps []*CompoundSolveStep
	if endConditionSolved {
		steps = humanSolveSearch(grid, options)
	} else if single := humanSolveSearchSingleStep(grid, options, previousSteps); single != nil {
		steps = []*CompoundSolveStep{single}
	}
	if len(steps) == 0 {
		return nil
	}
	return &SolveDirections{snapshot, steps}
}
//humanSolveSearch is a new implementation of the core implementation of
//HumanSolve. It repeatedly asks for the next CompoundSolveStep until the
//grid is solved, returning nil if it ever gets stuck. Mutates the grid when
//the grid is a MutableGrid; otherwise it advances via fresh copies.
func humanSolveSearch(grid Grid, options *HumanSolveOptions) []*CompoundSolveStep {
	var result []*CompoundSolveStep
	isMutableGrid := false
	mGrid, ok := grid.(MutableGrid)
	if ok {
		isMutableGrid = true
	}
	//TODO: it FEELS like here we should be using read only grids. Test what
	//happens if we get rid of the mutablegrid path (and modify the callers
	//who expect us to mutate the grid). however, we tried doing this, and it
	//added 90% to BenchmarkHumansolve. Presumably it's because we are
	//creating tons of extra grids when we can just accumulate the results in
	//the one item otherwise.
	for !grid.Solved() {
		//result so far doubles as the previousSteps context for the search.
		newStep := humanSolveSearchSingleStep(grid, options, result)
		if newStep == nil {
			//Sad, guess we failed to solve the puzzle. :-(
			return nil
		}
		result = append(result, newStep)
		if isMutableGrid {
			newStep.Apply(mGrid)
		} else {
			//Immutable path: rebind grid to a copy with this step applied.
			grid = grid.CopyWithModifications(newStep.Modifications())
		}
	}
	return result
}
//humanSolveSearchSingleStep is the workhorse of the new HumanSolve. It
//searches for the next CompoundSolveStep on the puzzle: a series of steps
//that contains exactly one fill step at its end. Returns nil if no step
//could be found.
func humanSolveSearchSingleStep(grid Grid, options *HumanSolveOptions, previousSteps []*CompoundSolveStep) *CompoundSolveStep {
	//This function doesn't do much on top of HumanSolvePossibleSteps, but
	//it's worth it to mirror humanSolveSearch
	possibilities, distribution := grid.HumanSolvePossibleSteps(options, previousSteps)
	if len(possibilities) == 0 || len(distribution) == 0 {
		return nil
	}
	//Pick one option at random, weighted by the (inverted) distribution.
	return possibilities[distribution.RandomIndex()]
}
/************************************************************
*
* channelFindCoordinator implementation
*
************************************************************/
//shouldExitEarly reports whether the consumer has signaled (by closing
//done) that no more results are needed. It never blocks.
func (c *channelFindCoordinator) shouldExitEarly() bool {
	select {
	case <-c.done:
		return true
	default:
	}
	return false
}
//foundResult hands the discovered step to the consumer. It blocks until the
//step is accepted (returning false: keep searching) or until done is closed
//(returning true: stop searching; the step is dropped).
func (c *channelFindCoordinator) foundResult(step *SolveStep) bool {
	select {
	case c.results <- step:
		return false
	case <-c.done:
		return true
	}
}
/************************************************************
*
* synchronousFindCoordinator implementation
*
************************************************************/
//shouldExitEarly delegates to the searcher: once it has collected enough
//completed items there is no point continuing this find.
func (s *synchronousFindCoordinator) shouldExitEarly() bool {
	return s.searcher.DoneSearching()
}
//foundResult synchronously records the discovered step as a child of
//baseItem, then reports whether the technique should stop early.
func (s *synchronousFindCoordinator) foundResult(step *SolveStep) bool {
	s.baseItem.AddStep(step)
	return s.shouldExitEarly()
}
/************************************************************
*
* humanSolveItem implementation
*
************************************************************/
//PreviousGrid returns a grid with all of the steps applied up to BUT NOT
//INCLUDING this item's step. For the root item that is simply the searcher's
//starting grid.
func (p *humanSolveItem) PreviousGrid() Grid {
	if p.parent != nil {
		return p.parent.Grid()
	}
	return p.searcher.grid
}
//Grid returns a grid with all of this item's steps applied. The result is
//memoized in cachedGrid (a nil searcher grid is never cached, which just
//means it is trivially recomputed).
func (p *humanSolveItem) Grid() Grid {
	if p.cachedGrid != nil {
		return p.cachedGrid
	}
	switch {
	case p.searcher.grid == nil:
		//Nothing to derive from; leave cachedGrid nil.
	case p.parent == nil:
		//Root item: no steps of our own, so the searcher's grid is ours.
		p.cachedGrid = p.searcher.grid
	default:
		//Apply our single step on top of everything the parent applied.
		p.cachedGrid = p.parent.Grid().CopyWithModifications(p.step.Modifications())
	}
	return p.cachedGrid
}
//Goodness is how good the next step chain is in total. A LOWER Goodness is
//better. Scores are additive up the ancestor chain (the root contributes
//1.0) rather than multiplicative: there's not enough precision between 0.0
//and 1.0 if we try to cram all values in there and they get very small.
//The value is cached once doneTwiddling is set; the != 0 guard means a
//(theoretical) exactly-zero score would simply never be cached.
func (p *humanSolveItem) Goodness() float64 {
	if p.parent == nil {
		return 1.0
	}
	if p.doneTwiddling && p.cachedGoodness != 0 {
		return p.cachedGoodness
	}
	//Our own contribution is the sum of all twiddle values applied to us.
	ownAdditionFactor := probabilityTweak(0.0)
	for _, twiddle := range p.twiddles {
		ownAdditionFactor += twiddle.value
	}
	//p.cachedGoodness will be overwritten in the future if doneTwiddling is
	//not yet true.
	p.cachedGoodness = p.parent.Goodness() + float64(ownAdditionFactor)
	return p.cachedGoodness
}
//explainGoodness returns a list of strings explaining why this item has the
//goodness it does: the overall score first, then one line per non-zero
//twiddle up the ancestor chain. Primarily useful for debugging.
func (p *humanSolveItem) explainGoodness() []string {
	overall := fmt.Sprintf("G:%f", p.Goodness())
	return append([]string{overall}, p.explainGoodnessRecursive(0)...)
}
//explainGoodnessRecursive collects one descriptive line per non-zero
//twiddle on this item and its ancestors. startCount is the generation
//counter printed at the front of each line (0 = this item, growing towards
//the root). Ancestor lines come before this item's own lines.
func (p *humanSolveItem) explainGoodnessRecursive(startCount int) []string {
	if p.parent == nil {
		return nil
	}
	var resultSections []string
	for _, twiddle := range p.twiddles {
		//0.0 values are boring, so skip them.
		if twiddle.value == 0.0 {
			continue
		}
		resultSections = append(resultSections, strconv.Itoa(startCount)+":"+twiddle.name+":"+strconv.FormatFloat(float64(twiddle.value), 'f', 4, 64))
	}
	parents := p.parent.explainGoodnessRecursive(startCount + 1)
	if parents == nil {
		return resultSections
	}
	if resultSections == nil {
		return parents
	}
	//Root-most explanations first, ours last.
	return append(parents, resultSections...)
}
//Steps returns the chain of SolveSteps from the root down to (and
//including) this item's own step. The root item itself contributes nothing
//and returns nil.
func (p *humanSolveItem) Steps() []*SolveStep {
	//Memoizing this seems like it makes sense, but it actually leads to a ~1%
	//INCREASE in HumanSolve.
	if p.parent == nil {
		return nil
	}
	return append(p.parent.Steps(), p.step)
}
//CreateNewItem creates a new humanSolveItem based on this step, but NOT YET
//ADDED to searcher. Call AddStep (or searcher.AddItem) to do that. All
//registered twiddlers are applied here, after which the item's goodness is
//frozen via DoneTwiddling.
func (p *humanSolveItem) CreateNewItem(step *SolveStep) *humanSolveItem {
	result := &humanSolveItem{
		parent:    p,
		step:      step,
		twiddles:  nil,
		heapIndex: -1,
		searcher:  p.searcher,
	}
	//The chain built so far (excluding the new step) plus the grid before
	//the new step are the context the twiddlers evaluate against.
	inProgressCompoundStep := p.Steps()
	previousGrid := result.PreviousGrid()
	for _, twiddler := range twiddlers {
		tweak := twiddler.Twiddle(step, inProgressCompoundStep, p.searcher.previousCompoundSteps, previousGrid)
		result.Twiddle(tweak, twiddler.name)
	}
	//No more twiddles are expected, so allow Goodness caching.
	result.DoneTwiddling()
	return result
}
//AddStep is a convenience wrapper: it builds a child item for step via
//CreateNewItem and immediately registers it with the searcher.
func (p *humanSolveItem) AddStep(step *SolveStep) *humanSolveItem {
	item := p.CreateNewItem(step)
	p.searcher.AddItem(item)
	return item
}
//DoneTwiddling should be called once no more twiddles are expected. That's
//the signal that it's OK for us to cache goodness; any Twiddle call after
//this is ignored.
func (p *humanSolveItem) DoneTwiddling() {
	p.doneTwiddling = true
}
//Twiddle modifies goodness by the given amount and keeps track of the
//reason for debugging purposes. Goodness sums twiddle values, so an amount
//of 0.0 has no effect and larger amounts make the item worse (higher
//Goodness). Negative amounts are ignored, as are twiddles arriving after
//DoneTwiddling.
func (p *humanSolveItem) Twiddle(amount probabilityTweak, description string) {
	if amount < 0.0 {
		return
	}
	//Ignore twiddles once we've been told to not expect any more.
	if p.doneTwiddling {
		return
	}
	p.twiddles = append(p.twiddles, twiddleRecord{description, amount})
	//Our Goodness changed, so our position in the searcher's heap may need
	//fixing.
	p.searcher.ItemValueChanged(p)
}
//String returns a debug representation: the step chain, the goodness, and
//the current heap index.
func (p *humanSolveItem) String() string {
	return fmt.Sprintf("%v %f %d", p.Steps(), p.Goodness(), p.heapIndex)
}
//IsComplete reports whether this item's chain forms a valid
//CompoundSolveStep, i.e. it is non-empty and its final step is a fill step.
func (p *humanSolveItem) IsComplete() bool {
	steps := p.Steps()
	if count := len(steps); count > 0 {
		return steps[count-1].Technique.IsFill()
	}
	return false
}
//NextSearchWorkItem returns the next humanSolveWorkItem in this item to do:
//the technique to run on a given grid. The returned item's coordinator is
//left unset; the caller fills it in. If no more work is left to be done,
//returns nil.
func (p *humanSolveItem) NextSearchWorkItem() *humanSolveWorkItem {
	//TODO: the use of effectiveTechniquesToUse here is another nail in the
	//coffin for treating guess specially.
	techniquesToUse := p.searcher.options.effectiveTechniquesToUse()
	if p.techniqueIndex >= len(techniquesToUse) {
		return nil
	}
	result := &humanSolveWorkItem{
		grid:      p.Grid(),
		technique: techniquesToUse[p.techniqueIndex],
	}
	//Advance so the next call hands out the following technique.
	p.techniqueIndex++
	return result
}
/************************************************************
*
* humanSolveSearcher implementation
*
************************************************************/
//newHumanSolveSearcher builds a searcher for one CompoundSolveStep search,
//seeding its heap with the root item (no parent, no step) that represents
//the unmodified starting grid.
func newHumanSolveSearcher(grid Grid, previousCompoundSteps []*CompoundSolveStep, options *HumanSolveOptions) *humanSolveSearcher {
	result := &humanSolveSearcher{
		grid:                  grid,
		options:               options,
		previousCompoundSteps: previousCompoundSteps,
		done:                  make(chan bool),
	}
	heap.Init(&result.itemsToExplore)
	//The root item anchors the search; heapIndex -1 means "not in heap yet".
	root := &humanSolveItem{
		searcher:  result,
		heapIndex: -1,
	}
	heap.Push(&result.itemsToExplore, root)
	return result
}
//AddItem injects this item into the searcher: completed items (chains
//ending in a fill step) go to completedItems, everything else onto the
//itemsToExplore heap. An item is only ever added once; repeat calls are
//no-ops.
func (n *humanSolveSearcher) AddItem(item *humanSolveItem) {
	n.itemsLock.Lock()
	defer n.itemsLock.Unlock()
	//The check-and-set of added must happen under itemsLock: AddItem is
	//called concurrently from multiple find threads, and an unguarded flag
	//could let the same item be added twice.
	if item.added {
		return
	}
	item.added = true
	if item.IsComplete() {
		n.completedItems = append(n.completedItems, item)
		//Non-guess completions count towards the straightforward tally used
		//for early bail-out.
		if item.step.Technique != GuessTechnique {
			n.straightforwardItemsCount++
		}
	} else {
		heap.Push(&n.itemsToExplore, item)
	}
}
//ItemValueChanged restores the heap invariant after the given item's
//Goodness changed. Items not currently in the heap (heapIndex < 0) are
//ignored.
func (n *humanSolveSearcher) ItemValueChanged(item *humanSolveItem) {
	n.itemsLock.Lock()
	defer n.itemsLock.Unlock()
	//Read heapIndex under the lock: Swap/Push/Pop mutate it while holding
	//itemsLock, so an unguarded read would be a data race.
	if item.heapIndex < 0 {
		return
	}
	heap.Fix(&n.itemsToExplore, item.heapIndex)
}
//DoneSearching will return true when no more items need to be explored
//because we have enough CompletedItems. As a side effect it closes the done
//channel (exactly once) when the answer becomes true.
func (n *humanSolveSearcher) DoneSearching() bool {
	if n.options == nil {
		//No options means no target count to reach; trivially done. Use
		//signalDone rather than a bare close so that a second call cannot
		//panic by closing an already-closed channel.
		n.signalDone()
		return true
	}
	n.itemsLock.Lock()
	lenCompletedItems := len(n.completedItems)
	n.itemsLock.Unlock()
	result := n.options.NumOptionsToCalculate <= lenCompletedItems
	if result {
		n.signalDone()
	}
	return result
}
//signalDone closes the done channel exactly once; channelClosed (guarded by
//itemsLock) makes repeat calls safe, since closing an already-closed
//channel would panic.
func (n *humanSolveSearcher) signalDone() {
	n.itemsLock.Lock()
	if !n.channelClosed {
		close(n.done)
		n.channelClosed = true
	}
	n.itemsLock.Unlock()
}
//NextPossibleStep pops the best (lowest-Goodness) in-progress item off the
//heap and returns it, or nil if the heap is empty.
func (n *humanSolveSearcher) NextPossibleStep() *humanSolveItem {
	n.itemsLock.Lock()
	defer n.itemsLock.Unlock()
	//The emptiness check must happen while holding the lock; checking
	//before acquiring it would let a concurrent caller drain the heap in
	//between and make our own Pop panic on an empty heap.
	if n.itemsToExplore.Len() == 0 {
		return nil
	}
	return heap.Pop(&n.itemsToExplore).(*humanSolveItem)
}
//Search is the main workhorse of HumanSolve Search, which explores all of
//the itemsToExplore (potentially bailing early if enough completed items are
//found). When Search is done, searcher.completedItems will contain the
//possibilities to choose from.
func (n *humanSolveSearcher) Search() {
	/*
		The pipeline starts by generating humanSolveWorkItems, and at the
		end collects generated CompoundSolveSteps and puts them in
		searcher.completedItems.
		The pipeline continues until one of the following things are true:
		1) No more work items will be generated. This is reasonably rare
		in practice, because as long as Guess is in the set of
		TechniquesToUse there will almost always be SOME item. When this
		shuts down the pipeline is already mostly idle anyway so it's just
		a matter of tidying up. However, this will always happen in the
		last few steps of solving a puzzle when there's only one move to
		make anyway.
		2) We have at least NumItemsToCompute items in
		searcher.completedItems (or some other more complex early exit
		logic is true) and thus can exit early. When this happens the
		pipeline is roaring through all of the work and needs to signal
		all pieces to shut down. We handle this by defering a close to
		allDone in this method and then just returning.
		The pipeline consists of the following go Routines:
		1) A routine to generate humanSolveWorkItems. It loops through
		searcher.NextPossibleStep, and for each one of those loops through
		NextWorkItem until none are left. It sends workItems down the
		channel to the next stage. Once there are no more steps it closes
		the outbound channel, signalling to the rest of the pipeline to
		exit in Exit Condition #1. If it can receive from the allDone
		channel, that means that Exit Condition #2 is met and it should
		begin an early shutdown and close its output channel.
		Each work item contains the grid to operate on, the technique, and
		a coordinator that is specific to this baseItem.
		2) A series of N worker threads that take an item off of
		workItems, run the technique, and then run another one. The
		coordinator in the work item synchronously adds the result to the
		searcher . If workItems is closed they immediately exit. The
		Technique.Find() methods will early exit if the coordinator tells
		them to (and it will do so faster given its synchronous nature)
		On the main thread we watch for either the searcher to signal that
		DoneSearching is called (by closing its done channel), or for all
		of the solver threads to have quit, signaling that all of the
		techniques have been exhausted.
	*/
	//TODO: make sure that Guess will return at least one guess item in all
	//cases, but never will go above the normal rank of 2 unless there are
	//none of size 2. This will require a new test. Note that all guesses are
	//infinitely bad, which means that guess on a cell of rank 2 and guess on
	//a cell of rank 3 will be equally bad, making it more important to only
	//return cells of the lowest rank.
	//TODO: make this configurable
	//TODO: test if this is faster on devices with other numbers of cores or
	//if it's just tuned to the Mac Pro.
	//NOTE(review): setting GOMAXPROCS here is a process-wide side effect of
	//a method call; consider leaving scheduler configuration to the caller.
	runtime.GOMAXPROCS(runtime.NumCPU())
	//Use roughly half the procs for find threads, but always at least two.
	numFindThreads := runtime.GOMAXPROCS(0)/2 - 1
	if numFindThreads < 2 {
		numFindThreads = 2
	}
	workItems := make(chan *humanSolveWorkItem)
	//The thread to generate work items
	go humanSolveSearcherWorkItemGenerator(n, workItems)
	var solveThreadsDone sync.WaitGroup
	solveThreadsDone.Add(numFindThreads)
	for i := 0; i < numFindThreads; i++ {
		go humanSolveSearcherFindThread(workItems, &solveThreadsDone)
	}
	allSolveThreadsDone := make(chan bool)
	//Convert the wait group into a channel send for convenience of using
	//select{} below.
	go func() {
		solveThreadsDone.Wait()
		allSolveThreadsDone <- true
	}()
	//Wait for either all solve threads to have finished (meaning they have
	//found everything they're going to find) or for ourselves to have
	//signaled that we reached DoneSearching().
	select {
	case <-allSolveThreadsDone:
	case <-n.done:
	}
}
//humanSolveSearcherFindThread is a worker goroutine: it drains workItems,
//running each work item's technique against its grid (results flow through
//the item's coordinator), and marks the WaitGroup when the channel closes.
func humanSolveSearcherFindThread(workItems chan *humanSolveWorkItem, wg *sync.WaitGroup) {
	defer wg.Done()
	for item := range workItems {
		item.technique.find(item.grid, item.coordinator)
	}
}
//humanSolveSearcherWorkItemGenerator is used in searcher.Search to generate
//the stream of WorkItems: for each item popped off the heap it emits one
//work item per technique, stopping early if the searcher signals done.
func humanSolveSearcherWorkItemGenerator(searcher *humanSolveSearcher, workItems chan *humanSolveWorkItem) {
	//When we return close down workItems to signal downstream things to
	//close.
	defer close(workItems)
	//We'll loop through each step in searcher, and then for each step
	//generate a work item per technique.
	item := searcher.NextPossibleStep()
	for item != nil {
		//One coordinator per base item: found steps become children of it.
		coordinator := &synchronousFindCoordinator{
			searcher: searcher,
			baseItem: item,
		}
		workItem := item.NextSearchWorkItem()
		for workItem != nil {
			//Tell each workItem where to send its results
			workItem.coordinator = coordinator
			select {
			case workItems <- workItem:
			case <-searcher.done:
				//Early shutdown: enough completed items were found.
				return
			}
			workItem = item.NextSearchWorkItem()
		}
		item = searcher.NextPossibleStep()
	}
}
//String prints out a useful debug output for the searcher's state: the
//counts of pending and completed items plus one line per pending item.
func (n *humanSolveSearcher) String() string {
	result := "Items:" + strconv.Itoa(len(n.itemsToExplore)) + "\n" +
		"Completed:" + strconv.Itoa(len(n.completedItems)) + "\n" +
		"[\n"
	for _, pending := range n.itemsToExplore {
		result += pending.String() + "\n"
	}
	return result + "]\n"
}
/************************************************************
*
* humanSolveSearcherHeap implementation
*
************************************************************/
//Len is necessary to implement heap.Interface; it reports the number of
//items currently in the heap.
func (n humanSolveSearcherHeap) Len() int {
	return len(n)
}
//Less is necessary to implement heap.Interface. Lower Goodness is better,
//so this makes the heap a min-heap that pops the most promising item first.
func (n humanSolveSearcherHeap) Less(i, j int) bool {
	return n[i].Goodness() < n[j].Goodness()
}
//Swap is necessary to implement heap.Interface. It also keeps each item's
//heapIndex in sync with its new slot.
func (n humanSolveSearcherHeap) Swap(i, j int) {
	n[j], n[i] = n[i], n[j]
	n[i].heapIndex, n[j].heapIndex = i, j
}
//Push is necessary to implement heap.Interface. It should not be used
//directly; instead, use heap.Push(). The new item's heapIndex is set to its
//slot at the end of the slice.
func (n *humanSolveSearcherHeap) Push(x interface{}) {
	item := x.(*humanSolveItem)
	item.heapIndex = len(*n)
	*n = append(*n, item)
}
//Pop is necessary to implement heap.Interface. It should not be used
//directly; instead use heap.Pop(). The removed item's heapIndex is reset to
//-1 so it reads as "not in the heap".
func (n *humanSolveSearcherHeap) Pop() interface{} {
	last := len(*n) - 1
	item := (*n)[last]
	item.heapIndex = -1 //for safety
	*n = (*n)[:last]
	return item
}
//humanSolvePossibleStepsImpl runs a full search for candidate next
//CompoundSolveSteps and returns them alongside an inverted probability
//distribution (so that better — lower-Goodness — options are more likely to
//be picked). Returns nil, nil if no options were found.
func humanSolvePossibleStepsImpl(grid Grid, options *HumanSolveOptions, previousSteps []*CompoundSolveStep) (steps []*CompoundSolveStep, distribution ProbabilityDistribution) {
	//TODO: with the new approach, we're getting a lot more extreme negative difficulty values. Train a new model!
	//We send a copy here because our own selves will likely be modified soon
	//after returning from this, and if the other threads haven't gotten the
	//signal yet to shut down they might get in a weird state.
	searcher := newHumanSolveSearcher(grid, previousSteps, options)
	searcher.Search()
	//Prepare the distribution and list of steps
	//But first check if we don't have any.
	if len(searcher.completedItems) == 0 {
		return nil, nil
	}
	//Get a consistent snapshot of completedItems; its length might change.
	//NOTE(review): if Search exited via the done channel, straggler find
	//threads may still be appending to completedItems concurrently — confirm
	//whether this read needs to hold itemsLock.
	completedItems := searcher.completedItems
	distri := make(ProbabilityDistribution, len(completedItems))
	var resultSteps []*CompoundSolveStep
	for i, item := range completedItems {
		distri[i] = item.Goodness()
		compoundStep := newCompoundSolveStep(item.Steps())
		compoundStep.explanation = item.explainGoodness()
		resultSteps = append(resultSteps, compoundStep)
	}
	//Invert so lower (better) Goodness maps to higher selection probability.
	invertedDistribution := distri.invert()
	return resultSteps, invertedDistribution
}
//HumanSolvePossibleSteps returns candidate next compound steps and a
//selection distribution; the read-only grid delegates directly to the
//shared implementation.
func (self *gridImpl) HumanSolvePossibleSteps(options *HumanSolveOptions, previousSteps []*CompoundSolveStep) (steps []*CompoundSolveStep, distribution ProbabilityDistribution) {
	return humanSolvePossibleStepsImpl(self, options, previousSteps)
}
//HumanSolvePossibleSteps returns candidate next compound steps and a
//selection distribution. The mutable grid hands the search a copy of
//itself, so in-flight search goroutines never observe later mutations.
func (self *mutableGridImpl) HumanSolvePossibleSteps(options *HumanSolveOptions, previousSteps []*CompoundSolveStep) (steps []*CompoundSolveStep, distribution ProbabilityDistribution) {
	return humanSolvePossibleStepsImpl(self.Copy(), options, previousSteps)
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package precheck
import (
"context"
)
// CheckType represents the check type (severity class) of a precheck item.
type CheckType string

// CheckType constants.
const (
	// Critical marks checks whose failure should be treated as fatal.
	Critical CheckType = "critical"
	// Warn marks advisory checks.
	// NOTE(review): the string value is "performance", not "warn" —
	// presumably kept for report/wire compatibility; confirm before renaming.
	Warn CheckType = "performance"
)
// CheckItemID is the stable string ID of a precheck item.
type CheckItemID string

// CheckItemID constants, one per known precheck.
const (
	CheckLargeDataFile            CheckItemID = "CHECK_LARGE_DATA_FILES"
	CheckSourcePermission         CheckItemID = "CHECK_SOURCE_PERMISSION"
	CheckTargetTableEmpty         CheckItemID = "CHECK_TARGET_TABLE_EMPTY"
	CheckSourceSchemaValid        CheckItemID = "CHECK_SOURCE_SCHEMA_VALID"
	CheckCheckpoints              CheckItemID = "CHECK_CHECKPOINTS"
	CheckCSVHeader                CheckItemID = "CHECK_CSV_HEADER"
	CheckTargetClusterSize        CheckItemID = "CHECK_TARGET_CLUSTER_SIZE"
	CheckTargetClusterEmptyRegion CheckItemID = "CHECK_TARGET_CLUSTER_EMPTY_REGION"
	CheckTargetClusterRegionDist  CheckItemID = "CHECK_TARGET_CLUSTER_REGION_DISTRIBUTION"
	CheckTargetClusterVersion     CheckItemID = "CHECK_TARGET_CLUSTER_VERSION"
	CheckLocalDiskPlacement       CheckItemID = "CHECK_LOCAL_DISK_PLACEMENT"
	CheckLocalTempKVDir           CheckItemID = "CHECK_LOCAL_TEMP_KV_DIR"
	CheckTargetUsingCDCPITR       CheckItemID = "CHECK_TARGET_USING_CDC_PITR"
)
var (
	// checkItemIDToDisplayName is a map from CheckItemID to its display name
	// (see CheckItemID.DisplayName).
	checkItemIDToDisplayName = map[CheckItemID]string{
		CheckLargeDataFile:            "Large data file",
		CheckSourcePermission:         "Source permission",
		CheckTargetTableEmpty:         "Target table empty",
		CheckSourceSchemaValid:        "Source schema valid",
		CheckCheckpoints:              "Checkpoints",
		CheckCSVHeader:                "CSV header",
		CheckTargetClusterSize:        "Target cluster size",
		CheckTargetClusterEmptyRegion: "Target cluster empty region",
		CheckTargetClusterRegionDist:  "Target cluster region dist",
		CheckTargetClusterVersion:     "Target cluster version",
		CheckLocalDiskPlacement:       "Local disk placement",
		CheckLocalTempKVDir:           "Local temp KV dir",
		CheckTargetUsingCDCPITR:       "Target using CDC/PITR",
	}
)
// DisplayName returns the human-readable display name for this check item.
// Unknown IDs yield the empty string (the map zero value).
func (c CheckItemID) DisplayName() string {
	return checkItemIDToDisplayName[c]
}
// CheckResult is the result of a precheck item.
type CheckResult struct {
	// Item identifies which precheck produced this result.
	Item CheckItemID
	// Severity is the check's severity class (Critical or Warn).
	Severity CheckType
	// Passed reports whether the check succeeded.
	Passed bool
	// Message carries human-readable detail about the outcome.
	Message string
}
// Checker is the interface for precheck items.
type Checker interface {
	// Check checks whether it meets some prerequisites for importing.
	// If the check is skipped, the returned `CheckResult` is nil.
	Check(ctx context.Context) (*CheckResult, error)
	// GetCheckItemID returns this check's stable ID.
	GetCheckItemID() CheckItemID
}
|
//go:generate protoc -I../proto --go_out=plugins=grpc:../proto ../proto/echo.proto
//go:generate protoc -I../proto --swagger_out=logtostderr=true:../proto ../proto/echo.proto
package main
import (
"bytes"
"context"
"flag"
"fmt"
"log"
"net"
"os"
pb "github.com/ginuerzh/echo/proto"
svc1 "github.com/ginuerzh/svc1/proto"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
"google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/reflection"
status "google.golang.org/grpc/status"
)
var (
addr = ":8080"
svc1Addr = "svc1.echo:8080" // k8s service name for svc1
)
// init registers the listen-address flag and parses the command line.
// NOTE(review): calling flag.Parse in init runs before main and before any
// other package can register flags — consider moving the Parse call into
// main.
func init() {
	flag.StringVar(&addr, "l", ":8080", "grpc server address")
	flag.Parse()
}
// echoServer implements the Echo gRPC service (and the gRPC health service),
// delegating the actual work to the downstream svc1 service.
type echoServer struct {
	svc1Client svc1.Svc1Client
}
// Echo forwards the request to svc1 and wraps its reply with this host's
// name, producing "[echo] from <hostname>, <svc1 reply>, ".
func (s *echoServer) Echo(ctx context.Context, in *pb.EchoRequest) (*pb.EchoReply, error) {
	downstream, err := s.svc1Client.Serve(ctx, &svc1.Svc1Request{
		Request: in.Request,
	})
	if err != nil {
		log.Println(err)
		return nil, err
	}
	hostname, err := os.Hostname()
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "[echo] from %s, %s, ", hostname, downstream.GetReply())
	return &pb.EchoReply{
		Reply: buf.String(),
	}, nil
}
// Check implements grpc_health_v1.HealthServer; this server unconditionally
// reports itself as SERVING.
func (s *echoServer) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
	return &grpc_health_v1.HealthCheckResponse{
		Status: grpc_health_v1.HealthCheckResponse_SERVING,
	}, nil
}
// Watch implements the streaming half of grpc_health_v1.HealthServer; it is
// intentionally left unimplemented.
func (s *echoServer) Watch(req *grpc_health_v1.HealthCheckRequest, srv grpc_health_v1.Health_WatchServer) error {
	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
// main wires the service together: dials downstream svc1, builds a gRPC
// server with a recovery+logging interceptor chain, registers the echo,
// health, and reflection services, and serves on addr until failure.
func main() {
	ctx := context.Background()
	// NOTE(review): grpc.DialContext + grpc.WithInsecure are deprecated in
	// newer grpc-go; consider grpc.NewClient with insecure credentials when
	// upgrading. Also note no connection cleanup/close on shutdown here.
	svc1Conn, err := grpc.DialContext(ctx, svc1Addr, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to dial svc1: %v", err)
	}
	srv := &echoServer{
		svc1Client: svc1.NewSvc1Client(svc1Conn),
	}
	s := grpc.NewServer(
		grpc_middleware.WithUnaryServerChain(
			unaryServerRecoveryInterceptor(),
			// unaryServerOpenTracingInterceptor(tracer),
			// unaryServerAuthInterceptor(),
			unaryServerLoggingInterceptor(),
		),
	)
	pb.RegisterEchoServer(s, srv)
	grpc_health_v1.RegisterHealthServer(s, srv)
	// Reflection lets tools like grpcurl discover the service.
	reflection.Register(s)
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	log.Println("server listen on", addr)
	// Serve blocks until the server stops or errors.
	if err := s.Serve(ln); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package gojson
import (
"encoding/json"
"fmt"
"reflect"
"strings"
)
// Unmarshal decodes JSON data into the value pointed to by v, mirroring the
// shape of json.Unmarshal. v must be a non-nil pointer; anything else yields
// a *json.InvalidUnmarshalError.
func Unmarshal(data []byte, v interface{}) error {
	target := reflect.ValueOf(v)
	if target.Kind() != reflect.Ptr || target.IsNil() {
		return &json.InvalidUnmarshalError{Type: reflect.TypeOf(v)}
	}
	return new(decoder).unmarshal(data, target)
}
// decoder holds per-decode state for unmarshalling.
// NOTE(review): the data field is not referenced in the visible portion of
// this file — confirm whether it is still needed.
type decoder struct {
	data []byte
}
// property pairs a struct field's metadata with its addressable value, so a
// decoded JSON property can be written back into the right place.
type property struct {
	Field reflect.StructField
	Value reflect.Value
}
// collectProperties walks the exported fields of the struct value v and
// records each JSON-visible field in properties, keyed by its effective
// property name (as computed by evalTag). Anonymous (embedded) fields are
// flattened recursively first, so an outer field with the same name wins.
// It returns the UnknownProperties value found, if any: either one collected
// from an embedded struct, or v's own field of type UnknownProperties whose
// tag resolves to "-".
// NOTE(review): when several embedded fields (or an embedded field plus an
// own UnknownProperties field) each supply UnknownProperties, later ones
// silently overwrite earlier ones — confirm that is intended.
func collectProperties(v reflect.Value, properties map[string]property) UnknownProperties {
	var unknownProperties UnknownProperties
	// First pass: flatten embedded structs so their fields act like ours.
	for i := 0; i < v.NumField(); i++ {
		field := v.Type().Field(i)
		if field.Anonymous {
			unknownProperties = collectProperties(v.Field(i), properties)
		}
	}
	// Second pass: record this struct's own exported, non-embedded fields.
	for i := 0; i < v.NumField(); i++ {
		field := v.Type().Field(i)
		if field.Anonymous {
			continue
		}
		if !startsWithUpper(field.Name) {
			continue
		}
		// evalTag yields the effective property name; the previous code
		// first assigned field.Name and immediately overwrote it (dead
		// store), so the tag result is the only name ever used.
		propName, _ := evalTag(field)
		if propName == "-" {
			// TODO: pointer to UnknownProperties should also be allowed
			if field.Type == tUnknownProperties {
				unknownProperties = v.Field(i).Interface().(UnknownProperties)
			}
			continue
		}
		properties[propName] = property{field, v.Field(i)}
	}
	return unknownProperties
}
// unmarshal decodes JSON data into rv, dispatching on rv's kind.
//
// Pointers are allocated as needed and dereferenced. Scalars (string, bool,
// all int/uint widths, floats) are delegated to encoding/json via a pointer
// of the exact target type. Slices are decoded element-wise, structs
// field-wise using collectProperties, and interface values are resolved to a
// concrete implementor through its discriminator tag, falling back to the
// interface's base type when no discriminator matches.
func (d *decoder) unmarshal(data []byte, rv reflect.Value) error {
	var err error
	switch rv.Type().Kind() {
	case reflect.Ptr:
		if rv.IsNil() {
			rv.Set(reflect.New(rv.Type().Elem()))
		}
		return d.unmarshal(data, rv.Elem())
	case reflect.String, reflect.Bool,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Float32, reflect.Float64:
		// Decode into a freshly allocated value of rv's exact type. This
		// collapses the fourteen per-kind cases of the original and fixes a
		// bug: the original called rv.Set(reflect.ValueOf(v)) with the
		// unnamed basic type, which panics for named scalar types (only the
		// string case performed a Convert).
		pv := reflect.New(rv.Type())
		if err = json.Unmarshal(data, pv.Interface()); err != nil {
			return err
		}
		rv.Set(pv.Elem())
		return nil
	case reflect.Slice:
		rawEntries := []json.RawMessage{}
		if err = json.Unmarshal(data, &rawEntries); err != nil {
			return err
		}
		rv.Set(reflect.MakeSlice(rv.Type(), len(rawEntries), len(rawEntries)))
		for i := 0; i < rv.Len(); i++ {
			if err = d.unmarshal(rawEntries[i], rv.Index(i)); err != nil {
				return err
			}
		}
		return nil
	case reflect.Struct:
		properties := map[string]property{}
		collectProperties(rv, properties)
		rawDatas := map[string]json.RawMessage{}
		if err = json.Unmarshal(data, &rawDatas); err != nil {
			return err
		}
		// Unknown keys are silently ignored here.
		for k, jsonRawMessage := range rawDatas {
			if property, ok := properties[k]; ok {
				if err = d.unmarshal(jsonRawMessage, property.Value); err != nil {
					return err
				}
			}
		}
		return nil
	case reflect.Interface:
		baseType := findBaseType(rv)
		if baseType == nil {
			panic(fmt.Errorf("Unsupported type %v (kind: %v) - base type not found", rv.Type(), rv.Type().Kind()))
		}
		implementors := findImplementors(baseType)
		if implementors == nil {
			panic(fmt.Errorf("Unsupported type %v (kind: %v) - implementors not found", rv.Type(), rv.Type().Kind()))
		}
		rawProperties := map[string]json.RawMessage{}
		if err = json.Unmarshal(data, &rawProperties); err != nil {
			return err
		}
		for _, implementor := range implementors {
			key, value := discriminator(reflect.TypeOf(implementor))
			if key == "" || value == "" {
				continue
			}
			if rawValue, found := rawProperties[key]; found {
				// Strip the surrounding quotes of the raw JSON string and
				// compare against the implementor's discriminator value.
				decoded := string(rawValue)
				decoded = strings.TrimSpace(decoded[1 : len(decoded)-1])
				if value == decoded {
					inst := reflect.New(reflect.TypeOf(implementor).Elem())
					if err = d.unmarshal(data, inst); err != nil {
						return err
					}
					rv.Set(inst)
					return nil
				}
			}
		}
		// No discriminator matched: fall back to the interface's base type.
		inst := reflect.New(baseType.Elem())
		if err = d.unmarshal(data, inst); err != nil {
			return err
		}
		rv.Set(inst)
		return nil
	default:
		panic(fmt.Errorf("Unsupported type %v (kind: %v)", rv.Type(), rv.Type().Kind()))
	}
}
func discriminator(t reflect.Type) (string, string) {
t = t.Elem()
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if !field.Anonymous {
continue
}
var jsonTag string
var found bool
if jsonTag, found = field.Tag.Lookup("json"); found {
if !strings.HasPrefix(jsonTag, ",") {
continue
}
jsonTag = jsonTag[1:]
if !strings.Contains(jsonTag, "=") {
continue
}
parts := strings.Split(jsonTag, "=")
return parts[0], parts[1]
}
}
return "", ""
}
func findImplementors(baseType reflect.Type) []interface{} {
for i := 0; i < baseType.NumMethod(); i++ {
method := baseType.Method(i)
if method.Name == "Implementors" {
if method.Type.NumOut() != 1 {
return nil
}
if method.Type.NumIn() != 1 {
fmt.Println(method.Type.In(0))
return nil
}
retVal := reflect.New(baseType).Elem().Method(i).Call([]reflect.Value{})[0]
result := []interface{}{}
for j := 0; j < retVal.Len(); j++ {
result = append(result, retVal.Index(j).Interface())
}
return result
}
}
return nil
}
func findBaseType(rv reflect.Value) reflect.Type {
t := rv.Type()
for i := 0; i < t.NumMethod(); i++ {
method := t.Method(i)
if method.Name == "Initialize" {
if method.Type.NumOut() != 0 {
return nil
}
if method.Type.NumIn() != 1 {
return nil
}
return method.Type.In(0)
}
}
return nil
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/networkservices/beta/networkservices_beta_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta"
)
// EndpointConfigSelectorServer implements the gRPC interface for EndpointConfigSelector.
type EndpointConfigSelectorServer struct{}
// ProtoToNetworkservicesBetaEndpointConfigSelectorTypeEnum converts a
// EndpointConfigSelectorTypeEnum enum from its proto representation.
func ProtoToNetworkservicesBetaEndpointConfigSelectorTypeEnum(e betapb.NetworkservicesBetaEndpointConfigSelectorTypeEnum) *beta.EndpointConfigSelectorTypeEnum {
	if e == 0 {
		// Zero is the unspecified proto value; represent it as absent.
		return nil
	}
	name, ok := betapb.NetworkservicesBetaEndpointConfigSelectorTypeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto-scoped prefix to recover the bare enum value.
	v := beta.EndpointConfigSelectorTypeEnum(name[len("NetworkservicesBetaEndpointConfigSelectorTypeEnum"):])
	return &v
}
// ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum
// converts a EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum enum from its proto representation.
func ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum(e betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum) *beta.EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum {
	if e == 0 {
		// Zero is the unspecified proto value; represent it as absent.
		return nil
	}
	name, ok := betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto-scoped prefix to recover the bare enum value.
	v := beta.EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum(name[len("NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum"):])
	return &v
}
// ProtoToNetworkservicesBetaEndpointConfigSelectorHttpFilters converts a
// EndpointConfigSelectorHttpFilters resource from its proto representation.
func ProtoToNetworkservicesBetaEndpointConfigSelectorHttpFilters(p *betapb.NetworkservicesBetaEndpointConfigSelectorHttpFilters) *beta.EndpointConfigSelectorHttpFilters {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointConfigSelectorHttpFilters{}
	obj.HttpFilters = append(obj.HttpFilters, p.GetHttpFilters()...)
	return obj
}
// ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcher converts a
// EndpointConfigSelectorEndpointMatcher resource from its proto representation.
func ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcher(p *betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcher) *beta.EndpointConfigSelectorEndpointMatcher {
	if p == nil {
		return nil
	}
	matcher := ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcher(p.GetMetadataLabelMatcher())
	return &beta.EndpointConfigSelectorEndpointMatcher{MetadataLabelMatcher: matcher}
}
// ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcher
// converts a EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher resource from its proto representation.
func ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcher(p *betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcher) *beta.EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher {
	if p == nil {
		return nil
	}
	out := &beta.EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher{}
	out.MetadataLabelMatchCriteria = ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum(p.GetMetadataLabelMatchCriteria())
	for _, label := range p.GetMetadataLabels() {
		out.MetadataLabels = append(out.MetadataLabels, *ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels(label))
	}
	return out
}
// ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels
// converts a EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels resource from its proto representation.
func ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels(p *betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels) *beta.EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels{}
	obj.LabelName = dcl.StringOrNil(p.LabelName)
	obj.LabelValue = dcl.StringOrNil(p.LabelValue)
	return obj
}
// ProtoToNetworkservicesBetaEndpointConfigSelectorTrafficPortSelector converts
// a EndpointConfigSelectorTrafficPortSelector resource from its proto representation.
func ProtoToNetworkservicesBetaEndpointConfigSelectorTrafficPortSelector(p *betapb.NetworkservicesBetaEndpointConfigSelectorTrafficPortSelector) *beta.EndpointConfigSelectorTrafficPortSelector {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointConfigSelectorTrafficPortSelector{}
	obj.Ports = append(obj.Ports, p.GetPorts()...)
	return obj
}
// ProtoToEndpointConfigSelector converts a EndpointConfigSelector resource from its proto representation.
func ProtoToEndpointConfigSelector(p *betapb.NetworkservicesBetaEndpointConfigSelector) *beta.EndpointConfigSelector {
	// Fix: guard against a nil proto, mirroring every sub-resource
	// converter; the original dereferenced p.Name unconditionally and
	// panicked when the request carried no resource.
	if p == nil {
		return nil
	}
	obj := &beta.EndpointConfigSelector{
		Name:                dcl.StringOrNil(p.Name),
		CreateTime:          dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:          dcl.StringOrNil(p.GetUpdateTime()),
		Type:                ProtoToNetworkservicesBetaEndpointConfigSelectorTypeEnum(p.GetType()),
		AuthorizationPolicy: dcl.StringOrNil(p.AuthorizationPolicy),
		HttpFilters:         ProtoToNetworkservicesBetaEndpointConfigSelectorHttpFilters(p.GetHttpFilters()),
		EndpointMatcher:     ProtoToNetworkservicesBetaEndpointConfigSelectorEndpointMatcher(p.GetEndpointMatcher()),
		TrafficPortSelector: ProtoToNetworkservicesBetaEndpointConfigSelectorTrafficPortSelector(p.GetTrafficPortSelector()),
		Description:         dcl.StringOrNil(p.Description),
		ServerTlsPolicy:     dcl.StringOrNil(p.ServerTlsPolicy),
		ClientTlsPolicy:     dcl.StringOrNil(p.ClientTlsPolicy),
		Project:             dcl.StringOrNil(p.Project),
		Location:            dcl.StringOrNil(p.Location),
	}
	return obj
}
// NetworkservicesBetaEndpointConfigSelectorTypeEnumToProto converts a
// EndpointConfigSelectorTypeEnum enum to its proto representation.
func NetworkservicesBetaEndpointConfigSelectorTypeEnumToProto(e *beta.EndpointConfigSelectorTypeEnum) betapb.NetworkservicesBetaEndpointConfigSelectorTypeEnum {
	if e == nil {
		// nil maps to the unspecified (zero) proto enum value.
		return betapb.NetworkservicesBetaEndpointConfigSelectorTypeEnum(0)
	}
	v, ok := betapb.NetworkservicesBetaEndpointConfigSelectorTypeEnum_value["EndpointConfigSelectorTypeEnum"+string(*e)]
	if !ok {
		return betapb.NetworkservicesBetaEndpointConfigSelectorTypeEnum(0)
	}
	return betapb.NetworkservicesBetaEndpointConfigSelectorTypeEnum(v)
}
// NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnumToProto
// converts a EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum enum to its proto representation.
func NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnumToProto(e *beta.EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum) betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum {
	if e == nil {
		// nil maps to the unspecified (zero) proto enum value.
		return betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum(0)
	}
	v, ok := betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum_value["EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum"+string(*e)]
	if !ok {
		return betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum(0)
	}
	return betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum(v)
}
// NetworkservicesBetaEndpointConfigSelectorHttpFiltersToProto converts a
// EndpointConfigSelectorHttpFilters resource to its proto representation.
func NetworkservicesBetaEndpointConfigSelectorHttpFiltersToProto(o *beta.EndpointConfigSelectorHttpFilters) *betapb.NetworkservicesBetaEndpointConfigSelectorHttpFilters {
	if o == nil {
		return nil
	}
	p := &betapb.NetworkservicesBetaEndpointConfigSelectorHttpFilters{}
	p.HttpFilters = append(p.HttpFilters, o.HttpFilters...)
	return p
}
// NetworkservicesBetaEndpointConfigSelectorEndpointMatcherToProto converts a
// EndpointConfigSelectorEndpointMatcher resource to its proto representation.
func NetworkservicesBetaEndpointConfigSelectorEndpointMatcherToProto(o *beta.EndpointConfigSelectorEndpointMatcher) *betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcher {
	if o == nil {
		return nil
	}
	matcher := NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherToProto(o.MetadataLabelMatcher)
	return &betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcher{MetadataLabelMatcher: matcher}
}
// NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherToProto
// converts a EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher resource to its proto representation.
func NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherToProto(o *beta.EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher) *betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcher {
	if o == nil {
		return nil
	}
	p := &betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcher{}
	p.MetadataLabelMatchCriteria = NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnumToProto(o.MetadataLabelMatchCriteria)
	// Index into the slice instead of taking the address of a loop variable.
	for i := range o.MetadataLabels {
		p.MetadataLabels = append(p.MetadataLabels, NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsToProto(&o.MetadataLabels[i]))
	}
	return p
}
// NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsToProto
// converts a EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels resource to its proto representation.
func NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsToProto(o *beta.EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels) *betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels {
	if o == nil {
		return nil
	}
	p := &betapb.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels{}
	p.LabelName = dcl.ValueOrEmptyString(o.LabelName)
	p.LabelValue = dcl.ValueOrEmptyString(o.LabelValue)
	return p
}
// NetworkservicesBetaEndpointConfigSelectorTrafficPortSelectorToProto converts
// a EndpointConfigSelectorTrafficPortSelector resource to its proto representation.
func NetworkservicesBetaEndpointConfigSelectorTrafficPortSelectorToProto(o *beta.EndpointConfigSelectorTrafficPortSelector) *betapb.NetworkservicesBetaEndpointConfigSelectorTrafficPortSelector {
	if o == nil {
		return nil
	}
	p := &betapb.NetworkservicesBetaEndpointConfigSelectorTrafficPortSelector{}
	p.Ports = append(p.Ports, o.Ports...)
	return p
}
// EndpointConfigSelectorToProto converts a EndpointConfigSelector resource to its proto representation.
// NOTE(review): unlike the sub-resource converters this assumes resource is
// non-nil; visible callers pass results of Apply/List — confirm nil cannot
// reach here.
func EndpointConfigSelectorToProto(resource *beta.EndpointConfigSelector) *betapb.NetworkservicesBetaEndpointConfigSelector {
	p := &betapb.NetworkservicesBetaEndpointConfigSelector{
		Name: dcl.ValueOrEmptyString(resource.Name),
		CreateTime: dcl.ValueOrEmptyString(resource.CreateTime),
		UpdateTime: dcl.ValueOrEmptyString(resource.UpdateTime),
		Type: NetworkservicesBetaEndpointConfigSelectorTypeEnumToProto(resource.Type),
		AuthorizationPolicy: dcl.ValueOrEmptyString(resource.AuthorizationPolicy),
		HttpFilters: NetworkservicesBetaEndpointConfigSelectorHttpFiltersToProto(resource.HttpFilters),
		EndpointMatcher: NetworkservicesBetaEndpointConfigSelectorEndpointMatcherToProto(resource.EndpointMatcher),
		TrafficPortSelector: NetworkservicesBetaEndpointConfigSelectorTrafficPortSelectorToProto(resource.TrafficPortSelector),
		Description: dcl.ValueOrEmptyString(resource.Description),
		ServerTlsPolicy: dcl.ValueOrEmptyString(resource.ServerTlsPolicy),
		ClientTlsPolicy: dcl.ValueOrEmptyString(resource.ClientTlsPolicy),
		Project: dcl.ValueOrEmptyString(resource.Project),
		Location: dcl.ValueOrEmptyString(resource.Location),
	}
	return p
}
// applyEndpointConfigSelector handles the gRPC request by passing it to the
// underlying EndpointConfigSelector Apply() method.
func (s *EndpointConfigSelectorServer) applyEndpointConfigSelector(ctx context.Context, c *beta.Client, request *betapb.ApplyNetworkservicesBetaEndpointConfigSelectorRequest) (*betapb.NetworkservicesBetaEndpointConfigSelector, error) {
	resource := ProtoToEndpointConfigSelector(request.GetResource())
	res, err := c.ApplyEndpointConfigSelector(ctx, resource)
	if err != nil {
		return nil, err
	}
	return EndpointConfigSelectorToProto(res), nil
}
// ApplyNetworkservicesBetaEndpointConfigSelector handles the gRPC request by
// passing it to the underlying EndpointConfigSelector Apply() method.
func (s *EndpointConfigSelectorServer) ApplyNetworkservicesBetaEndpointConfigSelector(ctx context.Context, request *betapb.ApplyNetworkservicesBetaEndpointConfigSelectorRequest) (*betapb.NetworkservicesBetaEndpointConfigSelector, error) {
	cl, err := createConfigEndpointConfigSelector(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyEndpointConfigSelector(ctx, cl, request)
}
// DeleteNetworkservicesBetaEndpointConfigSelector handles the gRPC request by
// passing it to the underlying EndpointConfigSelector Delete() method.
func (s *EndpointConfigSelectorServer) DeleteNetworkservicesBetaEndpointConfigSelector(ctx context.Context, request *betapb.DeleteNetworkservicesBetaEndpointConfigSelectorRequest) (*emptypb.Empty, error) {
	cl, err := createConfigEndpointConfigSelector(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resource := ProtoToEndpointConfigSelector(request.GetResource())
	return &emptypb.Empty{}, cl.DeleteEndpointConfigSelector(ctx, resource)
}
// ListNetworkservicesBetaEndpointConfigSelector handles the gRPC request by
// passing it to the underlying EndpointConfigSelectorList() method.
func (s *EndpointConfigSelectorServer) ListNetworkservicesBetaEndpointConfigSelector(ctx context.Context, request *betapb.ListNetworkservicesBetaEndpointConfigSelectorRequest) (*betapb.ListNetworkservicesBetaEndpointConfigSelectorResponse, error) {
	cl, err := createConfigEndpointConfigSelector(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListEndpointConfigSelector(ctx, request.Project, request.Location)
	if err != nil {
		return nil, err
	}
	var protos []*betapb.NetworkservicesBetaEndpointConfigSelector
	for i := range resources.Items {
		protos = append(protos, EndpointConfigSelectorToProto(resources.Items[i]))
	}
	return &betapb.ListNetworkservicesBetaEndpointConfigSelectorResponse{Items: protos}, nil
}
// createConfigEndpointConfigSelector builds a beta networkservices client
// configured with credentials from the given service account file.
func createConfigEndpointConfigSelector(ctx context.Context, serviceAccountFile string) (*beta.Client, error) {
	// Fix: the parameter was named service_account_file; Go naming uses
	// mixedCaps, never underscores.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return beta.NewClient(conf), nil
}
|
package main
import (
"auth/security"
"auth/user"
"encoding/json"
"log"
"net/http"
"os"
"time"
"github.com/codegangsta/negroni"
"github.com/gorilla/context"
"github.com/gorilla/mux"
)
// main wires the auth HTTP endpoints (/v1/auth, /v1/validate-token) behind a
// logging middleware chain and serves them on :8081.
func main() {
	uService := user.NewService()
	r := mux.NewRouter()
	// Shared middleware chain: request logging only.
	n := negroni.New(negroni.NewLogger())
	r.Handle("/v1/auth", n.With(
		negroni.Wrap(userAuth(uService)),
	)).Methods("POST", "OPTIONS")
	r.Handle("/v1/validate-token", n.With(
		negroni.Wrap(validateToken()),
	)).Methods("POST", "OPTIONS")
	http.Handle("/", r)
	logger := log.New(os.Stderr, "logger: ", log.Lshortfile)
	srv := &http.Server{
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 30 * time.Second,
		Addr:         ":8081", // TODO: read the address from an environment variable
		Handler:      context.ClearHandler(http.DefaultServeMux),
		ErrorLog:     logger,
	}
	if err := srv.ListenAndServe(); err != nil {
		panic(err)
	}
}
func userAuth(uService user.UseCase) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var param struct {
Email string `json:"email"`
Password string `json:"password"`
}
err := json.NewDecoder(r.Body).Decode(¶m)
if err != nil {
w.WriteHeader(http.StatusBadGateway)
return
}
err = uService.ValidateUser(param.Email, param.Password)
if err != nil {
w.WriteHeader(http.StatusForbidden)
return
}
var result struct {
Token string `json:"token"`
}
result.Token, err = security.NewToken(param.Email)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
if err := json.NewEncoder(w).Encode(result); err != nil {
w.WriteHeader(http.StatusBadGateway)
return
}
return
})
}
func validateToken() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var param struct {
Token string `json:"token"`
}
err := json.NewDecoder(r.Body).Decode(¶m)
if err != nil {
w.WriteHeader(http.StatusBadGateway)
return
}
t, err := security.ParseToken(param.Token)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
tData, err := security.GetClaims(t)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
var result struct {
Email string `json:"email"`
}
result.Email = tData["email"].(string)
if err := json.NewEncoder(w).Encode(result); err != nil {
w.WriteHeader(http.StatusBadGateway)
return
}
return
})
}
|
package gadk
import (
"testing"
)
// apiServer is the live GADK node the integration tests below talk to.
const apiServer = "http://78.46.250.88:15555"

var (
	// seed is the wallet seed the tests derive addresses and inputs from.
	seed Trytes = "VOQHWAPIKQNYQZRYRMJIYSLPBVLFOTPJMQKKNYDANFTG9ICYDLRUJPCDDWDLD9YEGIKISSHWWHKOWONMN"
)
// TODO Fix
// TestTransfer1 exercises GetUsedAddress and GetInputs against a live node,
// retrying each call up to five times to ride out transient network errors.
func TestTransfer1(t *testing.T) {
	var err error
	var adr Address
	var adrs []Address
	for i := 0; i < 5; i++ {
		api := NewAPI(apiServer, nil)
		adr, adrs, err = GetUsedAddress(api, seed, 2)
		if err == nil {
			break
		}
	}
	if err != nil {
		// Fix: Fatal rather than Error — the assertions below would only
		// produce misleading follow-on failures on zero values.
		t.Fatal(err)
	}
	t.Log(adr, adrs)
	if len(adrs) < 1 {
		t.Error("GetUsedAddress is incorrect")
	}
	var bal Balances
	for i := 0; i < 5; i++ {
		api := NewAPI(apiServer, nil)
		_, bal, err = GetInputs(api, seed, 0, 100, 1000, 2)
		if err == nil {
			break
		}
	}
	if err != nil {
		t.Fatal(err)
	}
	t.Log(bal)
	if len(bal) < 1 {
		t.Error("GetInputs is incorrect")
	}
}
// TODO Fix
// TestTransfer2 prepares, validates, and sends a bundle against a live node,
// retrying each network call up to five times.
func TestTransfer2(t *testing.T) {
	var err error
	trs := []Transfer{
		{
			Address: "ZTBTQDHNZBVJXOJSIMQPUHQORZFALAHWRBYJQMRFTVSDDRLICVGBOEEXIJSMNNSWEVICVAMEZPBVASNSETGEIMKSGA",
			Value:   20,
			Tag:     "MOUDAMEPO",
		},
	}
	var bdl Bundle
	for i := 0; i < 5; i++ {
		api := NewAPI(apiServer, nil)
		bdl, err = PrepareTransfers(api, seed, trs, nil, "", 2)
		if err == nil {
			break
		}
	}
	if err != nil {
		// Fix: Fatal rather than Error — bdl is unusable below if every
		// attempt failed.
		t.Fatal(err)
	}
	if len(bdl) < 3 {
		for _, tx := range bdl {
			t.Log(tx.Trytes())
		}
		t.Fatal("PrepareTransfers is incorrect len(bdl)=", len(bdl))
	}
	if err = bdl.IsValid(); err != nil {
		t.Error(err)
	}
	name, pow := GetBestPoW()
	t.Log("using PoW: ", name)
	for i := 0; i < 5; i++ {
		api := NewAPI(apiServer, nil)
		bdl, err = Send(api, seed, 2, trs, pow)
		if err == nil {
			break
		}
	}
	if err != nil {
		t.Fatal(err)
	}
	for _, tx := range bdl {
		t.Log(tx.Trytes())
	}
}
|
package main
import (
"fmt"
"sort"
)
// main walks through array vs. slice semantics: value-copied arrays, nil
// slices, len/cap growth under append, aliasing with a backing array, and
// the helper functions defined below.
func main() {
	var arr [3]int
	fmt.Printf("%v - %T", arr, arr)
	// arrChange receives a copy: arr is unchanged afterwards.
	arrChange(arr, 1, 100)
	fmt.Println()
	fmt.Printf("%v - %T", arr, arr)
	fmt.Println()
	var s1 []int
	fmt.Printf("%v - %T", s1, s1)
	fmt.Println()
	// A declared-but-unassigned slice is nil with len 0 and cap 0.
	fmt.Printf("is s1 is a nil? %v", s1 == nil)
	fmt.Println()
	fmt.Printf("len %v, cap %v", len(s1), cap(s1))
	// Slicing the array makes s1 share arr's backing storage.
	s1 = arr[0:2]
	fmt.Println()
	fmt.Printf("%v - %T", s1, s1)
	fmt.Println()
	fmt.Printf("len %v, cap %v", len(s1), cap(s1))
	// cap is 3 here, so this append writes into arr[2] instead of
	// allocating a new backing array.
	s1 = append(s1, 3)
	fmt.Println()
	fmt.Printf("len %v, cap %v", len(s1), cap(s1))
	fmt.Println()
	fmt.Printf("%v - %T, %v - %T, ", s1, s1, arr, arr)
	s2 := make([]int, 0, 5)
	s2 = appendN(s2, 3, 2)
	fmt.Println(s2)
	appendSizes()
	// make with non-zero length yields a non-nil slice.
	s3 := make([]int, 2)
	fmt.Println("is s3 is a nil?", s3 == nil)
	//copy(s3, s1)
	//fmt.Println("s3", s3)
	//fmt.Println("s1", s1)
	//var s4 []int
	fmt.Println("SDASD", concat([]int{1, 3, 4}, []int{2}))
	fmt.Println(median([]float64{1, 2, 3}))
	fmt.Println(median([]float64{1, 2, 3, 4}))
}
// appendN appends val to vals n times and returns the extended slice.
func appendN(vals []int, val, n int) []int {
	// Fix: the original appended the count n instead of the value val,
	// leaving the val parameter unused.
	for i := 0; i < n; i++ {
		vals = append(vals, val)
	}
	return vals
}
// array is passed by value!!
// arrChange sets arr[i] = val on a COPY of the array; the caller's array is
// deliberately left unchanged to demonstrate array value semantics.
func arrChange(arr [3]int, i, val int) {
	arr[i] = val
}
// slice is passed by ref!!
// sliceChange sets arr[i] = val through the shared backing array: the slice
// header is copied, the underlying storage is not, so the caller observes it.
func sliceChange(arr []int, i, val int) {
	arr[i] = val
}
// appendSizes prints each capacity jump observed while appending 15000
// elements to an initially nil slice, illustrating append's growth policy.
func appendSizes() {
	prevCap := 0
	var s []int
	for i := 0; i < 15000; i++ {
		s = append(s, i)
		if c := cap(s); c != prevCap {
			fmt.Println(prevCap, "->", c, c-prevCap)
			prevCap = c
		}
	}
}
// concat returns a new slice holding the elements of a1 followed by a2.
// Fix: the original returned append(a1, a2...), which writes into a1's
// backing array when a1 has spare capacity, aliasing the result with (and
// clobbering) the caller's data. Copy into a fresh slice instead.
func concat(a1, a2 []int) []int {
	out := make([]int, 0, len(a1)+len(a2))
	out = append(out, a1...)
	return append(out, a2...)
}
// median returns the median of values, or an error when the slice is empty.
// The input is copied before sorting so the caller's slice is not reordered
// (the original sorted in place and only checked for emptiness afterwards).
func median(values []float64) (float64, error) {
	if len(values) == 0 {
		return 0, fmt.Errorf("empty")
	}
	sorted := make([]float64, len(values))
	copy(sorted, values)
	sort.Float64s(sorted)
	mid := len(sorted) / 2
	if len(sorted)%2 == 0 {
		// Even count: mean of the two middle elements.
		return (sorted[mid-1] + sorted[mid]) / 2, nil
	}
	return sorted[mid], nil
}
|
package command
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/markbates/inflect"
"github.com/mattn/echo-scaffold/template"
)
// ModelCommand generates files related to model.
// It doubles as the template context used to render the generated files.
type ModelCommand struct {
	PackageName string // Go package for the generated files
	ModelName string // titleized singular model name, e.g. "Post"
	ModelNamePlural string // pluralized model name, e.g. "Posts"
	InstanceName string // lower-camel instance name, e.g. "post"
	InstanceNamePlural string // pluralized instance name, e.g. "posts"
	TemplateName string // template identifier; appears unused in this file — TODO confirm
	Fields map[string]string // field name -> Go type (see processFields)
}
// Name returns the subcommand name used on the command line.
func (command *ModelCommand) Name() string {
	return "model"
}
// Help prints a usage message for this command to standard output.
func (command *ModelCommand) Help() {
	fmt.Printf(`Usage:
echo-scaffold model <model name> <field name>:<field type> ...
Description:
The echo-scaffold model command creates a new model with the given fields.
Example:
echo-scaffold model Post Title:string Body:string
`)
}
// findFieldType maps a scaffold field-type alias to its Go type name;
// unrecognized names are returned unchanged.
func findFieldType(name string) string {
	switch name {
	case "text":
		return "string"
	case "float":
		return "float64"
	case "boolean":
		return "bool"
	case "integer":
		return "int"
	case "time", "datetime":
		return "int64"
	default:
		return name
	}
}
// Converts "<fieldname>:<type>" to {"<fieldname>": "<type>"}
// Fix: arguments without an explicit ":<type>" now default to "string"
// instead of panicking on a missing slice index as the original did.
func processFields(args []string) map[string]string {
	fields := map[string]string{}
	for _, arg := range args {
		parts := strings.SplitN(arg, ":", 2)
		fieldType := "string"
		if len(parts) == 2 {
			fieldType = findFieldType(parts[1])
		}
		fields[inflect.Titleize(parts[0])] = fieldType
	}
	return fields
}
// Execute runs this command.
// It parses the positional arguments (<model name> followed by field specs),
// fills in the template context, and renders the model source files under
// the models/ directory.
func (command *ModelCommand) Execute(args []string) {
	// Fix: the original named this variable "flag", shadowing the flag
	// package it was created from.
	fs := flag.NewFlagSet(command.Name(), flag.ExitOnError)
	fs.Usage = command.Help
	fs.Parse(args)
	if fs.NArg() < 2 {
		command.Help()
		os.Exit(2)
	}
	command.ModelName = inflect.Titleize(fs.Arg(0))
	command.ModelNamePlural = inflect.Pluralize(command.ModelName)
	command.Fields = processFields(fs.Args()[1:])
	command.InstanceName = inflect.CamelizeDownFirst(command.ModelName)
	command.InstanceNamePlural = inflect.Pluralize(command.InstanceName)
	command.PackageName = template.PackageName()
	outputPath := filepath.Join("models", inflect.Underscore(command.ModelName)+".go")
	builder := template.NewBuilder("model.go.tmpl")
	builder.WriteToPath(outputPath, command)
	outputPath = filepath.Join("models", inflect.Underscore(command.ModelName)+"_dbsession.go")
	builder = template.NewBuilder("model_dbsession.go.tmpl")
	builder.WriteToPath(outputPath, command)
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"github.com/getlantern/systray"
"github.com/op/go-logging"
"net"
"net/http"
"time"
)
// Price-source identifiers selectable from the tray menu.
const binanceSource = "binance"
const coincapSource = "coincap"
// Token is one asset row as reported by a price source; JSON tags follow the
// coincap response schema — TODO confirm binance responses map to the same
// fields.
type Token struct {
	ID string `json:"id"`
	Symbol string `json:"symbol"`
	Name string `json:"name"`
	PriceUsd json.Number `json:"priceUsd"`
	ChangePercent24Hr json.Number `json:"changePercent24Hr"`
	Comment string `json:"comment"`
}
// App is the tray application: it periodically fetches prices for the
// selected tokens and renders them in the system tray title.
type App struct {
	log *logging.Logger
	client *http.Client // HTTP client with 2s dial/request timeouts (see NewApp)
	tokensLimit int // presumably caps the number of tokens fetched — TODO confirm in getTokens
	blinkOnUpdate bool // presumably toggles blinking on refresh — TODO confirm usage
	updatesDelay time.Duration // pause between periodic price refreshes
	fileName string // path where the current selection is persisted (see save/load)
}
// NewApp builds the tray application with an HTTP client whose dial and
// overall request timeouts are both capped at two seconds.
func NewApp(tokensLimit int, blinkOnUpdate bool, updatesDelay time.Duration, fileName string) *App {
	dialer := &net.Dialer{Timeout: 2 * time.Second}
	httpClient := &http.Client{
		Transport: &http.Transport{DialContext: dialer.DialContext},
		Timeout:   2 * time.Second,
	}
	return &App{
		log:           logging.MustGetLogger(appName),
		client:        httpClient,
		tokensLimit:   tokensLimit,
		blinkOnUpdate: blinkOnUpdate,
		updatesDelay:  updatesDelay,
		fileName:      fileName,
	}
}
// Start hands control to the systray event loop; it blocks until Quit.
func (a *App) Start() {
	systray.Run(a.onReady, a.onExit)
}
// onExit is the systray teardown hook; there is currently nothing to clean up.
func (a *App) onExit() {
}
// onReady is the systray startup hook: it restores the previously saved
// selection, fetches the token list for the menu (panicking if the list
// cannot be loaded), builds the menu, and starts the update loops.
func (a *App) onReady() {
	chSource := make(chan string, 100)
	chClicked := make(chan []*Token, 100)
	// load saved tokens
	go load(a.fileName, chClicked, chSource)
	tokens, err := a.loadTokensForMenu()
	if err != nil {
		panic(err)
	}
	a.createMenu(tokens, chClicked, chSource)
	a.menuLoop(chClicked, chSource)
}
// loadTokensForMenu fetches the token list, retrying up to ten times with a
// two-second pause and surfacing each failure in the tray title. It returns
// the last error when every attempt fails.
func (a *App) loadTokensForMenu() ([]*Token, error) {
	var err error
	for i := 0; i < 10; i++ {
		var tokens []*Token
		// Fix: the original used ":=" here, shadowing err; after ten
		// failures it returned (nil, nil) and the caller panicked with a
		// nil value instead of the real error.
		tokens, err = a.getTokens()
		if err == nil {
			a.log.Info("tokens loaded successful")
			return tokens, nil
		}
		a.log.Error("can't get tokens at start", err)
		systray.SetTitle(appName + ": " + err.Error())
		time.Sleep(2 * time.Second)
	}
	return nil, err
}
// createMenu populates the tray menu: one entry per token, the list of price
// sources, and a Quit item. Each entry gets a goroutine that forwards clicks
// to the provided channels.
func (a *App) createMenu(tokens []*Token, clickedTokens chan []*Token, source chan string) {
	// Token entries: a click sends that token to clickedTokens.
	for _, token := range tokens {
		token := token // capture per-iteration value for the goroutine
		item := systray.AddMenuItem(token.Symbol, token.Name)
		go func() {
			for {
				<-item.ClickedCh
				clickedTokens <- []*Token{token}
			}
		}()
	}
	systray.AddSeparator()
	// Source entries: a click switches the active price source.
	for _, sourceName := range []string{coincapSource, binanceSource} {
		sourceName := sourceName // capture per-iteration value
		item := systray.AddMenuItem(sourceName, "")
		go func() {
			for {
				<-item.ClickedCh
				source <- sourceName
			}
		}()
	}
	systray.AddSeparator()
	mQuit := systray.AddMenuItem("Quit", "Close the app")
	go func() {
		<-mQuit.ClickedCh
		systray.Quit()
	}()
}
// menuLoop owns the selection state: clicks toggle a token in `selected`,
// source clicks switch the active price source, and a ticker drives
// periodic tray refreshes. Every change is rendered immediately and
// persisted via save.
//
// Race fix: the original used two goroutines that shared `selected` and
// `source` with no synchronization (one mutating on clicks, one reading
// periodically). All state access is now serialized in a single
// goroutine, with the periodic refresh folded into the same select via
// time.Ticker.
func (a *App) menuLoop(chClickedTokens chan []*Token, chSource chan string) {
	go func() {
		selected := make([]*Token, 0)
		var source string
		ticker := time.NewTicker(a.updatesDelay)
		defer ticker.Stop()
		// initial render before the first tick
		a.updateTray(selected, source)
		for {
			select {
			case clickedTokens := <-chClickedTokens:
				for _, clicked := range clickedTokens {
					found := false
					for i, v := range selected {
						if v.ID == clicked.ID { // already selected - delete it
							found = true
							selected = append(selected[:i], selected[i+1:]...)
						}
					}
					if !found {
						selected = append(selected, clicked)
					}
				}
				a.updateTray(selected, source)
				go save(a.fileName, selected, source)
			case source = <-chSource:
				a.updateTray(selected, source)
				go save(a.fileName, selected, source)
			case <-ticker.C:
				a.updateTray(selected, source)
			}
		}
	}()
}
// updateTray renders the tray title: the active source name (coincap when
// none is chosen) followed by "SYMBOL - price$ [24h%]" for each selected
// token, or a "select coin" hint when nothing is selected. When
// blinkOnUpdate is set, the title is cleared for 100ms first so updates
// are visually noticeable.
func (a *App) updateTray(selected []*Token, source string) {
	title := source
	if title == "" {
		title = coincapSource
	}
	title += ": "
	if len(selected) == 0 {
		title += "select coin"
	} else {
		for _, token := range selected {
			updated, err := a.getToken(token, source)
			switch {
			case err != nil:
				a.log.Error("can't get token", err)
				title += fmt.Sprintf("%s - %s ", token.Symbol, err.Error())
			case updated != nil:
				price, _ := updated.PriceUsd.Float64()
				title += fmt.Sprintf("%s - %.3f$ ", updated.Symbol, price)
				// a zero change figure is treated as "unknown" and hidden
				if percent, _ := updated.ChangePercent24Hr.Float64(); percent != 0 {
					title += fmt.Sprintf("[%.2f%%] ", percent)
				}
			}
		}
	}
	if a.blinkOnUpdate {
		systray.SetTitle("")
		time.Sleep(100 * time.Millisecond)
	}
	systray.SetTitle(title)
}
// getToken refreshes a token's price from the given source. Any source
// other than binance — including the empty string — falls back to
// coincap, matching the default used by updateTray.
func (a *App) getToken(token *Token, source string) (*Token, error) {
	if source == binanceSource {
		return a.getBinanceToken(token, "USDT")
	}
	return a.getCoinCapToken(token)
}
// getCoinCapToken fetches a single asset from the CoinCap v2 API and
// returns the decoded token (nil with nil error when the payload carries
// no "data"). Underlying errors are logged; callers get short messages
// suitable for the tray title.
func (a *App) getCoinCapToken(token *Token) (*Token, error) {
	var payload struct {
		Data      *Token `json:"data"`
		Timestamp int    `json:"timestamp"`
	}
	url := fmt.Sprintf("https://api.coincap.io/v2/assets/%s", token.ID)
	resp, err := makeRequest(context.Background(), a.client, "GET", url, nil, nil)
	if err != nil {
		a.log.Error(err)
		return nil, fmt.Errorf("connection problem")
	}
	defer func() {
		if cerr := resp.Body.Close(); cerr != nil {
			a.log.Error(cerr)
		}
	}()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("server is not available")
	}
	if err = json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		a.log.Error(err)
		return nil, fmt.Errorf("parsing problem")
	}
	return payload.Data, nil
}
// getBinanceToken fetches the most recent trade for SYMBOL+currency from
// the Binance API and writes its price into token.PriceUsd. Binance
// provides no 24h-change figure on this endpoint, so ChangePercent24Hr
// is cleared. Returns short user-facing errors for the tray title.
//
// Bug fix: the original indexed result[0] unconditionally, which panics
// with index-out-of-range when the API returns an empty trade list; an
// explicit length guard now turns that into an error.
func (a *App) getBinanceToken(token *Token, currency string) (*Token, error) {
	type Result []struct {
		Price json.Number `json:"price"`
	}
	result := Result{}
	url := fmt.Sprintf("https://api.binance.com/api/v1/trades?limit=1&symbol=%s%s", token.Symbol, currency)
	r, err := makeRequest(context.Background(), a.client, "GET", url, nil, nil)
	if err != nil {
		a.log.Error(err)
		return nil, fmt.Errorf("connection problem")
	}
	defer func() {
		if cerr := r.Body.Close(); cerr != nil {
			a.log.Error(cerr)
		}
	}()
	// Binance answers 400 for unknown trading pairs
	if r.StatusCode == http.StatusBadRequest {
		return token, fmt.Errorf("token not found")
	}
	if r.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("server is not available")
	}
	if err = json.NewDecoder(r.Body).Decode(&result); err != nil {
		a.log.Error(err)
		return nil, fmt.Errorf("parsing problem")
	}
	if len(result) == 0 {
		return nil, fmt.Errorf("no trades returned")
	}
	token.PriceUsd = result[0].Price
	token.ChangePercent24Hr = ""
	return token, nil
}
// getTokens fetches the top a.tokensLimit assets from the CoinCap v2 API
// for the initial menu. Underlying errors are logged; callers receive
// short user-facing messages.
//
// Bug fix: on a non-200 response the original logged `err`, which is
// always nil on that path (the request itself succeeded); the actual
// HTTP status is now logged instead.
func (a *App) getTokens() ([]*Token, error) {
	type Result struct {
		Data      []*Token `json:"data"`
		Timestamp int      `json:"timestamp"`
	}
	result := Result{}
	url := fmt.Sprintf("https://api.coincap.io/v2/assets?limit=%d", a.tokensLimit)
	r, err := makeRequest(context.Background(), a.client, "GET", url, nil, nil)
	if err != nil {
		a.log.Error(err)
		return nil, fmt.Errorf("connection problem")
	}
	defer func() {
		if cerr := r.Body.Close(); cerr != nil {
			a.log.Error(cerr)
		}
	}()
	if r.StatusCode != http.StatusOK {
		a.log.Error("unexpected status from coincap:", r.Status)
		return nil, fmt.Errorf("server is not available")
	}
	if err = json.NewDecoder(r.Body).Decode(&result); err != nil {
		a.log.Error(err)
		return nil, fmt.Errorf("parsing problem")
	}
	return result.Data, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.