text stringlengths 11 4.05M |
|---|
package services
import (
"fmt"
"time"
"github.com/Volkov-D-A/vk-stitch-bot/pkg/config"
"github.com/Volkov-D-A/vk-stitch-bot/pkg/models"
"github.com/Volkov-D-A/vk-stitch-bot/pkg/repository"
)
// MessagingService implements messaging-related business logic on top of
// the repository layer.
type MessagingService struct {
	repos *repository.Repository // data-access layer (storage + VK API calls)
	config *config.Config // bot configuration (e.g. GroupOwner id)
}
// NewMessagingService constructs a MessagingService backed by the given
// repository and configuration.
func NewMessagingService(repos *repository.Repository, config *config.Config) *MessagingService {
	return &MessagingService{repos: repos, config: config}
}
// InitDatabase fills the recipients table on first start. When the table is
// empty, every group user that allows messages is added as a recipient and
// stale entries for disallowed users are removed; the group owner receives
// start/finish notifications. When the table already has entries the call
// is a no-op.
func (ms *MessagingService) InitDatabase() error {
	// Check whether the database has already been initialized.
	cnt, err := ms.repos.CountRecipients(nil)
	if err != nil {
		return fmt.Errorf("error while counts database %v", err)
	}
	if cnt != 0 {
		// Already initialized; nothing to do.
		return nil
	}
	if err := ms.repos.SendMessage("Database initiation started", nil, &models.MessageRecipient{Id: ms.config.GroupOwner}); err != nil {
		return fmt.Errorf("error while sending system message: %v", err)
	}
	count := 0
	res, err := ms.repos.GetGroupUsers()
	if err != nil {
		return fmt.Errorf("error while getting group users %v", err)
	}
	// BUG FIX: the loop previously ran while i < len(res)-1 and skipped the
	// last group user.
	for i := 0; i < len(res); i++ {
		allowed, err := ms.repos.CheckAllowedMessages(res[i])
		if err != nil {
			return fmt.Errorf("error while checking allowed messages: %v", err)
		}
		// Both branches need the current recipient count, so fetch it once.
		cnt, err := ms.repos.CountRecipients(res[i])
		if err != nil {
			return fmt.Errorf("error while checking presence recipient: %v", err)
		}
		if allowed {
			if cnt == 0 {
				if err = ms.repos.AddRecipient(&models.MessageRecipient{Id: res[i]}); err != nil {
					return fmt.Errorf("error while adding recipient: %v", err)
				}
				count++
			}
		} else if cnt == 1 {
			if err = ms.repos.DeleteRecipient(&models.MessageRecipient{Id: res[i]}); err != nil {
				return fmt.Errorf("error while deleting recipient: %v", err)
			}
		}
		// Throttle calls: pause after every 20 processed users
		// (previously the sleep also fired pointlessly at i == 0).
		if i > 0 && i%20 == 0 {
			time.Sleep(time.Second * 1)
		}
	}
	// BUG FIX: the finish message previously contained raw, unfilled %d
	// verbs; format it with the actual counters before sending.
	finish := fmt.Sprintf("Database initiation finished. Processed %d group users, added %d messages recipients.", len(res), count)
	if err := ms.repos.SendMessage(finish, nil, &models.MessageRecipient{Id: ms.config.GroupOwner}); err != nil {
		return fmt.Errorf("error while sending system message: %v", err)
	}
	return nil
}
// AddRecipient stores rec as a message recipient.
func (ms *MessagingService) AddRecipient(rec *models.MessageRecipient) error {
	return ms.repos.AddRecipient(rec)
}
// DeleteRecipient removes rec from the stored message recipients.
func (ms *MessagingService) DeleteRecipient(rec *models.MessageRecipient) error {
	return ms.repos.DeleteRecipient(rec)
}
// SendMultipleMessages fetches every stored recipient and prints the list
// to stdout. (Actual bulk dispatch is not implemented yet.)
func (ms *MessagingService) SendMultipleMessages() error {
	recipients, err := ms.repos.GelAllRecipients()
	if err != nil {
		return err
	}
	fmt.Println(recipients)
	return nil
}
|
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package sync2
import (
"context"
)
// ReceiverClosableChan is a channel with altered semantics
// from the go runtime channels. It is designed to work
// well in a many-producer, single-receiver environment,
// where the receiver consumes until it is shut down and
// must signal to many senders to stop sending.
type ReceiverClosableChan[T any] struct {
	outstanding int // buffer positions not yet proven free while draining
	slots chan struct{} // tokens for free buffer positions; closed by StopReceiving
	vals chan T // buffered values in flight from senders to the receiver
	receiverCalls int // incremented unsynchronized to make the race detector flag concurrent receiver-side calls
}
// MakeReceiverClosableChan makes a new buffered channel of
// the given buffer size. A zero buffer size is currently
// undefined behavior (it panics here).
func MakeReceiverClosableChan[T any](bufferSize int) *ReceiverClosableChan[T] {
	if bufferSize <= 0 {
		panic("invalid buffer size")
	}
	ch := &ReceiverClosableChan[T]{
		outstanding: bufferSize,
		slots:       make(chan struct{}, bufferSize),
		vals:        make(chan T, bufferSize),
	}
	// Every slot starts out free so senders can proceed immediately.
	for i := bufferSize; i > 0; i-- {
		ch.slots <- struct{}{}
	}
	return ch
}
// BlockingSend will send the value into the channel's buffer. If the
// buffer is full, BlockingSend will block. BlockingSend will fail and return
// false if StopReceiving is called.
func (c *ReceiverClosableChan[T]) BlockingSend(v T) (ok bool) {
	// Acquire a free buffer slot first. Receiving from the closed slots
	// channel yields ok == false, which signals that StopReceiving ran.
	if _, ok := <-c.slots; !ok {
		return false
	}
	// A slot was claimed, so this send never blocks indefinitely.
	c.vals <- v
	return true
}
// Receive returns the next request, until and unless ctx is canceled.
// Receive does not stop if there are no more requests and StopReceiving
// has been called, as it is expected that the caller of Receive is
// who called StopReceiving.
// The error is not nil if and only if the context was canceled.
func (c *ReceiverClosableChan[T]) Receive(ctx context.Context) (v T, err error) {
	// trigger the race detector if someone tries to call StopReceiving
	// concurrently.
	c.receiverCalls++
	select {
	case <-ctx.Done():
		return v, ctx.Err()
	case v := <-c.vals:
		// Return the consumed buffer slot so a blocked sender may proceed.
		// NOTE(review): this send would panic if StopReceiving already
		// closed c.slots — confirm Receive is never called afterwards.
		c.slots <- struct{}{}
		return v, nil
	}
}
// StopReceiving will cause all currently blocked and future
// sends to return false. StopReceiving will return what
// remains in the queue.
func (c *ReceiverClosableChan[T]) StopReceiving() (drained []T) {
	// trigger the race detector if someone tries to call Receive concurrently.
	c.receiverCalls++
	// Closing slots makes every pending and future BlockingSend fail fast.
	close(c.slots)
	// Each token still in slots is a buffer position no sender claimed.
	// After draining them, outstanding equals the number of values that
	// senders have placed (or are committed to placing) into vals.
	for range c.slots {
		c.outstanding--
	}
	// Collect exactly those outstanding values; senders that already took a
	// slot complete their c.vals send, so each receive here terminates.
	for c.outstanding > 0 {
		drained = append(drained, <-c.vals)
		c.outstanding--
	}
	return drained
}
|
package server
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/gempir/gempbot/internal/api"
"github.com/gempir/gempbot/internal/log"
"github.com/gempir/gempbot/internal/store"
)
// BotConfigHandler serves GET (read) and POST (update) of a user's bot
// configuration. The caller is authenticated first; with the "managing"
// query parameter set, the request operates on a managed channel, provided
// the caller is an editor of it. On a POST the bot additionally joins or
// parts the owner's channel depending on the saved JoinBot flag.
func (a *Api) BotConfigHandler(w http.ResponseWriter, r *http.Request) {
	authResp, _, apiErr := a.authClient.AttemptAuth(r, w)
	if apiErr != nil {
		// AttemptAuth has already written an error response.
		return
	}
	userID := authResp.Data.UserID
	ownerLogin := authResp.Data.Login

	if r.URL.Query().Get("managing") != "" {
		userID, apiErr = a.userAdmin.CheckEditor(r, a.userAdmin.GetUserConfig(userID))
		if apiErr != nil {
			http.Error(w, apiErr.Error(), apiErr.Status())
			return
		}
		uData, err := a.helixClient.GetUserByUserID(userID)
		if err != nil {
			api.WriteJson(w, fmt.Errorf("could not find managing user in helix"), http.StatusBadRequest)
			return
		}
		ownerLogin = uData.Login
	}

	switch r.Method {
	case http.MethodGet:
		cfg, err := a.db.GetBotConfig(userID)
		if err != nil {
			// A missing config is non-fatal; the zero config is returned.
			log.Error(err)
		}
		api.WriteJson(w, cfg, http.StatusOK)
	case http.MethodPost:
		body, err := io.ReadAll(r.Body)
		if err != nil {
			log.Errorf("Failed reading update body: %s", err)
			// BUG FIX: this error previously claimed an "unmarshalling"
			// failure even though reading the body failed.
			api.WriteJson(w, fmt.Errorf("failure reading config body: %s", err), http.StatusInternalServerError)
			return
		}
		var botCfg store.BotConfig
		if err := json.Unmarshal(body, &botCfg); err != nil {
			log.Errorf("Failed unmarshalling botConfig: %s", err)
			api.WriteJson(w, fmt.Errorf("failure unmarshalling config: %s", err), http.StatusInternalServerError)
			return
		}
		// Never trust the owner ID supplied in the payload.
		botCfg.OwnerTwitchID = userID
		if dbErr := a.db.SaveBotConfig(context.Background(), botCfg); dbErr != nil {
			log.Error(dbErr)
			api.WriteJson(w, fmt.Errorf("failed to save bot config"), http.StatusInternalServerError)
			return
		}
		if botCfg.JoinBot {
			a.bot.Join(ownerLogin)
		} else {
			a.bot.Part(ownerLogin)
		}
	default:
		http.Error(w, "unknown method", http.StatusMethodNotAllowed)
	}
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util_test
import (
"github.com/argoproj/argo/v2/pkg/apis/workflow/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1"
"github.com/gardener/test-infra/pkg/common"
"github.com/gardener/test-infra/pkg/util"
)
// Specs for util.TestrunStatusPhase: a failing system step (marked via
// newStepStatus's system flag) must not fail the testrun, a failing
// non-system step must, and runs whose steps are all init/skipped keep the
// testrun's own phase.
var _ = Describe("testrun util test", func() {
	Context("TestrunStatusPhase", func() {
		It("should return success when the testrun was successfull", func() {
			tr := &v1beta1.Testrun{Status: v1beta1.TestrunStatus{Phase: v1beta1.PhaseStatusSuccess}}
			Expect(util.TestrunStatusPhase(tr)).To(Equal(v1beta1.PhaseStatusSuccess))
		})
		It("should return success even if a system component failed", func() {
			tr := &v1beta1.Testrun{Status: v1beta1.TestrunStatus{
				Phase: v1beta1.PhaseStatusError,
				Steps: []*v1beta1.StepStatus{
					newStepStatus(v1beta1.PhaseStatusSuccess, false),
					newStepStatus(v1beta1.PhaseStatusError, true),
				},
			}}
			Expect(util.TestrunStatusPhase(tr)).To(Equal(v1beta1.PhaseStatusSuccess))
		})
		It("should return error if one non system step fails", func() {
			tr := &v1beta1.Testrun{Status: v1beta1.TestrunStatus{
				Phase: v1beta1.PhaseStatusError,
				Steps: []*v1beta1.StepStatus{
					newStepStatus(v1beta1.PhaseStatusSuccess, true),
					newStepStatus(v1beta1.PhaseStatusError, false),
				},
			}}
			Expect(util.TestrunStatusPhase(tr)).To(Equal(v1beta1.PhaseStatusError))
		})
		It("should return the testrun state if all steps are in init state", func() {
			tr := &v1beta1.Testrun{Status: v1beta1.TestrunStatus{
				Phase: v1beta1.PhaseStatusError,
				Steps: []*v1beta1.StepStatus{
					newStepStatus(v1beta1.PhaseStatusInit, true),
					newStepStatus(v1beta1.PhaseStatusInit, false),
				},
			}}
			Expect(util.TestrunStatusPhase(tr)).To(Equal(v1beta1.PhaseStatusError))
		})
		It("should return the testrun state if all steps are in skipped state", func() {
			tr := &v1beta1.Testrun{Status: v1beta1.TestrunStatus{
				Phase: v1beta1.PhaseStatusError,
				Steps: []*v1beta1.StepStatus{
					newStepStatus(v1beta1.PhaseStatusSkipped, true),
					newStepStatus(v1beta1.PhaseStatusSkipped, false),
				},
			}}
			Expect(util.TestrunStatusPhase(tr)).To(Equal(v1beta1.PhaseStatusError))
		})
	})
})
// newStepStatus builds a StepStatus in the given phase, annotating it as a
// system step when system is true.
func newStepStatus(phase v1alpha1.NodePhase, system bool) *v1beta1.StepStatus {
	annotations := map[string]string{}
	if system {
		annotations[common.AnnotationSystemStep] = "true"
	}
	return &v1beta1.StepStatus{
		Phase:       phase,
		Annotations: annotations,
	}
}
|
package main
import (
"net/http"
"encoding/json"
"gomail-master"
"crypto/tls"
)
// MailSetting holds the SMTP configuration plus the last known send status.
// The same struct doubles as the (redacted) JSON response shape used by the
// config handlers, which only populate From, To and Status.
type MailSetting struct {
	From string `json:"from"`
	To string `json:"to"`
	Text string `json:"text"`
	SmtpServer string `json:"smtp_server"`
	SmtpPort int `json:"smtp_port"`
	UserName string `json:"user_name"`
	Password string `json:"password"`
	Status bool `json:"status"`
}

// mailSetting is the process-wide mail configuration mutated by the HTTP
// handlers below. NOTE(review): access is not synchronized — confirm the
// handlers cannot run concurrently.
var mailSetting MailSetting
// SetMailConfig handles POST requests that replace the global mail settings
// with the JSON body, echoing back a redacted view (from/to/status only —
// never the credentials). Non-POST requests are ignored, preserving the
// original handler's behavior.
func SetMailConfig(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		return
	}
	// BUG FIX: a malformed request body previously panicked and could crash
	// the server; report 400 Bad Request to the client instead.
	if err := json.NewDecoder(r.Body).Decode(&mailSetting); err != nil {
		http.Error(w, "invalid mail settings: "+err.Error(), http.StatusBadRequest)
		return
	}
	resp := MailSetting{From: mailSetting.From, To: mailSetting.To, Status: mailSetting.Status}
	json.NewEncoder(w).Encode(resp)
}
// GetMailConfig returns the current mail settings in redacted form
// (from/to/status only). NOTE: it intentionally answers only POST requests,
// matching the other handlers in this file.
func GetMailConfig(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		return
	}
	resp := MailSetting{From: mailSetting.From, To: mailSetting.To, Status: mailSetting.Status}
	json.NewEncoder(w).Encode(resp)
}
// TestMailConfig handles POST requests asking for a configuration test.
// When the body carries {"msg":"testConfig"} it attempts to send a test
// mail, records the outcome in mailSetting.Status, and echoes the redacted
// settings back to the client.
func TestMailConfig(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		return
	}
	var msg message
	// BUG FIX: a malformed body previously panicked and could crash the
	// server; report 400 Bad Request instead.
	if err := json.NewDecoder(r.Body).Decode(&msg); err != nil {
		http.Error(w, "invalid request: "+err.Error(), http.StatusBadRequest)
		return
	}
	if msg.Msg != "testConfig" {
		return
	}
	// Status reflects whether a test mail could be sent with the current
	// settings. The success and failure paths previously duplicated the
	// response encoding; they are folded together here.
	mailSetting.Status = SendMail("", "TestMonitorMessage") == nil
	resp := MailSetting{From: mailSetting.From, To: mailSetting.To, Status: mailSetting.Status}
	json.NewEncoder(w).Encode(resp)
}
func SendMail(Text, Subject string) error {
m := gomail.NewMessage()
m.SetHeader("From", mailSetting.From)
m.SetHeader("To", mailSetting.To)
m.SetHeader("Subject", Subject)
m.SetBody("text/html", Text)
d := gomail.NewDialer(mailSetting.SmtpServer, mailSetting.SmtpPort, mailSetting.UserName, mailSetting.Password)
d.TLSConfig = &tls.Config{InsecureSkipVerify: true}
err := d.DialAndSend(m)
if err != nil {
return err
} else {
return err
}
} |
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crd
import (
"encoding/json"
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/ghodss/yaml"
"github.com/spf13/afero"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const (
	// defPrefix is the JSON-schema path prefix used for internal $ref links.
	defPrefix = "#/definitions/"
	// inlineTag is the struct-tag option marking an embedded/inlined field.
	inlineTag = "inline"
)
// removeChar substitutes every occurrence of removedStr in str with a single
// space. Despite the name it replaces rather than deletes, so whitespace
// token boundaries are preserved for later Sscanf-style parsing.
func removeChar(str string, removedStr string) string {
	return strings.Replace(str, removedStr, " ", -1)
}
// This is a hacky function that does the one job of
// extracting the tag values in the structs
// Example struct:
// type MyType struct {
// MyField string `yaml:"myField,omitempty"`
// }
//
// From the above example struct, we need to extract
// and return this: ("myField", "omitempty")
func extractFromTag(tag *ast.BasicLit) (string, string) {
if tag == nil || tag.Value == "" {
return "", ""
}
tagValue := tag.Value
tagValue = removeChar(tagValue, "`")
tagValue = removeChar(tagValue, `"`)
tagValue = strings.TrimSpace(tagValue)
var tagContent, tagKey string
fmt.Sscanf(tagValue, `%s %s`, &tagKey, &tagContent)
if tagKey != "json:" && tagKey != "yaml:" {
return "", ""
}
if strings.Contains(tagContent, ",") {
splitContent := strings.Split(tagContent, ",")
return splitContent[0], splitContent[1]
}
return tagContent, ""
}
// exprToSchema converts ast.Expr to JSONSchemaProps, dispatching on the
// concrete expression type. It also returns references to types declared in
// other packages that the resulting schema depends on.
func (f *file) exprToSchema(t ast.Expr, doc string, comments []*ast.CommentGroup) (*v1beta1.JSONSchemaProps, []TypeReference) {
	var def *v1beta1.JSONSchemaProps
	var externalTypeRefs []TypeReference

	switch tt := t.(type) {
	case *ast.Ident:
		def = f.identToSchema(tt, comments)
	case *ast.ArrayType:
		def, externalTypeRefs = f.arrayTypeToSchema(tt, doc, comments)
	case *ast.MapType:
		def = f.mapTypeToSchema(tt, doc, comments)
	case *ast.SelectorExpr:
		def, externalTypeRefs = f.selectorExprToSchema(tt, comments)
	case *ast.StarExpr:
		// Pointers are transparent in JSON schema; describe the pointee.
		def, externalTypeRefs = f.exprToSchema(tt.X, "", comments)
	case *ast.StructType:
		def, externalTypeRefs = f.structTypeToSchema(tt)
	case *ast.InterfaceType: // TODO: handle interface if necessary.
		return &v1beta1.JSONSchemaProps{}, []TypeReference{}
	default:
		// BUG FIX: previously an unhandled expression type (e.g. a func or
		// chan field) left def nil and the assignment below panicked.
		return &v1beta1.JSONSchemaProps{}, []TypeReference{}
	}

	def.Description = filterDescription(doc)

	return def, externalTypeRefs
}
// identToSchema converts ast.Ident to JSONSchemaProps. Simple (primitive)
// type names map directly to a JSON type; any other name becomes a $ref to
// a definition within the current package.
func (f *file) identToSchema(ident *ast.Ident, comments []*ast.CommentGroup) *v1beta1.JSONSchemaProps {
	def := new(v1beta1.JSONSchemaProps)
	if !isSimpleType(ident.Name) {
		def.Ref = getPrefixedDefLink(ident.Name, f.pkgPrefix)
	} else {
		def.Type = jsonifyType(ident.Name)
	}
	processMarkersInComments(def, comments...)
	return def
}
// selectorExprToSchema converts ast.SelectorExpr (a qualified name such as
// metav1.Time) to JSONSchemaProps. Well-known Kubernetes types are mapped
// to their canonical JSON forms; anything else becomes a $ref plus an
// external type reference for later resolution.
// (The comment previously misnamed this function identToSchema.)
func (f *file) selectorExprToSchema(selectorType *ast.SelectorExpr, comments []*ast.CommentGroup) (*v1beta1.JSONSchemaProps, []TypeReference) {
	pkgAlias := selectorType.X.(*ast.Ident).Name
	typeName := selectorType.Sel.Name
	typ := TypeReference{
		TypeName: typeName,
		PackageName: f.importPaths[pkgAlias],
	}
	// Special-cased types with fixed JSON representations.
	time := TypeReference{TypeName: "Time", PackageName: "k8s.io/apimachinery/pkg/apis/meta/v1"}
	duration := TypeReference{TypeName: "Duration", PackageName: "k8s.io/apimachinery/pkg/apis/meta/v1"}
	quantity := TypeReference{TypeName: "Quantity", PackageName: "k8s.io/apimachinery/pkg/api/resource"}
	unstructured := TypeReference{TypeName: "Unstructured", PackageName: "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"}
	rawExtension := TypeReference{TypeName: "RawExtension", PackageName: "k8s.io/apimachinery/pkg/runtime"}
	intOrString := TypeReference{TypeName: "IntOrString", PackageName: "k8s.io/apimachinery/pkg/util/intstr"}
	switch typ {
	case time:
		return &v1beta1.JSONSchemaProps{
			Type: "string",
			Format: "date-time",
		}, []TypeReference{}
	case duration:
		return &v1beta1.JSONSchemaProps{
			Type: "string",
		}, []TypeReference{}
	case quantity:
		return &v1beta1.JSONSchemaProps{
			Type: "string",
		}, []TypeReference{}
	case unstructured, rawExtension:
		return &v1beta1.JSONSchemaProps{
			Type: "object",
		}, []TypeReference{}
	case intOrString:
		return &v1beta1.JSONSchemaProps{
			AnyOf: []v1beta1.JSONSchemaProps{
				{
					Type: "string",
				},
				{
					Type: "integer",
				},
			},
		}, []TypeReference{}
	}
	def := &v1beta1.JSONSchemaProps{
		Ref: getPrefixedDefLink(typeName, f.importPaths[pkgAlias]),
	}
	processMarkersInComments(def, comments...)
	// The import alias (not the full path) is recorded here on purpose;
	// parseTypesInFile later rewrites aliases to full import paths.
	return def, []TypeReference{{TypeName: typeName, PackageName: pkgAlias}}
}
// arrayTypeToSchema converts ast.ArrayType to JSONSchemaProps by examining
// the element type of the array.
func (f *file) arrayTypeToSchema(arrayType *ast.ArrayType, doc string, comments []*ast.CommentGroup) (*v1beta1.JSONSchemaProps, []TypeReference) {
	// The doc string describes the array field itself, so the element
	// schema is built without it (not passing doc down to exprToSchema).
	items, extRefs := f.exprToSchema(arrayType.Elt, "", comments)
	processMarkersInComments(items, comments...)
	// TODO: clear the schema on the parent level, since it is on the children level.
	return &v1beta1.JSONSchemaProps{
		Type:        "array",
		Items:       &v1beta1.JSONSchemaPropsOrArray{Schema: items},
		Description: doc,
	}, extRefs
}
// mapTypeToSchema converts ast.MapType to JSONSchemaProps. JSON object keys
// are implicitly strings; the map's value type is reflected through
// additionalProperties.
func (f *file) mapTypeToSchema(mapType *ast.MapType, doc string, comments []*ast.CommentGroup) *v1beta1.JSONSchemaProps {
	def := &v1beta1.JSONSchemaProps{}
	switch mapType.Value.(type) {
	case *ast.Ident:
		valueType := mapType.Value.(*ast.Ident)
		if def.AdditionalProperties == nil {
			def.AdditionalProperties = &v1beta1.JSONSchemaPropsOrBool{}
		}
		def.AdditionalProperties.Schema = new(v1beta1.JSONSchemaProps)
		if isSimpleType(valueType.Name) {
			def.AdditionalProperties.Schema.Type = valueType.Name
		} else {
			def.AdditionalProperties.Schema.Ref = getPrefixedDefLink(valueType.Name, f.pkgPrefix)
		}
	case *ast.InterfaceType:
		// NOTE(review): despite the old "No op" comment this panics —
		// interface-valued maps appear to be unsupported; confirm intended.
		panic("Map Interface Type")
	}
	def.Type = "object"
	def.Description = doc
	processMarkersInComments(def, comments...)
	return def
}
// structTypeToSchema converts ast.StructType to JSONSchemaProps by examining
// each field in the struct. Fields without a json/yaml name (unless inlined)
// and fields named "-" are skipped; fields lacking "omitempty" are listed as
// required; inlined fields are merged via allOf.
func (f *file) structTypeToSchema(structType *ast.StructType) (*v1beta1.JSONSchemaProps, []TypeReference) {
	def := &v1beta1.JSONSchemaProps{
		Type: "object",
	}
	externalTypeRefs := []TypeReference{}
	for _, field := range structType.Fields.List {
		yamlName, option := extractFromTag(field.Tag)
		// Skip unnamed (non-inlined) and explicitly excluded fields.
		if (yamlName == "" && option != inlineTag) || yamlName == "-" {
			continue
		}
		if option != inlineTag && option != "omitempty" {
			def.Required = append(def.Required, yamlName)
		}
		if def.Properties == nil {
			def.Properties = make(map[string]v1beta1.JSONSchemaProps)
		}
		propDef, propExternalTypeDefs := f.exprToSchema(field.Type, field.Doc.Text(), f.commentMap[field])
		externalTypeRefs = append(externalTypeRefs, propExternalTypeDefs...)
		if option == inlineTag {
			// Inlined (embedded) fields contribute their schema via allOf.
			def.AllOf = append(def.AllOf, *propDef)
			continue
		}
		def.Properties[yamlName] = *propDef
	}
	return def, externalTypeRefs
}
// getReachableTypes returns the set of definition names reachable from
// startingTypes by following schema references.
func getReachableTypes(startingTypes map[string]bool, definitions v1beta1.JSONSchemaDefinitions) map[string]bool {
	pruner := DefinitionPruner{definitions, startingTypes}
	return pruner.Prune(true)
}
// file carries the per-source-file context needed while converting AST
// nodes to JSON schema definitions.
type file struct {
	// name prefix of the package
	pkgPrefix string
	// importPaths contains a map from import alias to the import path for the file.
	importPaths map[string]string
	// commentMap is comment mapping for this file.
	commentMap ast.CommentMap
}
// parseTypesInFile parses one Go source file and returns the JSON-schema
// definitions for its type declarations, the external type references each
// definition depends on, and any CRD specs declared via comment markers.
// curPkgPrefix namespaces the definition names; skipCRD disables CRD
// marker processing.
func (pr *prsr) parseTypesInFile(filePath string, curPkgPrefix string, skipCRD bool) (
	v1beta1.JSONSchemaDefinitions, ExternalReferences, crdSpecByKind) {
	// Open the input go file and parse the Abstract Syntax Tree
	fset := token.NewFileSet()
	srcFile, err := pr.fs.Open(filePath)
	// NOTE(review): srcFile is never closed — confirm the generator's
	// short lifetime makes this acceptable.
	if err != nil {
		log.Fatal(err)
	}
	node, err := parser.ParseFile(fset, filePath, srcFile, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	if !skipCRD {
		// process top-level (not tied to a struct field) markers.
		// e.g. group name marker +groupName=<group-name>
		pr.processTopLevelMarkers(node.Comments)
	}
	definitions := make(v1beta1.JSONSchemaDefinitions)
	externalRefs := make(ExternalReferences)
	// Parse import statements to get "alias: pkgName" mapping
	importPaths := make(map[string]string)
	for _, importItem := range node.Imports {
		pathValue := strings.Trim(importItem.Path.Value, "\"")
		if importItem.Name != nil {
			// Process aliased import
			importPaths[importItem.Name.Name] = pathValue
		} else if strings.Contains(pathValue, "/") {
			// Process unnamed imports with "/"
			segments := strings.Split(pathValue, "/")
			importPaths[segments[len(segments)-1]] = pathValue
		} else {
			importPaths[pathValue] = pathValue
		}
	}
	// Create an ast.CommentMap from the ast.File's comments.
	// This helps keeping the association between comments and AST nodes.
	// TODO: if necessary, support our own rules of comments ownership, golang's
	// builtin rules are listed at https://golang.org/pkg/go/ast/#NewCommentMap.
	// It seems it can meet our need at the moment.
	cmap := ast.NewCommentMap(fset, node, node.Comments)
	f := &file{
		pkgPrefix: curPkgPrefix,
		importPaths: importPaths,
		commentMap: cmap,
	}
	crdSpecs := crdSpecByKind{}
	for i := range node.Decls {
		declaration, ok := node.Decls[i].(*ast.GenDecl)
		if !ok {
			continue
		}
		// Skip it if it's not type declaration.
		if declaration.Tok != token.TYPE {
			continue
		}
		// We support the following format
		// // TreeNode doc
		// type TreeNode struct {
		// left, right *TreeNode
		// value *Comparable
		// }
		// but not
		// type (
		// // Point doc
		// Point struct{ x, y float64 }
		// // Point2 doc
		// Point2 struct{ x, y int }
		// )
		// since the latter format is rarely used in k8s.
		if len(declaration.Specs) != 1 {
			continue
		}
		ts := declaration.Specs[0]
		typeSpec, ok := ts.(*ast.TypeSpec)
		if !ok {
			fmt.Printf("spec type is: %T\n", ts)
			continue
		}
		typeName := typeSpec.Name.Name
		typeDescription := declaration.Doc.Text()
		fmt.Println("Generating schema definition for type:", typeName)
		def, refTypes := f.exprToSchema(typeSpec.Type, typeDescription, []*ast.CommentGroup{})
		definitions[getFullName(typeName, curPkgPrefix)] = *def
		externalRefs[getFullName(typeName, curPkgPrefix)] = refTypes
		// Collect the declaration's comment lines for CRD marker parsing.
		var comments []string
		for _, c := range f.commentMap[node.Decls[i]] {
			comments = append(comments, strings.Split(c.Text(), "\n")...)
		}
		if !skipCRD {
			crdSpec := parseCRDs(comments)
			if crdSpec != nil {
				crdSpec.Names.Kind = typeName
				gk := schema.GroupKind{Kind: typeName}
				crdSpecs[gk] = crdSpec
				// TODO: validate the CRD spec for one version.
			}
		}
	}
	// Overwrite import aliases with actual package names
	for typeName := range externalRefs {
		for i, ref := range externalRefs[typeName] {
			externalRefs[typeName][i].PackageName = importPaths[ref.PackageName]
		}
	}
	return definitions, externalRefs, crdSpecs
}
// processTopLevelMarkers process top-level (not tied to a struct field)
// markers, e.g. the group name marker +groupName=<group-name>. Conflicting
// group names within one package are fatal.
func (pr *prsr) processTopLevelMarkers(comments []*ast.CommentGroup) {
	for _, c := range comments {
		commentLines := strings.Split(c.Text(), "\n")
		cs := Comments(commentLines)
		if !cs.hasTag("groupName") {
			continue
		}
		group := cs.getTag("groupName", "=")
		if len(group) == 0 {
			log.Fatalf("can't use an empty name for the +groupName marker")
		}
		if pr.generatorOptions != nil && len(pr.generatorOptions.group) > 0 && group != pr.generatorOptions.group {
			// BUG FIX: the message previously read "%q and %q one package".
			log.Fatalf("can't have different group names %q and %q in one package", pr.generatorOptions.group, group)
		}
		if pr.generatorOptions == nil {
			pr.generatorOptions = &toplevelGeneratorOptions{group: group}
		} else {
			pr.generatorOptions.group = group
		}
	}
}
// listFiles returns the directory and the Go source files of pkgPath via
// go/build. It is declared as a variable so tests can mock it.
var listFiles = func(pkgPath string) (string, []string, error) {
	pkg, err := build.Import(pkgPath, "", 0)
	return pkg.Dir, pkg.GoFiles, err
}
// parseTypesInPackage parses every file of pkgName, prunes definitions not
// reachable from referencedTypes, and recursively parses external packages
// referenced by the surviving definitions. rootPackage suppresses the
// package-name prefix on definition names; skipCRD disables CRD marker
// parsing (always true for child packages).
func (pr *prsr) parseTypesInPackage(pkgName string, referencedTypes map[string]bool, rootPackage, skipCRD bool) (
	v1beta1.JSONSchemaDefinitions, crdSpecByKind) {
	pkgDefs := make(v1beta1.JSONSchemaDefinitions)
	pkgExternalTypes := make(ExternalReferences)
	pkgCRDSpecs := make(crdSpecByKind)
	pkgDir, listOfFiles, err := listFiles(pkgName)
	if err != nil {
		log.Fatal(err)
	}
	pkgPrefix := strings.Replace(pkgName, "/", ".", -1)
	if rootPackage {
		pkgPrefix = ""
	}
	fmt.Println("pkgPrefix=", pkgPrefix)
	for _, fileName := range listOfFiles {
		fmt.Println("Processing file ", fileName)
		fileDefs, fileExternalRefs, fileCRDSpecs := pr.parseTypesInFile(filepath.Join(pkgDir, fileName), pkgPrefix, skipCRD)
		mergeDefs(pkgDefs, fileDefs)
		mergeExternalRefs(pkgExternalTypes, fileExternalRefs)
		mergeCRDSpecs(pkgCRDSpecs, fileCRDSpecs)
	}
	// Add pkg prefix to referencedTypes
	newReferencedTypes := make(map[string]bool)
	for key := range referencedTypes {
		altKey := getFullName(key, pkgPrefix)
		newReferencedTypes[altKey] = referencedTypes[key]
	}
	referencedTypes = newReferencedTypes
	fmt.Println("referencedTypes")
	debugPrint(referencedTypes)
	// Drop definitions (and their external refs) not reachable from the
	// requested starting types.
	allReachableTypes := getReachableTypes(referencedTypes, pkgDefs)
	for key := range pkgDefs {
		if _, exists := allReachableTypes[key]; !exists {
			delete(pkgDefs, key)
			delete(pkgExternalTypes, key)
		}
	}
	fmt.Println("allReachableTypes")
	debugPrint(allReachableTypes)
	fmt.Println("pkgDefs")
	debugPrint(pkgDefs)
	fmt.Println("pkgExternalTypes")
	debugPrint(pkgExternalTypes)
	// Deduplicate the referenced external types per child package.
	uniquePkgTypeRefs := make(map[string]map[string]bool)
	for _, item := range pkgExternalTypes {
		for _, typeRef := range item {
			if _, ok := uniquePkgTypeRefs[typeRef.PackageName]; !ok {
				uniquePkgTypeRefs[typeRef.PackageName] = make(map[string]bool)
			}
			uniquePkgTypeRefs[typeRef.PackageName][typeRef.TypeName] = true
		}
	}
	// Recurse into each referenced child package (never as root, no CRDs).
	for childPkgName := range uniquePkgTypeRefs {
		childTypes := uniquePkgTypeRefs[childPkgName]
		childPkgPr := prsr{fs: pr.fs}
		childDefs, _ := childPkgPr.parseTypesInPackage(childPkgName, childTypes, false, true)
		mergeDefs(pkgDefs, childDefs)
	}
	return pkgDefs, pkgCRDSpecs
}
// SingleVersionOptions configures parsing of a single-version API package.
type SingleVersionOptions struct {
	// InputPackage is the path of the input package that contains source files.
	InputPackage string
	// Types is a list of target types.
	Types []string
	// Flatten contains if we use a flattened structure or an embedded structure.
	Flatten bool
	// fs is provided FS. We can use afero.NewMemFs() for testing.
	fs afero.Fs
}

// WriterOptions configures where and how generated output is written.
type WriterOptions struct {
	// OutputPath is the path that the schema will be written to.
	OutputPath string
	// OutputFormat should be either json or yaml. Default to json
	OutputFormat string
	// defs and crdSpecs are produced by parsing and consumed by write.
	defs v1beta1.JSONSchemaDefinitions
	crdSpecs crdSpecByKind
}

// SingleVersionGenerator combines parse and write options for one run.
type SingleVersionGenerator struct {
	SingleVersionOptions
	WriterOptions
	// outputCRD selects CRD output instead of a plain JSON schema.
	outputCRD bool
}

// toplevelGeneratorOptions holds state gathered from top-level comment
// markers such as +groupName.
type toplevelGeneratorOptions struct {
	group string
}

// prsr is the parser state shared across the files of a package.
type prsr struct {
	generatorOptions *toplevelGeneratorOptions
	fs afero.Fs
}
// Generate validates the options, parses the input package and writes the
// generated schema (or CRDs) to the configured output path.
func (op *SingleVersionGenerator) Generate() {
	if op.InputPackage == "" || op.OutputPath == "" {
		log.Panic("Both input path and output paths need to be set")
	}
	// Default to the real filesystem; tests may inject an in-memory one.
	if op.fs == nil {
		op.fs = afero.NewOsFs()
	}
	// if generating CRD, we should always embed schemas.
	if op.outputCRD {
		op.Flatten = false
	}
	op.defs, op.crdSpecs = op.parse()
	op.write(op.outputCRD, op.Types)
}
// linkCRDSpec attaches the generated JSON schema of each kind to its CRD
// spec and rekeys the specs by the resolved API group. Kinds without
// exactly one version, or without a generated schema, are logged and left
// without a schema.
func (pr *prsr) linkCRDSpec(defs v1beta1.JSONSchemaDefinitions, crdSpecs crdSpecByKind) crdSpecByKind {
	rtCRDSpecs := crdSpecByKind{}
	for gk := range crdSpecs {
		if pr.generatorOptions != nil {
			// A +groupName marker supplies the definitive group.
			crdSpecs[gk].Group = pr.generatorOptions.group
			rtCRDSpecs[schema.GroupKind{Group: pr.generatorOptions.group, Kind: gk.Kind}] = crdSpecs[gk]
		} else {
			rtCRDSpecs[gk] = crdSpecs[gk]
		}
		if len(crdSpecs[gk].Versions) == 0 {
			log.Printf("no version for CRD %q", gk)
			continue
		}
		if len(crdSpecs[gk].Versions) > 1 {
			log.Fatalf("the number of versions in one package is more than 1")
		}
		def, ok := defs[gk.Kind]
		if !ok {
			// BUG FIX: typo "shchema" in the log message.
			log.Printf("can't get json schema for %q", gk)
			continue
		}
		crdSpecs[gk].Versions[0].Schema = &v1beta1.CustomResourceValidation{
			OpenAPIV3Schema: &def,
		}
	}
	return rtCRDSpecs
}
// parse parses the input package into JSON-schema definitions plus CRD
// specs, prunes definitions unreachable from op.Types and, unless Flatten
// is set, embeds referenced schemas into the requested root types.
func (op *SingleVersionOptions) parse() (v1beta1.JSONSchemaDefinitions, crdSpecByKind) {
	startingPointMap := make(map[string]bool)
	for i := range op.Types {
		startingPointMap[op.Types[i]] = true
	}
	pr := prsr{fs: op.fs}
	defs, crdSpecs := pr.parseTypesInPackage(op.InputPackage, startingPointMap, true, false)
	// flattenAllOf only flattens allOf tags
	flattenAllOf(defs)
	reachableTypes := getReachableTypes(startingPointMap, defs)
	for key := range defs {
		if _, exists := reachableTypes[key]; !exists {
			delete(defs, key)
		}
	}
	checkDefinitions(defs, startingPointMap)
	if !op.Flatten {
		// Embed referenced definitions, then keep only the root types.
		defs = embedSchema(defs, startingPointMap)
		newDefs := v1beta1.JSONSchemaDefinitions{}
		for name := range startingPointMap {
			newDefs[name] = defs[name]
		}
		defs = newDefs
	}
	return defs, pr.linkCRDSpec(defs, crdSpecs)
}
// write serializes the parsed definitions to op.OutputPath. With outputCRD
// it emits one CustomResourceDefinition per kind; otherwise it emits a
// single JSON schema whose anyOf lists the requested root types. The format
// is chosen by op.OutputFormat (json by default, yaml on request).
func (op *WriterOptions) write(outputCRD bool, types []string) {
	var toSerilizeList []interface{}
	if outputCRD {
		for gk, spec := range op.crdSpecs {
			crd := &v1beta1.CustomResourceDefinition{
				TypeMeta: metav1.TypeMeta{
					APIVersion: "apiextensions.k8s.io/v1beta1",
					Kind: "CustomResourceDefinition",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name: strings.ToLower(gk.Kind),
					Labels: map[string]string{"controller-tools.k8s.io": "1.0"},
				},
				Spec: *spec,
			}
			toSerilizeList = append(toSerilizeList, crd)
		}
	} else {
		schema := v1beta1.JSONSchemaProps{Definitions: op.defs}
		schema.Type = "object"
		schema.AnyOf = []v1beta1.JSONSchemaProps{}
		for _, typeName := range types {
			schema.AnyOf = append(schema.AnyOf, v1beta1.JSONSchemaProps{Ref: getDefLink(typeName)})
		}
		toSerilizeList = []interface{}{schema}
	}
	// TODO: create dir if it does not exist.
	out, err := os.Create(op.OutputPath)
	if err != nil {
		log.Panic(err)
	}
	// BUG FIX: the file was previously closed inside the JSON loop, so a
	// second document would have been encoded into a closed file. Close
	// exactly once, when all documents are written.
	defer func() {
		if cerr := out.Close(); cerr != nil {
			log.Panic(cerr)
		}
	}()
	// yamlOut accumulates all YAML documents; previously each iteration
	// truncated the file via ioutil.WriteFile, keeping only the last one.
	var yamlOut []byte
	for i := range toSerilizeList {
		switch strings.ToLower(op.OutputFormat) {
		// default to json
		case "json", "":
			enc := json.NewEncoder(out)
			enc.SetIndent("", " ")
			if err := enc.Encode(toSerilizeList[i]); err != nil {
				log.Panic(err)
			}
		case "yaml":
			m, err := yaml.Marshal(toSerilizeList[i])
			if err != nil {
				log.Panic(err)
			}
			if len(yamlOut) > 0 {
				// Separate multiple YAML documents.
				yamlOut = append(yamlOut, []byte("---\n")...)
			}
			yamlOut = append(yamlOut, m...)
		}
	}
	if len(yamlOut) > 0 {
		if err := ioutil.WriteFile(op.OutputPath, yamlOut, 0644); err != nil {
			log.Panic(err)
		}
	}
}
|
package kademlia
import (
"fmt"
"math/big"
"net"
"net/rpc"
"sort"
"time"
)
// node is the internal Kademlia node state.
type node struct {
	IP string // listen address "host:port"
	ID *big.Int // node ID, the hash of IP (see Init)
	kBuckets [B]kBucket // routing table, one bucket per differing-bit position
	Data KVMap // locally stored key/value pairs
	publishMap KVMap // presumably pairs published by this node — TODO confirm
	ON bool // presumably whether the node is running — TODO confirm where toggled
}

// Node wraps the internal node state together with its network listener for
// RPC registration.
type Node struct {
	O node
	Listen net.Listener
}
// Init sets the node's address and ID and allocates its key/value maps.
// The ID is the hash of "localIP:port", so it is stable for an address.
func (o *node) Init(port string) {
	o.IP = GetLocalAddress() + ":" + port
	o.ID = hashString(o.IP)
	o.publishMap.Map = make(map[string]ValueTimePair)
	o.Data.Map = make(map[string]ValueTimePair)
}
// Join bootstraps this node through the peer at addr: the peer is added to
// the routing table and a lookup for its ID populates nearby buckets.
func (o *node) Join(addr string) {
	peer := Contact{hashString(addr), addr}
	o.updateBucket(peer)
	o.iterativeFindNode(peer.Id)
}
// updateBucket records contact t in the k-bucket matching its XOR distance
// from this node. The node never stores itself.
func (o *node) updateBucket(t Contact) {
	if o.ID.Cmp(t.Id) == 0 {
		return
	}
	// Bucket index = position of the highest differing bit (BitLen-1).
	// The distance is non-zero here, so k >= 0.
	k := distance(o.ID, t.Id).BitLen() - 1
	o.kBuckets[k].update(t)
}
// getValue looks up key in the local data store under the store lock and
// reports whether it was present.
func (o *node) getValue(key string) (string, bool) {
	o.Data.lock.Lock()
	defer o.Data.lock.Unlock()
	entry, ok := o.Data.Map[key]
	return entry.val, ok
}
// Ping checks liveness of the node at addr. It first probes raw TCP
// connectivity (up to 3 attempts, half a second each), then issues the
// RPCPing call and, on success, records the peer in the routing table.
func (o *node) Ping(addr string) bool {
	var success bool
	for i := 0; i < 3; i++ {
		// BUG FIX: the channel is buffered so the probe goroutine can
		// always complete its send. Previously an attempt that timed out
		// left the goroutine blocked on an unbuffered send forever
		// (goroutine leak).
		chOK := make(chan bool, 1)
		go func() {
			client, err := rpc.Dial("tcp", addr)
			if err == nil {
				_ = client.Close()
				chOK <- true
			} else {
				chOK <- false
			}
		}()
		select {
		case ok := <-chOK:
			success = ok
		case <-time.After(time.Second / 2):
			// Attempt timed out; try again.
		}
		if success {
			break
		}
	}
	if !success {
		return false
	}
	client, err := Dial(addr)
	if err != nil {
		fmt.Println("Error:", err)
		return false
	}
	var res PingReturn
	err = client.Call("Node.RPCPing", Contact{new(big.Int).Set(o.ID), o.IP}, &res)
	_ = client.Close()
	if err != nil {
		fmt.Println("Error:", err)
		return false
	}
	if res.Success {
		// Record the responsive peer asynchronously.
		go o.updateBucket(res.Header)
	}
	return res.Success
}
// getAlphaNodes returns up to ALPHA contacts close to id. It prefers the
// bucket matching the distance to id; when that bucket has fewer than ALPHA
// entries it falls back to scanning all buckets and sorting by XOR distance.
func (o *node) getAlphaNodes(id *big.Int) []Contact {
	var res []Contact
	// Index of the bucket whose range contains id.
	// NOTE(review): if id equals o.ID the distance is 0 and p becomes -1,
	// which would panic below — confirm callers never pass the node's own ID.
	p := distance(o.ID, id).BitLen() - 1
	o.kBuckets[p].mutex.Lock()
	if o.kBuckets[p].size >= ALPHA {
		for i := 0; i < ALPHA; i++ {
			res = append(res, o.kBuckets[p].arr[i])
		}
		o.kBuckets[p].mutex.Unlock()
		return res
	}
	o.kBuckets[p].mutex.Unlock()
	// Fallback: collect every known contact ...
	var arr []Contact
	for i := 0; i < B; i++ {
		o.kBuckets[i].mutex.Lock()
		for j := 0; j < o.kBuckets[i].size; j++ {
			arr = append(arr, o.kBuckets[i].arr[j])
		}
		o.kBuckets[i].mutex.Unlock()
	}
	// ... and keep the ALPHA closest by XOR distance.
	sort.Slice(arr, func(i, j int) bool {
		return distance(arr[i].Id, id).Cmp(distance(arr[j].Id, id)) < 0
	})
	length := len(arr)
	if length >= ALPHA {
		for i := 0; i < ALPHA; i++ {
			res = append(res, arr[i])
		}
	} else {
		res = arr
	}
	return res
}
// iterativeFindNode performs the Kademlia iterative FIND_NODE lookup for
// id: starting from the ALPHA closest known contacts, each reachable,
// not-yet-visited candidate is asked for its closest nodes, whose replies
// extend the candidate queue until it is exhausted. At most bucketSize
// live contacts are returned, sorted by XOR distance to id.
func (o *node) iterativeFindNode(id *big.Int) []Contact {
	var arr []Contact            // live contacts discovered so far
	MAP := make(map[string]bool) // addresses already queried
	que := o.getAlphaNodes(new(big.Int).Set(id))
	head := 0
	for head < len(que) {
		if MAP[que[head].Ip] == true {
			head++
			continue
		}
		if o.Ping(que[head].Ip) == true {
			MAP[que[head].Ip] = true
			arr = append(arr, que[head])
			client, err := Dial(que[head].Ip)
			if err != nil {
				fmt.Println("Error:", err)
				// Already marked visited, so the MAP check above advances
				// head on the next pass — no infinite loop.
				continue
			}
			var res FindNodeReturn
			err = client.Call("Node.RPCFindNode", FindNodeRequest{
				Header: Contact{new(big.Int).Set(o.ID), o.IP},
				Id:     id,
			}, &res)
			_ = client.Close()
			if err != nil {
				fmt.Println("Error:", err)
				continue
			}
			go o.updateBucket(res.Header)
			// Widen the search with the peer's closest contacts.
			for _, v := range res.Closest {
				que = append(que, v)
			}
		}
		head++
	}
	sort.Slice(arr, func(i, j int) bool {
		return distance(arr[i].Id, id).Cmp(distance(arr[j].Id, id)) < 0
	})
	if len(arr) >= bucketSize {
		var res []Contact
		for i := 0; i < bucketSize; i++ {
			res = append(res, arr[i])
		}
		return res
	} else {
		return arr
	}
}
// iterativeFindValue performs the Kademlia FIND_VALUE lookup for arg.Key.
// It walks outward from the ALPHA closest known contacts; peers lacking
// the value reply with their closest contacts, which extend the search
// queue. On success the value is additionally cached at the closest
// queried peer that did not hold it, and (value, true) is returned;
// otherwise ("", false).
func (o *node) iterativeFindValue(arg FindValueRequest) (string, bool) {
	var arr []Contact // queried peers that did not hold the value
	MAP := make(map[string]bool)
	que := o.getAlphaNodes(new(big.Int).Set(arg.HashId))
	head := 0
	for head < len(que) {
		if MAP[que[head].Ip] == true {
			head++
			continue
		}
		if o.Ping(que[head].Ip) == true {
			MAP[que[head].Ip] = true
			client, err := Dial(que[head].Ip)
			if err != nil {
				fmt.Println("Error:", err)
				continue
			}
			var res FindValueReturn
			err = client.Call("Node.RPCFindValue", FindValueRequest{
				Header: Contact{new(big.Int).Set(o.ID), o.IP},
				HashId: arg.HashId,
				Key:    arg.Key,
			}, &res)
			_ = client.Close()
			if err != nil {
				fmt.Println("Error:", err)
				continue
			}
			go o.updateBucket(res.Header)
			if res.Closest == nil { // already got the value
				sort.Slice(arr, func(i, j int) bool {
					return distance(arr[i].Id, arg.HashId).Cmp(distance(arr[j].Id, arg.HashId)) < 0
				})
				if len(arr) > 0 { // cache at the closest peer lacking the value
					go func() {
						client, err := Dial(arr[0].Ip)
						if err != nil {
							fmt.Println("Error:", err)
							// Fix: bail out here. The original fell
							// through and called through a nil client,
							// panicking the goroutine.
							return
						}
						var storeReturn StoreReturn
						err = client.Call("Node.RPCStore", StoreRequest{
							Header: Contact{new(big.Int).Set(o.ID), o.IP},
							Pair:   KVPair{arg.Key, res.Val},
							Expire: time.Now().Add(tExpire),
						}, &storeReturn)
						_ = client.Close()
						if err != nil {
							fmt.Println("Error:", err)
							return
						}
						go o.updateBucket(storeReturn.Header)
					}()
				}
				return res.Val, true
			}
			// Value not found here: widen the search and remember this
			// peer as a caching candidate.
			que = append(que, res.Closest...)
			arr = append(arr, que[head])
		}
		head++
	}
	return "", false
}
// iterativeStore replicates arg.Pair onto the nodes closest to the key's
// hash, as discovered by an iterative node lookup. It reports true when
// at least one remote store succeeded.
func (o *node) iterativeStore(arg StoreRequest) bool {
	hash := hashString(arg.Pair.Key)
	closest := o.iterativeFindNode(new(big.Int).Set(hash))
	success := false
	for _, t := range closest {
		client, err := Dial(t.Ip)
		if err != nil {
			fmt.Println("Error:", err)
			continue
		}
		var res StoreReturn
		err = client.Call("Node.RPCStore", StoreRequest{
			Header: Contact{new(big.Int).Set(o.ID), o.IP},
			Pair:   arg.Pair,
			// A fresh expiry is set per store, not copied from arg.
			Expire: time.Now().Add(tExpire),
		}, &res)
		_ = client.Close()
		if err != nil {
			fmt.Println("Error:", err)
			continue
		}
		go o.updateBucket(res.Header)
		if res.Success == true {
			success = true
		}
	}
	return success
}
// Publish stores the key/value pair on the closest nodes in the network.
// When firstTime is set, the pair is also recorded in the local
// publishMap so the Republish loop keeps it alive. Always returns true,
// regardless of whether the network store succeeded.
func (o *node) Publish(key, value string, firstTime bool) bool {
	o.iterativeStore(StoreRequest{
		Header: Contact{new(big.Int).Set(o.ID), o.IP},
		Pair:   KVPair{key, value},
		Expire: time.Now().Add(tExpire),
	})
	if firstTime == true {
		o.publishMap.lock.Lock()
		o.publishMap.Map[key] = ValueTimePair{
			val:           value,
			expireTime:    time.Now().Add(tExpire),
			replicateTime: time.Time{},
		}
		o.publishMap.lock.Unlock()
	}
	return true
}
// GetValue resolves key by checking, in order: the local data store, the
// local publish map, and finally the network via an iterative FIND_VALUE.
func (o *node) GetValue(key string) (string, bool) {
	o.Data.lock.Lock()
	val, ok := o.Data.Map[key]
	if ok == true {
		o.Data.lock.Unlock()
		return val.val, true
	}
	o.Data.lock.Unlock()
	o.publishMap.lock.Lock()
	val, ok = o.publishMap.Map[key]
	if ok == true {
		o.publishMap.lock.Unlock()
		return val.val, true
	}
	o.publishMap.lock.Unlock()
	return o.iterativeFindValue(FindValueRequest{
		Header: Contact{new(big.Int).Set(o.ID), o.IP},
		HashId: hashString(key),
		Key:    key,
	})
}
// Republish periodically re-stores every pair this node originated whose
// expiry has passed, keeping the node's own data alive in the network.
// It runs until o.ON is cleared.
func (o *node) Republish() {
	for o.ON {
		o.publishMap.lock.Lock()
		for k, v := range o.publishMap.Map {
			if !o.ON {
				// Fix: release the lock before bailing out. The original
				// returned while still holding publishMap.lock, deadlocking
				// every later Publish/GetValue.
				o.publishMap.lock.Unlock()
				return
			}
			if time.Now().After(v.expireTime) {
				// firstTime=false, so Publish does not re-acquire this lock.
				o.Publish(k, v.val, false)
				// Fix: write the refreshed expiry back into the map. The
				// original assigned to the range-value copy, a no-op that
				// made every pass republish everything.
				v.expireTime = time.Now().Add(tExpire)
				o.publishMap.Map[k] = v
			}
		}
		o.publishMap.lock.Unlock()
		time.Sleep(tRepublish)
	}
}
// ExpireReplicate periodically scans the local data store: expired
// entries are dropped, and entries whose replicate timer has fired are
// re-stored on the closest nodes with the timer reset. Runs until o.ON
// is cleared.
func (o *node) ExpireReplicate() {
	for o.ON {
		replicate := make([]StoreRequest, 0)
		o.Data.lock.Lock()
		for k, v := range o.Data.Map {
			if !o.ON {
				// Fix: release the lock before bailing out; the original
				// returned while still holding Data.lock.
				o.Data.lock.Unlock()
				return
			}
			if time.Now().After(v.expireTime) {
				delete(o.Data.Map, k)
			} else if !v.replicateTime.IsZero() && time.Now().After(v.replicateTime) {
				replicate = append(replicate, StoreRequest{
					Header: Contact{new(big.Int).Set(o.ID), o.IP},
					Pair:   KVPair{k, v.val},
					Expire: v.expireTime,
				})
			}
		}
		o.Data.lock.Unlock()
		for _, v := range replicate {
			// iterativeStore does network I/O, so the lock is held only
			// around the map write-back.
			o.iterativeStore(v)
			// Fix: the original mutated Data.Map here without the lock —
			// a data race against getValue/GetValue.
			o.Data.lock.Lock()
			o.Data.Map[v.Pair.Key] = ValueTimePair{
				val:           v.Pair.Val,
				expireTime:    v.Expire,
				replicateTime: time.Time{},
			}
			o.Data.lock.Unlock()
		}
		time.Sleep(tCheck)
	}
}
// Refresh periodically walks every k-bucket and, for each bucket not
// updated within tRefresh, runs a node lookup to repopulate it. Runs
// until o.ON is cleared.
// NOTE(review): the lookup target 2^i is an absolute ID, not an ID at
// distance 2^i from o.ID as classic Kademlia refresh prescribes —
// confirm this is intended.
func (o *node) Refresh() {
	for o.ON {
		for i := 0; i < B; i++ {
			if o.ON == false {
				return
			}
			if o.kBuckets[i].latestUpdate.Add(tRefresh).Before(time.Now()) {
				o.iterativeFindNode(new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(i)), nil))
			}
		}
		time.Sleep(tCheck)
	}
}
|
package main
import (
"fmt"
"net/http"
"net/url"
_ "github.com/hiromisuzuki/clean-arch-example/docs" // docs is generated by Swag CLI, you have to import it.
"github.com/hiromisuzuki/clean-arch-example/app/infrastructure"
"github.com/spf13/viper"
"github.com/swaggo/http-swagger"
)
// init loads the service configuration before main runs; the process
// panics if the config file cannot be read, since nothing works without it.
func init() {
	//TODO:change ENV
	viper.SetConfigFile(`config/local.json`)
	err := viper.ReadInConfig()
	if err != nil {
		panic(err)
	}
	if viper.GetBool(`debug`) {
		fmt.Println("Service RUN on DEBUG mode")
	}
}
// @title clean architecture example
// @version 2.0
// @description hiromisuzuki/clean-arch-example generated docs.
// @termsOfService http://swagger.io/terms/
// @contact.name API Support
// @contact.url http://www.swagger.io/support
// @contact.email support@swagger.io
// @license.name Apache 2.0
// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
// @host localhost:8080
// @BasePath /
// main builds the router (with DB config from viper), mounts the Swagger
// UI, and serves HTTP on the configured address.
func main() {
	r := infrastructure.NewRouter(sqlConfig())
	r.Get("/swagger/*", httpSwagger.WrapHandler)
	// ListenAndServe only returns on failure; surface that error instead
	// of silently exiting with status 0 as the original did.
	if err := http.ListenAndServe(viper.GetString(`server.address`), r); err != nil {
		panic(err)
	}
}
// sqlConfig assembles the database connection settings from the loaded
// viper configuration, including the DSN options (parseTime, loc).
func sqlConfig() *infrastructure.SQLConfig {
	c := &infrastructure.SQLConfig{
		DBMS:     viper.GetString(`database.dbms`),
		Host:     viper.GetString(`database.host`),
		Port:     viper.GetString(`database.port`),
		User:     viper.GetString(`database.user`),
		Password: viper.GetString(`database.pass`),
		DBName:   viper.GetString(`database.name`),
	}
	opt := &url.Values{}
	opt.Add("parseTime", viper.GetString(`database.options.parse_time`))
	opt.Add("loc", viper.GetString(`database.options.loc`))
	c.Options = opt
	return c
}
|
package middle
import (
"time"
jwtlib "github.com/dgrijalva/jwt-go"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
"github.com/shandysiswandi/echo-service/pkg/is"
)
type (
	// UserJWT is the user identity embedded in the token payload.
	UserJWT struct {
		ID            int    `json:"id"`
		Email         string `json:"email"`
		CompanyID     int    `json:"company_id"`
		UserCompanyID int    `json:"user_company_id"`
	}

	// JWTClaim is the full claim set carried by tokens issued and
	// verified by this package; it extends the standard JWT claims.
	JWTClaim struct {
		jwtlib.StandardClaims
		SessID         int         `json:"sess_id"`
		User           UserJWT     `json:"user"`
		Lang           interface{} `json:"lang"`
		SessionSetting int         `json:"session_setting"`
	}
)
// JWT returns Echo middleware that validates bearer tokens signed with
// key, decoding the payload into JWTClaim. Request paths listed in wl
// are whitelisted and skip validation entirely.
func JWT(key string, wl ...string) echo.MiddlewareFunc {
	jwtCfg := middleware.JWTConfig{
		Claims:     &JWTClaim{},
		SigningKey: []byte(key),
		Skipper: func(c echo.Context) bool {
			path := c.Request().URL.Path
			return is.InArrayString(wl, path)
		},
	}
	return middleware.JWTWithConfig(jwtCfg)
}
// CreateJWTToken issues an HS256-signed token with fixed demo claims
// (user ID 1, 30-day expiry). The session ID is the current nanosecond,
// so it is not guaranteed unique — suitable for testing only.
func CreateJWTToken(key string) (string, error) {
	// Set custom claims
	claims := &JWTClaim{
		SessID: time.Now().Nanosecond(),
		User: UserJWT{
			ID:            1,
			Email:         "",
			CompanyID:     1,
			UserCompanyID: 1,
		},
		Lang:           nil,
		SessionSetting: 1,
		StandardClaims: jwtlib.StandardClaims{
			Audience:  "audien",
			ExpiresAt: time.Now().Add(30 * 24 * time.Hour).Unix(),
			Id:        "id",
			IssuedAt:  time.Now().Unix(),
			Issuer:    "issuer",
			NotBefore: 0,
			Subject:   "subject",
		},
	}
	// Create token with claims
	token := jwtlib.NewWithClaims(jwtlib.SigningMethodHS256, claims)
	return token.SignedString([]byte(key))
}
|
package http
import (
"github.com/gin-gonic/gin"
"github.com/saxon134/workflow/controller"
)
// initRoutes registers every HTTP endpoint of the workflow service on the
// given router group.
// NOTE(review): AuthSign appears to guard only signature-checked public
// endpoints while AuthMs also requires a session — inferred from naming;
// confirm against the middleware definitions.
func initRoutes(group *gin.RouterGroup) {
	// public: upload and login
	Post(group, "upload.file", controller.Upload, AuthSign)
	Post(group, "user.login", controller.UserLogin, AuthSign)
	// users
	Gett(group, "user.list", controller.UserList, AuthMs)
	Gett(group, "user.menus", controller.UserMenus, AuthMs)
	Post(group, "user.save", controller.UserSave, AuthMs)
	Post(group, "user.modify", controller.UserModify, AuthMs)
	// projects
	Gett(group, "project.list", controller.ProjectList, AuthMs)
	Gett(group, "project.brief", controller.ProjectBrief, AuthMs)
	Post(group, "project.save", controller.ProjectSave, AuthMs)
	// demands, tags, chats
	Gett(group, "demand.list", controller.DemandList, AuthMs)
	Gett(group, "demand", controller.DemandDetail, AuthMs)
	Post(group, "demand.add", controller.DemandAdd, AuthMs)
	Post(group, "demand.update", controller.DemandUpdate, AuthMs)
	Gett(group, "demand.tag.list", controller.TagList, AuthMs)
	Post(group, "demand.tag.save", controller.TagSave, AuthMs)
	Gett(group, "demand.chat.list", controller.ChatList, AuthMs)
	Post(group, "demand.chat.save", controller.ChatSave, AuthMs)
	Post(group, "demand.chat.update", controller.ChatUpdate, AuthMs)
	Post(group, "demand.chat.resource.update", controller.ResourceUpdate, AuthMs)
	// jobs
	Gett(group, "job.list", controller.JobList, AuthMs)
	Post(group, "job.save", controller.JobSave, AuthMs)
	//todo: bug/case/dashboard endpoints still serve the placeholder Index
	Gett(group, "bug.list", Index, AuthMs)
	Gett(group, "case.list", Index, AuthMs)
	Gett(group, "dashboard", Index, AuthMs)
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"github.com/scjalliance/logmein"
"gopkg.in/gcfg.v1"
)
// config mirrors the gcfg configuration file. Only the [rss] section is
// used: the LogMeIn profile ID and its API key.
type config struct {
	RSS struct {
		ProfileID uint64 `gcfg:"profile"`
		Key       string `gcfg:"key"`
	} `gcfg:"rss"`
}

// configFile is the -c flag: path to the gcfg configuration file.
var configFile = flag.String("c", "config.conf", "Config file")
// main loads the config, does one initial fetch of the LogMeIn computer
// list, then watches for change events forever, printing one summary line
// per event plus a bullet for each detected kind of change.
func main() {
	flag.Parse()
	cfg := new(config)
	err := gcfg.ReadFileInto(cfg, *configFile)
	if err != nil {
		fmt.Println("Config Error: ", err)
		os.Exit(1)
	}
	lmi := logmein.NewLMI(cfg.RSS.ProfileID, cfg.RSS.Key)
	computers := lmi.Fetch()
	for _, computer := range computers {
		log.Println("FETCH: ", computer.Name())
	}
	// Watch pushes change events onto recv until stop is closed (never,
	// here), so the loop below runs for the life of the process.
	recv := make(chan *logmein.Computer)
	stop := make(chan struct{})
	go lmi.Watch(recv, stop, true)
	for {
		computer := <-recv
		fmt.Printf("EVENT [%d]\n\tName: %s\n\tIP: %s\n\tStatus: %d\n\tTimestamp: %s\n", computer.HostID(), computer.Name(), computer.IPAddress(), computer.Status(), computer.Timestamp())
		if computer.Unchanged() {
			fmt.Println("\t• Unchanged")
		}
		if computer.IsDeleted() {
			fmt.Println("\t• IsDeleted")
		}
		if computer.IsNew() {
			fmt.Println("\t• IsNew")
		}
		if computer.IsChangedHostID() {
			fmt.Printf("\t• IsChangedHostID [%d -> %d]\n", computer.OldHostID(), computer.HostID())
		}
		if computer.IsChangedName() {
			fmt.Printf("\t• IsChangedName [%s -> %s]\n", computer.OldName(), computer.Name())
		}
		if computer.IsChangedIPAddress() {
			fmt.Printf("\t• IsChangedIPAddress [%s -> %s]\n", computer.OldIPAddress(), computer.IPAddress())
		}
		if computer.IsChangedStatus() {
			fmt.Printf("\t• IsChangedStatus [%d -> %d]\n", computer.OldStatus(), computer.Status())
		}
		fmt.Println("")
	}
}
|
package path
import (
"testing"
)
// testTable drives TestLCP: each case lists input paths and the expected
// longest common path prefix (empty when the inputs share nothing).
var testTable = []struct {
	in  []string
	out string
}{
	{[]string{}, ""},
	{[]string{"foo"}, "foo"},
	{[]string{"/foo/bar/.."}, "/foo"},
	{[]string{"foo", "bar"}, ""},
	{[]string{"home/khoiracle", "home/khoiracle/foo", "home/khoiracle/bar"}, "home/khoiracle"},
	{[]string{"home/khoiracle/bar/..", "home/khoiracle/foo"}, "home/khoiracle"},
	{[]string{"/abc/bcd/cdf", "/abc/bcd/cdf/foo", "/abc/bcd/chi/hij", "/abc/bcd/cdd"}, "/abc/bcd"},
	{[]string{"./abc/bcd/cdf", "./abc/bcd/cdf/foo", "./abc/bcd/chi/hij", "./abc/bcd/cdd"}, "abc/bcd"},
	{[]string{"/abc/bcd/cdf", "/"}, "/"},
	{[]string{"/abc/def/ghj", "/abc/def"}, "/abc/def"},
	{[]string{"Github/khoi/ios", "Github/khoi/webcontent-ios", "Github/khoi/ios/iosNetworking"}, "Github/khoi"},
}
// TestLCP runs LCP over every table-driven case in testTable.
func TestLCP(t *testing.T) {
	for _, c := range testTable {
		if out := LCP(c.in); out != c.out {
			t.Errorf("Expected %s - Got %s", c.out, out)
		}
	}
}
|
package rdb
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/dongmx/rdb"
"io"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
)
const (
	byteLF = byte('\n') // newline: terminator of each RESP protocol line
)

var (
	bytesSpace = []byte(" ")
	// psyncFullSyncCmd is the pre-encoded RESP command "PSYNC ? -1",
	// which asks the master for a full resynchronization.
	psyncFullSyncCmd = []byte("*3\r\n$5\r\nPSYNC\r\n$1\r\n?\r\n$2\r\n-1\r\n")
)
// Instance is the struct for instance node
type Instance struct {
	Addr     string // address of the source Redis instance
	Target   string // address of the sync target
	tconn    net.Conn
	lock     sync.RWMutex
	conn     net.Conn      // connection to the source Redis
	br       *bufio.Reader // buffered reader over conn
	bw       *bufio.Writer // buffered writer over conn
	barrierC chan struct{} // startup barrier (currently unused — see Sync)
	wg       *sync.WaitGroup
	offset   int64  // replication offset from the PSYNC reply (accessed atomically)
	masterID string // run ID of the master, from the PSYNC reply
}
// Sync runs the replication loop for this instance: it repeatedly
// attempts a full sync with the remote Redis, logging failures and
// retrying every 30 seconds. The owning WaitGroup is signalled when it
// returns (which never happens in normal operation).
func (inst *Instance) Sync() {
	defer inst.wg.Done()
	// Typos fixed in the two log messages below ("tring", "due %s").
	fmt.Printf("trying to sync with remote instance %s\n", inst.Addr)
	// <-inst.barrierC // TODO: purpose unclear — left disabled
	for {
		err := inst.sync()
		if err != nil {
			fmt.Printf("fail to sync redis data due to %s\n", err)
		}
		time.Sleep(time.Second * 30)
	}
}
// sync performs one full synchronization pass against the remote Redis:
// it dials the instance, requests a full resync via PSYNC, skips the RESP
// bulk-string header, then decodes the streamed RDB payload and prints a
// JSON summary of its contents.
func (inst *Instance) sync() (err error) {
	fmt.Printf("starting to sync with remote instance %s\n", inst.Addr)
	defer inst.Close()
	atomic.StoreInt64(&inst.offset, 0)
	conn, err := net.Dial("tcp", inst.Addr) // connect to the remote Redis
	if err != nil {
		return err
	}
	inst.conn = conn
	inst.bw = bufio.NewWriter(conn)
	inst.br = bufio.NewReader(conn)
	// 1. barrier run syncRDB
	// 1.1 "PSYNC ? -1" asks the master for a full resynchronization
	fmt.Printf("start to sync rdb of %s\n", inst.Addr)
	_ = writeAll(psyncFullSyncCmd, inst.bw) // queue the command bytes
	_ = inst.bw.Flush()                     // flush them to the connection
	var data []byte
	data, err = inst.br.ReadBytes(byteLF) // read up to the newline
	if err != nil {
		return
	}
	fmt.Printf("parse paync reply of %s\n", inst.Addr)
	err = inst.parsePSyncReply(data) // first reply line, e.g. "+FULLRESYNC d3c009753b7dc26efbf1b6b04e63a93bcfbfb4b1 0"
	if err != nil {
		return
	}
	// because rdb was transformed by RESP Bulk String, we need ignore first line
	// Skip lines until the bulk-string length header ("$<len>") that
	// precedes the raw RDB bytes.
	/*
		Command: PSYNC ? -1
		+FULLRESYNC d3c009753b7dc26efbf1b6b04e63a93bcfbfb4b1 0
		$206
		REDIS0009� redis-ver5.0.0�
		...
	*/
	for {
		data, err = inst.br.ReadBytes(byteLF) // read up to the newline
		if err != nil {
			return
		}
		// strconv.Quote escapes control and non-printable characters so
		// raw protocol bytes are safe to log.
		fmt.Printf("read new line add %s with %s\n", inst.Addr, strconv.Quote(string(data)))
		if len(data) > 0 && data[0] == byte('$') { // the header terminator, e.g. "$206"
			fmt.Println("读取完PSYNC返回的信息,跳出循环")
			break
		}
	}
	// read full rdb
	//err = inst.syncRDB(inst.Target)
	//for {
	//	data, err = inst.br.ReadBytes(byteLF) // read up to the newline
	//	fmt.Printf(string(data))
	//	time.Sleep(time.Second)
	//}
	decoder := NewDecoder()
	err = rdb.Decode(inst.br, decoder)
	cnt := NewCounter()
	cnt.Count(decoder.Entries)
	saa := GetData(cnt)
	jsonBytes, _ := json.MarshalIndent(saa, "", " ")
	fmt.Println(string(jsonBytes))
	return
}
// parsePSyncReply parses the first line of the PSYNC response, e.g.
// "+FULLRESYNC <runid> <offset>\r\n", storing the master run ID and the
// replication offset on the instance.
func (inst *Instance) parsePSyncReply(data []byte) error {
	fmt.Printf("receive psync data %s reply as %s\n", inst.Addr, strconv.Quote(string(data)))
	splited := bytes.Split(data, bytesSpace) // split on spaces
	// Guard against malformed replies (e.g. "-ERR ..."), which previously
	// caused an index-out-of-range panic.
	if len(splited) < 3 || len(splited[2]) < 2 {
		return fmt.Errorf("unexpected psync reply %q", data)
	}
	runidBs := string(splited[1])                      // master run ID
	offsetBs := string(splited[2][:len(splited[2])-2]) // offset, trailing \r\n stripped
	offset, err := strconv.ParseInt(offsetBs, 10, 64)
	if err != nil {
		return err
	}
	inst.offset = offset
	inst.masterID = runidBs
	return nil
}
// Close tears down the source and target connections if they are open.
// Close errors are deliberately ignored: this runs during teardown and
// retry, where there is nothing useful to do with them.
func (inst *Instance) Close() {
	for _, c := range []net.Conn{inst.conn, inst.tconn} {
		if c != nil {
			_ = c.Close()
		}
	}
}
// syncRDB would stream the received RDB snapshot to the target at addr.
// Currently a stub that only logs (addr is unused).
// NOTE(review): the Printf lacks a trailing newline — confirm intended.
func (inst *Instance) syncRDB(addr string) (err error) {
	fmt.Printf("start syncing rdb for %s", inst.Addr)
	return nil
}
func writeAll(buf []byte, w io.Writer) error {
left := len(buf) // buf 中剩余的数据
for left != 0 {
size, err := w.Write(buf[len(buf)-left:])
if err != nil {
return err
}
left -= size
}
return nil
}
|
package cmd
import (
"bytes"
"context"
b64 "encoding/base64"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"time"
"github.com/gridscale/gscloud/render"
"github.com/gridscale/gscloud/runtime"
"github.com/gridscale/gscloud/utils"
"github.com/kardianos/osext"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientauth "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
// executablePath returns the absolute path of the running binary. On
// failure it logs to stderr and returns the empty string.
func executablePath() string {
	filePath, err := osext.Executable()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	return filePath
}
// clusterCmd represents the cluster command; it groups the per-cluster
// subcommands (save-kubeconfig, exec-credential).
var clusterCmd = &cobra.Command{
	Use:   "cluster",
	Short: "Actions on a Kubernetes cluster",
	Long:  "Actions on a Kubernetes cluster",
}

// kubernetesCmd represents the Kubernetes command, the top-level entry
// for all managed-Kubernetes operations.
var kubernetesCmd = &cobra.Command{
	Use:   "kubernetes",
	Short: "Operate managed Kubernetes clusters",
	Long:  "Operate managed Kubernetes clusters.",
}
// getKubernetesReleasesCmd represents the releases command
var getKubernetesReleasesCmd = &cobra.Command{
	Use:   "releases",
	Short: "Get available Kubernetes releases",
	Long:  "Prints all available Kubernetes releases. The latest three releases are supported.",
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		out := new(bytes.Buffer)
		op := rt.PaaSOperator()
		paasTemplates, err := op.GetPaaSTemplateList(ctx)
		if err != nil {
			return NewError(cmd, "Could not get get list of Kubernetes releases", err)
		}
		// Collect the release of every kubernetes-flavoured template.
		var releases []string
		for _, template := range paasTemplates {
			if template.Properties.Flavour == "kubernetes" {
				releases = append(releases, template.Properties.Release)
			}
		}
		// Newest release first.
		sort.Sort(sort.Reverse(utils.StringSorter(releases)))
		if !rootFlags.json {
			heading := []string{"releases"}
			var rows [][]string
			for _, rel := range releases {
				rows = append(rows, []string{rel})
			}
			render.AsTable(out, heading, rows, renderOpts)
			// NOTE(review): in quiet mode the table rendered into out is
			// discarded and bare release names are printed instead —
			// confirm intended.
			if rootFlags.quiet {
				for _, rel := range releases {
					fmt.Println(rel)
				}
				return nil
			}
		} else {
			render.AsJSON(out, releases)
		}
		fmt.Print(out)
		return nil
	},
}
// saveKubeconfigCmd represents the kubeconfig command. It fetches fresh
// credentials for the given cluster and merges cluster, user, and context
// entries into the local kubeconfig (path from --kubeconfig, KUBECONFIG,
// or the default location).
var saveKubeconfigCmd = &cobra.Command{
	Use:   "save-kubeconfig",
	Short: "Saves configuration of the given cluster into a kubeconfig",
	Long: `Saves configuration of the given cluster into a kubeconfig or KUBECONFIG environment variable.
# ENVIRONMENT
KUBECONFIG
Specifies the path to the kubeconfig. Gets overridden by --kubeconfig
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		kubeConfigFile, _ := cmd.Flags().GetString("kubeconfig")
		clusterID, _ := cmd.Flags().GetString("cluster")
		credentialPlugin, _ := cmd.Flags().GetBool("credential-plugin")
		// --kubeconfig takes precedence over $KUBECONFIG.
		kubeConfigEnv := os.Getenv("KUBECONFIG")
		pathOptions := clientcmd.NewDefaultPathOptions()
		if kubeConfigFile != "" {
			kubeConfigEnv = kubeConfigFile
			pathOptions.GlobalFile = kubeConfigFile
		}
		if kubeConfigEnv != "" && !utils.FileExists(kubeConfigEnv) {
			_, err := os.Create(kubeConfigEnv)
			if err != nil {
				return NewError(cmd, "Could not create file", err)
			}
		}
		currentKubeConfig, err := pathOptions.GetStartingConfig()
		if err != nil {
			return NewError(cmd, "Could not create starting config: %s", err)
		}
		op := rt.KubernetesOperator()
		newKubeConfig, _, err := fetchKubeConfigFromProvider(op, clusterID)
		if err != nil {
			return NewError(cmd, "Invalid kubeconfig", err)
		}
		c := newKubeConfig.Clusters[0]
		u := newKubeConfig.Users[0]
		certificateAuthorityData, err := b64.StdEncoding.DecodeString(c.Cluster.CertificateAuthorityData)
		if err != nil {
			return NewError(cmd, "Could not decode certificate authority data", err)
		}
		currentKubeConfig.Clusters[c.Name] = &clientcmdapi.Cluster{
			Server:                   c.Cluster.Server,
			CertificateAuthorityData: certificateAuthorityData,
		}
		// Fix: the original pre-populated AuthInfos[u.Name] here with the
		// ClientCertificate/ClientKey *path* fields crossed over with the
		// base64 data values. That assignment was both wrong and dead —
		// each branch below overwrites the entry — so it is removed.
		if credentialPlugin {
			// Authenticate via this binary acting as a client-go
			// exec-credential plugin.
			currentKubeConfig.AuthInfos[u.Name] = &clientcmdapi.AuthInfo{
				Exec: &clientcmdapi.ExecConfig{
					APIVersion: clientauth.SchemeGroupVersion.String(),
					Command:    executablePath(),
					Args: []string{
						"--config",
						viper.ConfigFileUsed(),
						"--project",
						rt.Project().Name,
						"kubernetes",
						"cluster",
						"exec-credential",
						"--cluster",
						clusterID,
					},
					Env: []clientcmdapi.ExecEnvVar{},
				},
			}
		} else {
			// Embed the decoded client certificate and key directly.
			clientCertificateData, err := b64.StdEncoding.DecodeString(u.User.ClientCertificateData)
			if err != nil {
				return NewError(cmd, "Could not decode client certificate data", err)
			}
			clientKeyData, err := b64.StdEncoding.DecodeString(u.User.ClientKeyData)
			if err != nil {
				return NewError(cmd, "Could not decode client key data", err)
			}
			currentKubeConfig.AuthInfos[u.Name] = &clientcmdapi.AuthInfo{
				ClientCertificateData: clientCertificateData,
				ClientKeyData:         clientKeyData,
			}
		}
		currentKubeConfig.Contexts[newKubeConfig.CurrentContext] = &clientcmdapi.Context{
			Cluster:  c.Name,
			AuthInfo: u.Name,
		}
		currentKubeConfig.CurrentContext = newKubeConfig.CurrentContext
		err = clientcmd.ModifyConfig(pathOptions, *currentKubeConfig, true)
		if err != nil {
			return NewError(cmd, "Could not modify config", err)
		}
		return nil
	},
}
// execCredentialCmd represents the getCertificate command. It implements
// the client-go exec-credential protocol: it emits an ExecCredential JSON
// document on stdout, serving from the local cache when possible and
// fetching (and re-caching) fresh credentials otherwise.
var execCredentialCmd = &cobra.Command{
	Use:   "exec-credential",
	Short: "Provides client credentials to kubectl command",
	Long:  "exec-credential provides client credentials to kubectl command.",
	RunE: func(cmd *cobra.Command, args []string) error {
		kubeConfigFile, _ := cmd.Flags().GetString("kubeconfig")
		clusterID, _ := cmd.Flags().GetString("cluster")
		kubectlDefaults := clientcmd.NewDefaultPathOptions()
		if kubeConfigFile != "" {
			kubectlDefaults.GlobalFile = kubeConfigFile
		}
		// Config load failures are reported but non-fatal; the credential
		// can still be produced.
		_, err := kubectlDefaults.GetStartingConfig()
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
		execCredential, err := loadCachedKubeConfig(clusterID)
		if err != nil {
			return NewError(cmd, "Could not load cached kubeconfig", err)
		}
		op := rt.KubernetesOperator()
		if execCredential == nil {
			// Cache miss: fetch fresh credentials from the provider.
			newKubeConfig, expirationTime, err := fetchKubeConfigFromProvider(op, clusterID)
			if err != nil {
				return NewError(cmd, "Could not fetch kubeconfig", err)
			}
			u := newKubeConfig.Users[0]
			clientKeyData, err := b64.StdEncoding.DecodeString(u.User.ClientKeyData)
			if err != nil {
				fmt.Println(err)
			}
			clientCertificateData, err := b64.StdEncoding.DecodeString(u.User.ClientCertificateData)
			if err != nil {
				fmt.Println(err)
			}
			// Providers that report no expiry get a one-hour default.
			if expirationTime.IsZero() {
				expirationTime = time.Now().Add(time.Hour)
			}
			execCredential = &clientauth.ExecCredential{
				TypeMeta: metav1.TypeMeta{
					Kind:       "ExecCredential",
					APIVersion: clientauth.SchemeGroupVersion.String(),
				},
				Status: &clientauth.ExecCredentialStatus{
					ClientKeyData:         string(clientKeyData),
					ClientCertificateData: string(clientCertificateData),
					ExpirationTimestamp:   &metav1.Time{Time: expirationTime},
				},
			}
			// Best-effort cache write; failure must not block kubectl.
			if err := cacheKubeConfig(clusterID, execCredential); err != nil {
				fmt.Fprintln(os.Stderr, err)
			}
		}
		execCredentialJSON, err := json.MarshalIndent(execCredential, "", " ")
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
		// this output will be used by kubectl
		fmt.Println(string(execCredentialJSON))
		return nil
	},
}
// init wires the kubernetes/cluster command tree and its flags into the
// root command. MarkFlagRequired errors are ignored: they can only occur
// when the named flag does not exist.
func init() {
	saveKubeconfigCmd.Flags().String("kubeconfig", "", "(optional) absolute path to the kubeconfig file. Overrides KUBECONFIG environment variable")
	saveKubeconfigCmd.Flags().String("cluster", "", "The cluster's UUID")
	saveKubeconfigCmd.MarkFlagRequired("cluster")
	saveKubeconfigCmd.Flags().Bool("credential-plugin", false, "Enables credential plugin authentication method (exec-credential)")
	clusterCmd.AddCommand(saveKubeconfigCmd)
	execCredentialCmd.Flags().String("kubeconfig", "", "(optional) absolute path to the kubeconfig file")
	execCredentialCmd.Flags().String("cluster", "", "The cluster's UUID")
	execCredentialCmd.MarkFlagRequired("cluster")
	clusterCmd.AddCommand(execCredentialCmd)
	kubernetesCmd.AddCommand(clusterCmd, getKubernetesReleasesCmd)
	rootCmd.AddCommand(kubernetesCmd)
}
// fetchKubeConfigFromProvider renews the credentials of cluster id and
// then downloads its fresh kubeconfig, returning it together with the
// credential expiration time (zero when the provider supplies none).
func fetchKubeConfigFromProvider(op runtime.KubernetesOperator, id string) (kubeConfig, time.Time, error) {
	var kc kubeConfig
	var expirationTime time.Time
	if err := op.RenewK8sCredentials(context.Background(), id); err != nil {
		return kubeConfig{}, time.Time{}, err
	}
	platformService, err := op.GetPaaSService(context.Background(), id)
	if err != nil {
		return kubeConfig{}, time.Time{}, err
	}
	// The kubeconfig arrives embedded as YAML in the first credential.
	if len(platformService.Properties.Credentials) != 0 {
		err := yaml.Unmarshal([]byte(platformService.Properties.Credentials[0].KubeConfig), &kc)
		if err != nil {
			return kubeConfig{}, time.Time{}, err
		}
		expirationTime = platformService.Properties.Credentials[0].ExpirationTime.Time
	}
	return kc, expirationTime, nil
}
// kubeConfigCachePath returns the directory holding cached
// exec-credential documents.
func kubeConfigCachePath() string {
	return filepath.Join(runtime.CachePath(), "exec-credential")
}

// cachedKubeConfigPath returns the cache file path for cluster id.
func cachedKubeConfigPath(id string) string {
	return filepath.Join(kubeConfigCachePath(), id+".json")
}
// cacheKubeConfig persists execCredential for cluster id under the cache
// directory so later exec-credential calls can skip the API. Entries
// without an expiration timestamp are not cached at all.
func cacheKubeConfig(id string, execCredential *clientauth.ExecCredential) error {
	if execCredential.Status.ExpirationTimestamp.IsZero() {
		return nil
	}
	cachePath := kubeConfigCachePath()
	if err := os.MkdirAll(cachePath, os.FileMode(0700)); err != nil {
		return err
	}
	path := cachedKubeConfigPath(id)
	// 0600: the file contains private key material.
	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(0600))
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(execCredential)
}
// loadCachedKubeConfig returns the cached exec-credential for cluster id,
// or (nil, nil) when no usable cache entry exists. Stale or malformed
// entries are deleted and reported as absent.
func loadCachedKubeConfig(id string) (*clientauth.ExecCredential, error) {
	kubeConfigPath := cachedKubeConfigPath(id)
	f, err := os.Open(kubeConfigPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, err
	}
	defer f.Close()
	var execCredential *clientauth.ExecCredential
	if err := json.NewDecoder(f).Decode(&execCredential); err != nil {
		return nil, err
	}
	// Fix: check Status before dereferencing it. The original read
	// Status.ExpirationTimestamp first, panicking on a cached entry with
	// a null status. A nil timestamp pointer is guarded for the same
	// reason.
	if execCredential.Status == nil || execCredential.Status.ExpirationTimestamp == nil {
		err = os.Remove(kubeConfigPath)
		return nil, err
	}
	timeStamp := execCredential.Status.ExpirationTimestamp
	if timeStamp.IsZero() || timeStamp.Time.Before(time.Now()) {
		err = os.Remove(kubeConfigPath)
		return nil, err
	}
	return execCredential, nil
}
|
package main
import "strconv"
// Sessions hands out unique Session values over a channel, serializing
// all ID generation through Run.
type Sessions struct {
	SessionRequest chan chan Session // callers send a reply channel to request a session
	nextSession    int               // next numeric session ID to assign
}
// NewSessions returns a Sessions whose request channel is ready for use.
// Call Run in a goroutine to start serving requests.
func NewSessions() *Sessions {
	return &Sessions{
		SessionRequest: make(chan chan Session),
	}
}
// Run serves session requests until SessionRequest is closed. It is the
// only goroutine touching nextSession, so no locking is required.
func (s *Sessions) Run() {
	for requestChan := range s.SessionRequest {
		s.handleSessionRequest(requestChan)
	}
}
// handleSessionRequest allocates the next session ID and delivers a new
// Session on the caller's reply channel.
func (s *Sessions) handleSessionRequest(requestChan chan Session) {
	newSession := Session{
		Id: strconv.Itoa(s.nextSession),
	}
	s.nextSession++
	requestChan <- newSession
}
// Session identifies a single client session by its string ID.
type Session struct {
	Id string
}
|
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package rls
import (
"errors"
"time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/rls/internal/cache"
"google.golang.org/grpc/balancer/rls/internal/keys"
"google.golang.org/grpc/metadata"
)
var errRLSThrottled = errors.New("RLS call throttled at client side")
// RLS rlsPicker selects the subConn to be used for a particular RPC. It does
// not manage subConns directly and usually deletegates to pickers provided by
// child policies.
//
// The RLS LB policy creates a new rlsPicker object whenever its ServiceConfig
// is updated and provides a bunch of hooks for the rlsPicker to get the latest
// state that it can used to make its decision.
//
// NOTE(review): pickers are invoked concurrently by gRPC; this type holds no
// state of its own, so its safety rests on the hooks below — confirm the LB
// policy synchronizes them.
type rlsPicker struct {
	// The keyBuilder map used to generate RLS keys for the RPC. This is built
	// by the LB policy based on the received ServiceConfig.
	kbm keys.BuilderMap
	// The following hooks are setup by the LB policy to enable the rlsPicker to
	// access state stored in the policy. This approach has the following
	// advantages:
	// 1. The rlsPicker is loosely coupled with the LB policy in the sense that
	//    updates happening on the LB policy like the receipt of an RLS
	//    response, or an update to the default rlsPicker etc are not explicitly
	//    pushed to the rlsPicker, but are readily available to the rlsPicker
	//    when it invokes these hooks. And the LB policy takes care of
	//    synchronizing access to these shared state.
	// 2. It makes unit testing the rlsPicker easy since any number of these
	//    hooks could be overridden.
	// readCache is used to read from the data cache and the pending request
	// map in an atomic fashion. The first return parameter is the entry in the
	// data cache, and the second indicates whether an entry for the same key
	// is present in the pending cache.
	readCache func(cache.Key) (*cache.Entry, bool)
	// shouldThrottle decides if the current RPC should be throttled at the
	// client side. It uses an adaptive throttling algorithm.
	shouldThrottle func() bool
	// startRLS kicks off an RLS request in the background for the provided RPC
	// path and keyMap. An entry in the pending request map is created before
	// sending out the request and an entry in the data cache is created or
	// updated upon receipt of a response. See implementation in the LB policy
	// for details.
	startRLS func(string, keys.KeyMap)
	// defaultPick enables the rlsPicker to delegate the pick decision to the
	// rlsPicker returned by the child LB policy pointing to the default target
	// specified in the service config.
	defaultPick func(balancer.PickInfo) (balancer.PickResult, error)
}
// Pick makes the routing decision for every outbound RPC.
func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	// For every incoming request, we first build the RLS keys using the
	// keyBuilder we received from the LB policy. If no metadata is present in
	// the context, we end up using an empty key.
	km := keys.KeyMap{}
	md, ok := metadata.FromOutgoingContext(info.Ctx)
	if ok {
		km = p.kbm.RLSKey(md, info.FullMethodName)
	}
	// We use the LB policy hook to read the data cache and the pending request
	// map (whether or not an entry exists) for the RPC path and the generated
	// RLS keys. We will end up kicking off an RLS request only if there is no
	// pending request for the current RPC path and keys, and either we didn't
	// find an entry in the data cache or the entry was stale and it wasn't in
	// backoff.
	startRequest := false
	now := time.Now()
	entry, pending := p.readCache(cache.Key{Path: info.FullMethodName, KeyMap: km.Str})
	if entry == nil {
		startRequest = true
	} else {
		// The entry stays locked for the rest of Pick so the time checks
		// and the ChildPicker read below see a consistent snapshot.
		entry.Mu.Lock()
		defer entry.Mu.Unlock()
		if entry.StaleTime.Before(now) && entry.BackoffTime.Before(now) {
			// This is the proactive cache refresh.
			startRequest = true
		}
	}
	if startRequest && !pending {
		if p.shouldThrottle() {
			// The entry doesn't exist or has expired and the new RLS request
			// has been throttled. Treat it as an error and delegate to default
			// pick, if one exists, or fail the pick.
			if entry == nil || entry.ExpiryTime.Before(now) {
				if p.defaultPick != nil {
					return p.defaultPick(info)
				}
				return balancer.PickResult{}, errRLSThrottled
			}
			// The proactive refresh has been throttled. Nothing to worry, just
			// keep using the existing entry.
		} else {
			p.startRLS(info.FullMethodName, km)
		}
	}
	if entry != nil {
		if entry.ExpiryTime.After(now) {
			// This is the jolly good case where we have found a valid entry in
			// the data cache. We delegate to the LB policy associated with
			// this cache entry.
			return entry.ChildPicker.Pick(info)
		} else if entry.BackoffTime.After(now) {
			// The entry has expired, but is in backoff. We delegate to the
			// default pick, if one exists, or return the error from the last
			// failed RLS request for this entry.
			if p.defaultPick != nil {
				return p.defaultPick(info)
			}
			return balancer.PickResult{}, entry.CallStatus
		}
	}
	// We get here only in the following cases:
	// * No data cache entry or expired entry, RLS request sent out
	// * No valid data cache entry and Pending cache entry exists
	// We need to queue to pick which will be handled once the RLS response is
	// received.
	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
|
package tests
import (
"reflect"
"testing"
ravendb "github.com/ravendb/ravendb-go-client"
"github.com/stretchr/testify/assert"
)
// Item2 is the test document stored by the RavenDB-8328 spatial tests:
// two point field pairs plus a WKT shape string.
// Note: renamed to Item2 to avoid conflicts
type Item2 struct {
	ID         string
	Name       string  `json:"name"`
	Latitude   float64 `json:"latitude"`
	Longitude  float64 `json:"longitude"`
	Latitude2  float64 `json:"latitude2"`
	Longitude2 float64 `json:"longitude2"`
	ShapeWkt   string  `json:"shapeWkt"`
}
// ravenDB8328_spatialOnAutoIndex stores one spatial document, checks the
// RQL generated for point and WKT spatial clauses, and then verifies the
// server's auto-index name is extended as queries touch new spatial fields.
func ravenDB8328_spatialOnAutoIndex(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		// Store a single document with two point pairs and a WKT shape.
		session := openSessionMust(t, store)
		item := &Item2{
			Latitude:   10,
			Longitude:  20,
			Latitude2:  10,
			Longitude2: 20,
			ShapeWkt:   "POINT(20 10)",
			Name:       "Name1",
		}
		err = session.Store(item)
		assert.NoError(t, err)
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	{
		// Verify the generated RQL for a point field and a WKT field,
		// both restricted by the same within-radius clause.
		session := openSessionMust(t, store)
		clazz := reflect.TypeOf(&Item2{})
		q := session.QueryCollectionForType(clazz)
		fieldName := ravendb.NewPointField("latitude", "longitude")
		clause := func(f *ravendb.SpatialCriteriaFactory) ravendb.SpatialCriteria {
			return f.WithinRadius(10, 10, 20)
		}
		q = q.Spatial2(fieldName, clause)
		iq, err := q.GetIndexQuery()
		assert.NoError(t, err)
		assert.Equal(t, iq.GetQuery(), "from Item2s where spatial.within(spatial.point(latitude, longitude), spatial.circle($p0, $p1, $p2))")
		q = session.QueryCollectionForType(clazz)
		fieldName2 := ravendb.NewWktField("shapeWkt")
		q = q.Spatial2(fieldName2, clause)
		iq, err = q.GetIndexQuery()
		assert.NoError(t, err)
		assert.Equal(t, iq.GetQuery(), "from Item2s where spatial.within(spatial.wkt(shapeWkt), spatial.circle($p0, $p1, $p2))")
		session.Close()
	}
	{
		// Execute the queries and confirm the auto-index name grows with
		// each additional spatial field queried.
		session := openSessionMust(t, store)
		var statsRef *ravendb.QueryStatistics
		var results []*Item2
		q := session.QueryCollectionForType(reflect.TypeOf(&Item2{}))
		q = q.Statistics(&statsRef)
		fieldName := ravendb.NewPointField("latitude", "longitude")
		clause := func(f *ravendb.SpatialCriteriaFactory) ravendb.SpatialCriteria {
			return f.WithinRadius(10, 10, 20)
		}
		q = q.Spatial2(fieldName, clause)
		err = q.GetResults(&results)
		assert.NoError(t, err)
		assert.Equal(t, len(results), 1)
		assert.Equal(t, statsRef.IndexName, "Auto/Item2s/BySpatial.point(latitude|longitude)")
		// NOTE(review): the session is closed here but reused for the two
		// queries below — presumably query building/execution still works
		// on a closed session in this client; confirm.
		session.Close()
		statsRef = nil
		results = nil
		q = session.QueryCollectionForType(reflect.TypeOf(&Item2{}))
		q = q.Statistics(&statsRef)
		fieldName = ravendb.NewPointField("latitude2", "longitude2")
		q = q.Spatial2(fieldName, clause)
		err = q.GetResults(&results)
		assert.NoError(t, err)
		assert.Equal(t, len(results), 1)
		assert.Equal(t, statsRef.IndexName, "Auto/Item2s/BySpatial.point(latitude|longitude)AndSpatial.point(latitude2|longitude2)")
		statsRef = nil
		results = nil
		q = session.QueryCollectionForType(reflect.TypeOf(&Item2{}))
		q = q.Statistics(&statsRef)
		fieldName2 := ravendb.NewWktField("shapeWkt")
		q = q.Spatial2(fieldName2, clause)
		err = q.GetResults(&results)
		assert.NoError(t, err)
		assert.Equal(t, len(results), 1)
		assert.Equal(t, statsRef.IndexName, "Auto/Item2s/BySpatial.point(latitude|longitude)AndSpatial.point(latitude2|longitude2)AndSpatial.wkt(shapeWkt)")
	}
}
// TestRavenDB8328 drives the spatial auto-index scenario, mirroring the
// order of the corresponding Java test suite.
func TestRavenDB8328(t *testing.T) {
	driver := createTestDriver(t)
	defer recoverTest(t, func() { destroyDriver(t, driver) })
	// matches order of Java tests
	ravenDB8328_spatialOnAutoIndex(t, driver)
}
|
package midec
import (
"io"
)
const tmpLength = 256 * 3
// ReadAdvancer is the struct that can skip some bytes reading.
type ReadAdvancer struct {
io.Reader
tmp []byte
}
// NewReadAdvancer creates ReadAdvancer.
func NewReadAdvancer(r io.Reader) *ReadAdvancer {
var arr [tmpLength]byte
return &ReadAdvancer{
Reader: r,
tmp: arr[:],
}
}
// ReadFull reads exactly len(buf) bytes into buf; it is shorthand for
// io.ReadFull on the wrapped reader.
func (a *ReadAdvancer) ReadFull(buf []byte) (int, error) {
	n, err := io.ReadFull(a.Reader, buf)
	return n, err
}
// Advance discards the next n bytes from the underlying reader, pulling
// them into the scratch buffer in chunks of at most tmpLength bytes.
func (a *ReadAdvancer) Advance(n uint) error {
	for n > 0 {
		chunk := n
		if chunk > tmpLength {
			chunk = tmpLength
		}
		if _, err := a.ReadFull(a.tmp[:chunk]); err != nil {
			return err
		}
		n -= chunk
	}
	return nil
}
|
// MIT License
//
// Copyright (c) 2019 Adrian Houghton
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package darksky
// INFO: These JSON structs were mostly generated by using https://mholt.github.io/json-to-go/
// ForecastArgument - the argument object used to generate the call URL
// for a forecast request.
type ForecastArgument struct {
	Key          string // Dark Sky API key used in the request URL
	Latitude     float64
	Longitude    float64
	ExcludeInfo  []ForecastExclude // response blocks to exclude
	ExtendHourly bool
	Language     ForecastLanguage
	Units        ForecastUnits
	// DailyAPICalls — presumably a client-side call budget/counter;
	// confirm against the code that builds requests.
	DailyAPICalls int
}
// ForecastResponse - the main forecast response containing all the details
// for the location requested. StatusCode/StatusError map the API's "code"
// and "error" JSON fields.
type ForecastResponse struct {
	Latitude    float64      `json:"latitude,omitempty"`
	Longitude   float64      `json:"longitude,omitempty"`
	Timezone    string       `json:"timezone,omitempty"`
	Currently   DataPoint    `json:"currently,omitempty"`
	Minutely    DataBlock    `json:"minutely,omitempty"`
	Hourly      DataBlock    `json:"hourly,omitempty"`
	Daily       DataBlock    `json:"daily,omitempty"`
	Alerts      []AlertBlock `json:"alerts,omitempty"`
	Flags       FlagsBlock   `json:"flags,omitempty"`
	StatusCode  int64        `json:"code,omitempty"`
	StatusError string       `json:"error,omitempty"`
}
// DataPoint - A data point object contains various properties, each
// representing the average (unless otherwise specified) of a particular
// weather phenomenon occurring during a period of time: an instant in the
// case of currently, a minute for minutely, an hour for hourly, and a day
// for daily. Time fields are Unix timestamps.
// See https://darksky.net/dev/docs#data-point-object
type DataPoint struct {
	Time                        int64   `json:"time,omitempty"`
	Summary                     string  `json:"summary,omitempty"`
	Icon                        string  `json:"icon,omitempty"`
	SunriseTime                 int64   `json:"sunriseTime,omitempty"`
	SunsetTime                  int64   `json:"sunsetTime,omitempty"`
	MoonPhase                   float64 `json:"moonPhase,omitempty"`
	PrecipIntensity             float64 `json:"precipIntensity,omitempty"`
	PrecipIntensityMax          float64 `json:"precipIntensityMax,omitempty"`
	PrecipIntensityMaxTime      int64   `json:"precipIntensityMaxTime,omitempty"`
	PrecipProbability           float64 `json:"precipProbability,omitempty"`
	PrecipType                  string  `json:"precipType,omitempty"`
	TemperatureHigh             float64 `json:"temperatureHigh,omitempty"`
	TemperatureHighTime         int64   `json:"temperatureHighTime,omitempty"`
	TemperatureLow              float64 `json:"temperatureLow,omitempty"`
	TemperatureLowTime          int64   `json:"temperatureLowTime,omitempty"`
	ApparentTemperatureHigh     float64 `json:"apparentTemperatureHigh,omitempty"`
	ApparentTemperatureHighTime int64   `json:"apparentTemperatureHighTime,omitempty"`
	ApparentTemperatureLow      float64 `json:"apparentTemperatureLow,omitempty"`
	ApparentTemperatureLowTime  int64   `json:"apparentTemperatureLowTime,omitempty"`
	DewPoint                    float64 `json:"dewPoint,omitempty"`
	Humidity                    float64 `json:"humidity,omitempty"`
	Pressure                    float64 `json:"pressure,omitempty"`
	WindSpeed                   float64 `json:"windSpeed,omitempty"`
	WindGust                    float64 `json:"windGust,omitempty"`
	WindGustTime                int64   `json:"windGustTime,omitempty"`
	WindBearing                 int64   `json:"windBearing,omitempty"`
	CloudCover                  float64 `json:"cloudCover,omitempty"`
	UvIndex                     int64   `json:"uvIndex,omitempty"`
	UvIndexTime                 int64   `json:"uvIndexTime,omitempty"`
	Visibility                  float64 `json:"visibility,omitempty"`
	Ozone                       float64 `json:"ozone,omitempty"`
	TemperatureMin              float64 `json:"temperatureMin,omitempty"`
	TemperatureMinTime          int64   `json:"temperatureMinTime,omitempty"`
	TemperatureMax              float64 `json:"temperatureMax,omitempty"`
	TemperatureMaxTime          int64   `json:"temperatureMaxTime,omitempty"`
	ApparentTemperatureMin      float64 `json:"apparentTemperatureMin,omitempty"`
	ApparentTemperatureMinTime  int64   `json:"apparentTemperatureMinTime,omitempty"`
	ApparentTemperatureMax      float64 `json:"apparentTemperatureMax,omitempty"`
	ApparentTemperatureMaxTime  int64   `json:"apparentTemperatureMaxTime,omitempty"`
}
// DataBlock - A data block object represents the various weather phenomena
// occurring over a period of time, as an ordered slice of DataPoint values.
// See https://darksky.net/dev/docs#data-block-object
type DataBlock struct {
	Data    []DataPoint `json:"data,omitempty"`
	Summary string      `json:"summary,omitempty"`
	Icon    string      `json:"icon,omitempty"`
}
// AlertBlock - The alert object representing the severe weather warnings
// issued for the requested location by a governmental authority.
// See https://darksky.net/dev/docs#response-alerts
type AlertBlock struct {
	Description string   `json:"description,omitempty"`
	Expires     int64    `json:"expires,omitempty"`
	Regions     []string `json:"regions,omitempty"`
	Severity    string   `json:"severity,omitempty"`
	Time        int64    `json:"time,omitempty"`
	Title       string   `json:"title,omitempty"`
	// Uri: spelled as-is; renaming to URI would break the exported API.
	Uri string `json:"uri,omitempty"`
}
// FlagsBlock - The flags object contains various metadata information
// related to the request.
// See https://darksky.net/dev/docs#response-flags
type FlagsBlock struct {
	DarkSkyUnavailable string   `json:"darksky-unavailable,omitempty"`
	NearestStation     float64  `json:"nearest-station,omitempty"`
	Sources            []string `json:"sources,omitempty"`
	Units              string   `json:"units,omitempty"`
}
package LeetCode
import (
"fmt"
"math"
)
func Code110() {
head := InitTree()
head = &TreeNode{1, nil, nil}
head.Left = &TreeNode{2, nil, nil}
head.Right = &TreeNode{2, nil, nil}
head.Left.Left = &TreeNode{3, nil, nil}
head.Left.Right = &TreeNode{3, nil, nil}
head.Left.Left.Left = &TreeNode{4, nil, nil}
head.Left.Left.Right = &TreeNode{4, nil, nil}
fmt.Println(isBalanced(head))
}
/**
给定一个二叉树,判断它是否是高度平衡的二叉树。
本题中,一棵高度平衡二叉树定义为:
一个二叉树每个节点 的左右两个子树的高度差的绝对值不超过1。
示例 1:
给定二叉树 [3,9,20,null,null,15,7]
3
/ \
9 20
/ \
15 7
返回 true 。
示例 2:
给定二叉树 [1,2,2,3,3,null,null,4,4]
1
/ \
2 2
/ \
3 3
/ \
4 4
返回 false 。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/balanced-binary-tree
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
// isBalanced reports whether every node's two subtrees differ in height
// by at most one; dfs_110 signals a violation with -1.
func isBalanced(root *TreeNode) bool {
	return dfs_110(root) >= 0
}
// dfs_110 returns the height of root, or -1 as soon as any subtree is
// unbalanced; the sentinel short-circuits the remaining recursion.
func dfs_110(root *TreeNode) int {
	if root == nil {
		return 0
	}
	lh := dfs_110(root.Left)
	if lh == -1 {
		return -1
	}
	rh := dfs_110(root.Right)
	if rh == -1 {
		return -1
	}
	if math.Abs(float64(lh-rh)) > 1 {
		return -1
	}
	return max(lh, rh) + 1
}
|
/*
Ex 8
Implement a function that given a directed graph
finds the strongly connected components.
Author: Xiaoyan ZHANG
*/
package main
import(
"fmt"
"strconv"
"container/list"
)
/*
directed graph represented by adjacency list:
each index of the nodes slice represents a node,
each node's list contains all the connected (outgoing) nodes as ints,
and the length of the list is the node's out-degree
*/
type graph struct {
	nodes []*list.List
}
// newGraph constructs a graph with cap nodes and no edges.
func newGraph(cap int) *graph {
	adjacency := make([]*list.List, cap)
	for i := range adjacency {
		adjacency[i] = list.New()
	}
	return &graph{adjacency}
}
// addEdge inserts the directed edge x->y, ignoring out-of-range
// endpoints, self-loops, and duplicate edges.
func (g *graph) addEdge(x, y int) {
	if x < 0 || y < 0 || (x >= len(g.nodes)) || (y >= len(g.nodes)) {
		fmt.Println("Error: index out of bound")
		return
	}
	if x == y {
		fmt.Println("Error: not an edge!")
		return
	}
	// container/list has no Iter method; walk the elements explicitly.
	for e := g.nodes[x].Front(); e != nil; e = e.Next() {
		if e.Value.(int) == y {
			return
		}
	}
	g.nodes[x].PushBack(y)
}
// reverse returns a new graph with every edge x->y flipped to y->x,
// or nil for a nil/empty graph.
func (g *graph) reverse() *graph {
	if g == nil || len(g.nodes) == 0 {
		return nil
	}
	g_rev := newGraph(len(g.nodes))
	for i := 0; i < len(g.nodes); i++ {
		// container/list has no Iter method; walk the elements explicitly.
		for e := g.nodes[i].Front(); e != nil; e = e.Next() {
			g_rev.addEdge(e.Value.(int), i)
		}
	}
	return g_rev
}
// String renders the adjacency lists, one tab-separated line per node.
func (g *graph) String() string {
	s := "Graph: \n"
	for i, adj := range g.nodes {
		s += "Node " + strconv.Itoa(i) + ":\t"
		// container/list has no Iter method; walk the elements explicitly.
		for e := adj.Front(); e != nil; e = e.Next() {
			s += strconv.Itoa(e.Value.(int)) + "\t"
		}
		s += "\n"
	}
	return s
}
/*
data structure used in finding strongly connected components:
nodes_count[i] is 0 while node i is unvisited, -1 while it is on the
DFS stack (or already consumed), and otherwise the node's DFS finish
counter; count is the running finish counter used in traversal
*/
type checklist struct {
	nodes_count []int
	count       int
}
// dfs performs a post-order depth-first search from node n: a node is
// marked -1 while being explored, then stamped with an increasing
// finish count once all its successors are done.
func (g *graph) dfs(chk *checklist, n int) {
	chk.nodes_count[n] = -1
	// container/list has no Iter method; walk the elements explicitly.
	for e := g.nodes[n].Front(); e != nil; e = e.Next() {
		if next := e.Value.(int); chk.nodes_count[next] == 0 {
			g.dfs(chk, next)
		}
	}
	chk.count++
	chk.nodes_count[n] = chk.count
}
// find_max_value returns the node with the highest finish count in chk
// and marks it -1 (consumed); it returns -1 when no positive count
// remains. The graph receiver is unused but kept for API stability.
func (g *graph) find_max_value(chk *checklist) int {
	best, bestIdx := 0, -1
	for node, cnt := range chk.nodes_count {
		if cnt > best {
			bestIdx, best = node, cnt
		}
	}
	if bestIdx >= 0 {
		chk.nodes_count[bestIdx] = -1
	}
	return bestIdx
}
/*
find_str_conn_component prints the strongly connected components:
DFS finish counts are computed on g, then the reversed graph is
DFS-ed in decreasing finish order; each reverse pass yields one
component (Kosaraju-style).
*/
func (g *graph) find_str_conn_component() {
	if len(g.nodes) <= 0 {
		return
	}
	chk := &checklist{make([]int, len(g.nodes)), 0}
	for i := range g.nodes {
		if chk.nodes_count[i] == 0 {
			g.dfs(chk, i)
		}
	}
	g_rev := g.reverse()
	chk_rev := &checklist{make([]int, len(g.nodes)), 0}
	component := list.New()
	num_checked := 0
	for chk_rev.count <= len(g.nodes) {
		idx := g_rev.find_max_value(chk)
		if idx < 0 {
			break
		}
		if chk_rev.nodes_count[idx] == 0 {
			g_rev.dfs(chk_rev, idx)
		} else {
			continue
		}
		// Nodes finished during this reverse DFS pass form one component.
		members := list.New()
		for i, c := range chk_rev.nodes_count {
			if c > num_checked && c <= chk_rev.count {
				members.PushBack(i)
			}
		}
		num_checked = chk_rev.count
		component.PushBack(members)
	}
	//Print out strongly connected components
	// container/list has no Iter method; walk the elements explicitly.
	i := 0
	for e := component.Front(); e != nil; e = e.Next() {
		i++
		fmt.Printf("strongly connected component %d:\n", i)
		for sub := e.Value.(*list.List).Front(); sub != nil; sub = sub.Next() {
			fmt.Printf("%d\t", sub.Value.(int))
		}
		fmt.Println("\n")
	}
}
// main exercises the SCC finder on two sample graphs.
func main() {
	fmt.Println("#Test case 1:")
	g := newGraph(4)
	for _, e := range [][2]int{{0, 1}, {0, 2}, {2, 0}, {2, 3}, {3, 0}} {
		g.addEdge(e[0], e[1])
	}
	fmt.Println(g)
	g.find_str_conn_component()

	fmt.Println("#Test case 2:")
	g = newGraph(8)
	for _, e := range [][2]int{
		{0, 1}, {1, 2}, {1, 4}, {1, 5}, {2, 3}, {2, 6}, {3, 2},
		{3, 7}, {4, 0}, {4, 5}, {5, 6}, {6, 5}, {7, 6}, {7, 3},
	} {
		g.addEdge(e[0], e[1])
	}
	fmt.Println(g)
	g.find_str_conn_component()
}
|
package schedulepayout
import (
"errors"
"time"
"github.com/NodeFactoryIo/vedran/internal/configuration"
"github.com/NodeFactoryIo/vedran/internal/repositories"
"github.com/NodeFactoryIo/vedran/internal/script"
"github.com/NodeFactoryIo/vedran/internal/ui"
log "github.com/sirupsen/logrus"
)
// StartScheduledPayout checks every 24 hours how many days have passed since
// last payout. If number of passed days is equal or bigger than defined
// interval in configuration, start automatic payout. The spawned goroutine
// runs for the lifetime of the process.
func StartScheduledPayout(configuration configuration.PayoutConfiguration, privateKey string, repos repositories.Repos) {
	ticker := time.NewTicker(time.Hour * 24)
	go func() {
		// The previous version selected on a `done` channel that nothing
		// ever wrote to or closed, so the select could never exit that
		// way; range over the ticker directly instead.
		for range ticker.C {
			checkForPayout(privateKey, configuration, repos)
		}
	}()
}
// GetNextPayoutDate returns the date of the next scheduled payout. It
// errors when scheduled payouts are disabled (nil configuration) or when
// the latest payout cannot be loaded; in both cases the returned time is
// the current moment.
func GetNextPayoutDate(configuration *configuration.PayoutConfiguration, repos repositories.Repos) (time.Time, error) {
	if configuration == nil {
		return time.Now(), errors.New("Schedule payout not configured")
	}
	latestPayout, err := repos.PayoutRepo.FindLatestPayout()
	if err != nil {
		log.Errorf("Unable to calculate last payout because of: %v", err)
		return time.Now(), err
	}
	next := latestPayout.Timestamp.AddDate(0, 0, configuration.PayoutNumberOfDays)
	return next, nil
}
// checkForPayout starts a payout in the background once enough days have
// elapsed since the last one; otherwise it logs when the next payout is due.
func checkForPayout(
	privateKey string,
	configuration configuration.PayoutConfiguration,
	repos repositories.Repos,
) {
	elapsedDays, lastPayout, err := numOfDaysSinceLastPayout(repos)
	if err != nil {
		log.Error("Unable to calculate number of days since last payout", err)
		return
	}
	if elapsedDays < configuration.PayoutNumberOfDays {
		log.Infof(
			"Last payout was %s, next payout will be in %d days",
			lastPayout.Format("2006-January-02"),
			configuration.PayoutNumberOfDays-elapsedDays,
		)
		return
	}
	go startPayout(privateKey, configuration)
}
// startPayout runs the payout script and displays the resulting
// transaction statuses; partial results are shown even when the script
// ultimately fails.
func startPayout(privateKey string, configuration configuration.PayoutConfiguration) {
	log.Info("Starting automatic payout...")
	transactionDetails, err := script.ExecutePayout(
		privateKey,
		configuration.PayoutTotalReward,
		configuration.LbFeeAddress,
		configuration.LbURL,
	)
	if transactionDetails != nil {
		// display even if only part of transactions executed
		ui.DisplayTransactionsStatus(transactionDetails)
	}
	// The redundant `else` after return has been flattened per Go idiom.
	if err != nil {
		log.Errorf("Unable to execute payout, because of: %v", err)
		return
	}
	log.Info("Payout execution finished")
}
// numOfDaysSinceLastPayout reports how many whole 24-hour periods have
// passed since the most recent payout, along with that payout's timestamp.
func numOfDaysSinceLastPayout(repos repositories.Repos) (int, *time.Time, error) {
	latestPayout, err := repos.PayoutRepo.FindLatestPayout()
	if err != nil {
		return 0, nil, err
	}
	elapsed := time.Since(latestPayout.Timestamp)
	wholeDays := elapsed / (24 * time.Hour)
	return int(wholeDays), &latestPayout.Timestamp, nil
}
|
package controllers
import (
"fmt"
"encoding/json"
"smartCity/models"
"github.com/astaxie/beego"
)
// UserController handles CRUD, login, and logout operations about Users.
type UserController struct {
	beego.Controller
}
// @Title CreateUser
// @Description create users
// @Param body body models.User true "body for user content"
// @Success 200 {int} models.User.Id
// @Failure 403 body is empty
// @router / [post]
func (u *UserController) Post() {
	var user models.Userinfo
	// Previously the unmarshal error was silently ignored, which could
	// create an empty user from a malformed request body.
	if err := json.Unmarshal(u.Ctx.Input.RequestBody, &user); err != nil {
		u.Data["json"] = err.Error()
		u.ServeJSON()
		return
	}
	uid := models.AddUser(user)
	u.Data["json"] = map[string]string{"uid": uid}
	u.ServeJSON()
}
// @Title GetAll
// @Description get all Users
// @Success 200 {object} models.User
// @router / [get]
func (u *UserController) GetAll() {
	// TODO maybe need to modify the returned shape.
	allUsers := models.GetAllUsers()
	u.Data["json"] = allUsers
	u.ServeJSON()
}
// @Title Get
// @Description get user by uid
// @Param uid path string true "The key for staticblock"
// @Success 200 {object} models.User
// @Failure 403 :uid is empty
// @router /:uid [get]
func (u *UserController) Get() {
	uid := u.GetString(":uid")
	if uid == "" {
		// No uid: respond with an empty payload, as before.
		u.ServeJSON()
		return
	}
	user, err := models.GetUser(uid)
	if err != nil {
		u.Data["json"] = err.Error()
	} else {
		u.Data["json"] = map[string]string{"uid": user.Username}
	}
	u.ServeJSON()
}
// @Title Update
// @Description update the user
// @Param uid path string true "The uid you want to update"
// @Param body body models.User true "body for user content"
// @Success 200 {object} models.User
// @Failure 403 :uid is not int
// @router /:uid [put]
func (u *UserController) Put() {
	uid := u.GetString(":uid")
	if uid != "" {
		var user models.Userinfo
		// Previously the unmarshal error was silently ignored; reject a
		// malformed body instead of updating with a zero-value user.
		if err := json.Unmarshal(u.Ctx.Input.RequestBody, &user); err != nil {
			u.Data["json"] = err.Error()
			u.ServeJSON()
			return
		}
		uu, err := models.UpdateUser(user)
		if err != nil {
			u.Data["json"] = err.Error()
		} else {
			u.Data["json"] = map[string]string{"uid": uu.Username}
		}
	}
	u.ServeJSON()
}
// @Title Delete
// @Description delete the user
// @Param uid path string true "The uid you want to delete"
// @Success 200 {string} delete success!
// @Failure 403 uid is empty
// @router /:uid [delete]
func (u *UserController) Delete() {
	models.DeleteUser(u.GetString(":uid"))
	u.Data["json"] = "delete success!"
	u.ServeJSON()
}
// @Title Login
// @Description Logs user into the system
// @Param username query string true "username"
// @Param password query string true "password"
// @Success 200 {string} login success
// @Failure 403 user not exist
// @router /login [get]
func (u *UserController) Login() {
	username := u.GetString("username")
	password := u.GetString("password")
	if models.Login(username, password) {
		u.Data["json"] = "login success"
		// Fetch the current user's info and store it in the session.
		// NOTE(review): GetUser is called with the username here, while
		// Get calls it with a uid — confirm both lookups are supported.
		u1, err1 := models.GetUser(username)
		if err1 != nil {
			fmt.Println("getuser error:", err1)
		} else {
			u.SetSession("listname", u1.Listname)
		}
		u.SetSession("name", username)
	} else {
		u.Data["json"] = "user not exist"
	}
	u.ServeJSON()
}
// @Title logout
// @Description Logs out current logged in user session by clearing the
// "name" session key set during Login.
// @Success 200 {string} logout success
// @router /logout [get]
func (u *UserController) Logout() {
	u.DelSession("name")
	u.Data["json"] = "logout success"
	u.ServeJSON()
}
|
package account
import (
"sync"
"testing"
"time"
"github.com/jrapoport/gothic/api/grpc/rpc/account"
"github.com/jrapoport/gothic/config"
"github.com/jrapoport/gothic/core/context"
"github.com/jrapoport/gothic/jwt"
"github.com/jrapoport/gothic/mail/template"
"github.com/jrapoport/gothic/test/tconf"
"github.com/jrapoport/gothic/test/tsrv"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
)
// TestAccountServer_SendResetPassword covers invalid/missing/unknown email
// requests, the success path (token delivered by mail), and rejection for a
// banned user.
func TestAccountServer_SendResetPassword(t *testing.T) {
	t.Parallel()
	s, smtp := tsrv.RPCServer(t, true)
	srv := newAccountServer(s)
	ctx := context.Background()
	// invalid req
	_, err := srv.SendResetPassword(ctx, nil)
	assert.Error(t, err)
	// empty email
	req := &account.ResetPasswordRequest{}
	_, err = srv.SendResetPassword(ctx, req)
	assert.Error(t, err)
	// bad email
	req.Email = "bad"
	_, err = srv.SendResetPassword(ctx, req)
	assert.Error(t, err)
	// not found
	req.Email = "i-dont-exist@example.com"
	_, err = srv.SendResetPassword(ctx, req)
	assert.Error(t, err)
	// success
	var tok string
	act := template.ResetPasswordAction
	var mu sync.Mutex
	smtp.AddHook(t, func(email string) {
		mu.Lock()
		defer mu.Unlock()
		tok = tconf.GetEmailToken(act, email)
	})
	u := testUser(t, srv)
	req.Email = u.Email
	_, err = srv.SendResetPassword(ctx, req)
	assert.NoError(t, err)
	assert.Eventually(t, func() bool {
		// BUG FIX: the hook writes tok on the SMTP goroutine under mu,
		// but this reader previously did not lock — a data race under
		// -race. Read under the same mutex.
		mu.Lock()
		defer mu.Unlock()
		return tok != ""
	}, 1*time.Second, 10*time.Millisecond)
	_, err = srv.API.BanUser(ctx, u.ID)
	require.NoError(t, err)
	_, err = srv.SendResetPassword(ctx, req)
	assert.Error(t, err)
}
// TestAccountServer_SendResetPassword_RateLimit verifies that a second
// reset email within the send-limit window is rejected and not delivered.
func TestAccountServer_SendResetPassword_RateLimit(t *testing.T) {
	s, smtp := tsrv.RPCServer(t, true)
	srv := newAccountServer(s)
	ctx := context.Background()
	srv.Config().Mail.SendLimit = 5 * time.Minute
	srv.Config().Signup.AutoConfirm = true
	u := testUser(t, srv)
	// BUG FIX: `sent` is written by the SMTP hook goroutine and read and
	// reset on the test goroutine; every access is now guarded by a mutex
	// to avoid a data race under -race.
	var mu sync.Mutex
	var sent string
	smtp.AddHook(t, func(email string) {
		mu.Lock()
		defer mu.Unlock()
		sent = email
	})
	wasSent := func() bool {
		mu.Lock()
		defer mu.Unlock()
		return sent != ""
	}
	for i := 0; i < 2; i++ {
		mu.Lock()
		sent = ""
		mu.Unlock()
		req := &account.ResetPasswordRequest{
			Email: u.Email,
		}
		_, err := srv.SendResetPassword(ctx, req)
		if i == 0 {
			// First send succeeds and an email goes out.
			assert.NoError(t, err)
			assert.Eventually(t, wasSent, 1*time.Second, 10*time.Millisecond)
		} else {
			// Second send inside the window is rate limited; no email.
			test := s.RPCError(codes.DeadlineExceeded,
				config.ErrRateLimitExceeded)
			require.NotNil(t, test)
			assert.EqualError(t, err, test.Error())
			assert.Never(t, wasSent, 1*time.Second, 10*time.Millisecond)
		}
	}
}
// TestAccountServer_ConfirmResetPassword covers invalid requests, then the
// full flow: request a reset token by mail, confirm with a new password,
// and verify the user is confirmed, the JWT claims match, and the new
// password authenticates.
func TestAccountServer_ConfirmResetPassword(t *testing.T) {
	t.Parallel()
	const newPass = "sxjAm7QJ4?3dH!aN8T3F5P!oNnpXbaRy#gtx#8jG"
	s, smtp := tsrv.RPCServer(t, true)
	srv := newAccountServer(s)
	srv.Config().Signup.AutoConfirm = false
	srv.Config().Mail.SendLimit = 0
	ctx := context.Background()
	// invalid req
	_, err := srv.ConfirmResetPassword(ctx, nil)
	assert.Error(t, err)
	// empty password
	req := &account.ConfirmPasswordRequest{}
	_, err = srv.ConfirmResetPassword(ctx, req)
	assert.Error(t, err)
	// empty token
	req = &account.ConfirmPasswordRequest{
		Password: "bad",
	}
	_, err = srv.ConfirmResetPassword(ctx, req)
	assert.Error(t, err)
	// bad token
	req.Token = "bad"
	_, err = srv.ConfirmResetPassword(ctx, req)
	assert.Error(t, err)
	// first get the change token
	u := testUser(t, srv)
	assert.False(t, u.IsConfirmed())
	// BUG FIX: tok is written on the SMTP hook goroutine and was read
	// here with no synchronization at all — a data race under -race.
	// Guard both sides with a mutex (as the SendResetPassword test does).
	var mu sync.Mutex
	var tok string
	act := template.ResetPasswordAction
	smtp.AddHook(t, func(email string) {
		mu.Lock()
		defer mu.Unlock()
		tok = tconf.GetEmailToken(act, email)
	})
	pw := &account.ResetPasswordRequest{
		Email: u.Email,
	}
	_, err = srv.SendResetPassword(ctx, pw)
	assert.NoError(t, err)
	assert.Eventually(t, func() bool {
		mu.Lock()
		defer mu.Unlock()
		return tok != ""
	}, 1*time.Second, 10*time.Millisecond)
	// now use the token to change the password
	mu.Lock()
	req = &account.ConfirmPasswordRequest{
		Token:    tok,
		Password: newPass,
	}
	mu.Unlock()
	res, err := srv.ConfirmResetPassword(ctx, req)
	assert.NoError(t, err)
	u, err = srv.GetUser(u.ID)
	assert.NoError(t, err)
	assert.True(t, u.IsConfirmed())
	claims, err := jwt.ParseUserClaims(srv.Config().JWT, res.Access)
	assert.NoError(t, err)
	require.NotNil(t, claims)
	assert.Equal(t, u.ID.String(), claims.Subject())
	u, err = srv.GetUser(u.ID)
	assert.NoError(t, err)
	err = u.Authenticate(newPass)
	assert.NoError(t, err)
}
|
package utils
import (
"bytes"
"runtime"
"strconv"
)
// TakeStacktrace formats the current goroutine's stack, omitting the top
// `skip` frames (0 starts at TakeStacktrace's caller). Each frame is
// rendered as "function\n\tfile:line", frames separated by newlines.
func TakeStacktrace(skip int) string {
	var out bytes.Buffer
	out.Grow(1000)
	// Skip runtime.Callers itself and this function so the program
	// counters start at the caller; double the slice until the whole
	// stack fits.
	pcs := make([]uintptr, 10000)
	n := runtime.Callers(skip+2, pcs)
	for n >= len(pcs) {
		pcs = make([]uintptr, 2*len(pcs))
		n = runtime.Callers(skip+2, pcs)
	}
	frames := runtime.CallersFrames(pcs[:n])
	// frames.Next reports more == false on the final frame, which we
	// deliberately drop: it is a runtime frame (runtime.main or
	// runtime.goexit) that only adds noise.
	first := true
	for frame, more := frames.Next(); more; frame, more = frames.Next() {
		if !first {
			out.WriteByte('\n')
		}
		first = false
		out.WriteString(frame.Function)
		out.WriteByte('\n')
		out.WriteByte('\t')
		out.WriteString(frame.File)
		out.WriteByte(':')
		out.WriteString(strconv.Itoa(frame.Line))
	}
	return out.String()
}
|
package main
import (
"bufio"
"errors"
"fmt"
"log"
"math"
"math/rand"
"os"
"regexp"
"strings"
"time"
"unicode"
"golang.org/x/text/runes"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/mat"
)
// neuralNet holds the trained parameters of a single-hidden-layer
// feed-forward network; the weight/bias matrices are set by train.
type neuralNet struct {
	config  neuralNetConfig
	wHidden *mat.Dense // input -> hidden weights
	bHidden *mat.Dense // hidden-layer biases (1 x hiddenNeurons)
	wOut    *mat.Dense // hidden -> output weights
	bOut    *mat.Dense // output-layer biases (1 x outputNeurons)
}

// neuralNetConfig defines the network architecture and training
// hyperparameters.
type neuralNetConfig struct {
	inputNeurons  int
	outputNeurons int
	hiddenNeurons int
	numEpochs     int
	learningRate  float64
}
// main trains a small feed-forward network on the chat lines in
// ./chats.txt and prints the training-set accuracy.
func main() {
	inputs, labels := makeInputAndLabels("./chats.txt")
	// 28 encoded characters in, 7 classes out, one hidden layer of 28.
	config := neuralNetConfig{
		inputNeurons:  28,
		outputNeurons: 7,
		hiddenNeurons: 28,
		numEpochs:     3000,
		learningRate:  0.05,
	}
	network := newNetwork(config)
	if err := network.train(inputs, labels); err != nil {
		log.Fatal(err)
	}
	predictions, err := network.predict(inputs)
	if err != nil {
		log.Fatal(err)
	}
	// A prediction counts as correct when the predicted row's maximum
	// value falls on the one-hot label column.
	var truePosNeg int
	numPreds, _ := predictions.Dims()
	for i := 0; i < numPreds; i++ {
		labelRow := mat.Row(nil, i, labels)
		var prediction int
		for idx, label := range labelRow {
			if label == 1.0 {
				prediction = idx
				break
			}
		}
		if predictions.At(i, prediction) == floats.Max(mat.Row(nil, i, predictions)) {
			truePosNeg++
		}
	}
	accuracy := float64(truePosNeg) / float64(numPreds)
	fmt.Printf("\nAccuracy = %0.2f\n\n", accuracy)
}
// newNetwork returns an untrained network holding the given
// configuration; the weight matrices are populated later by train.
func newNetwork(config neuralNetConfig) *neuralNet {
	nn := neuralNet{config: config}
	return &nn
}
// sigmoid is the logistic activation 1 / (1 + e^-x).
func sigmoid(x float64) float64 {
	denom := 1.0 + math.Exp(-x)
	return 1.0 / denom
}
// sigmoidPrime is the derivative of the logistic function, via the
// identity s'(x) = s(x) * (1 - s(x)); the sigmoid is inlined so the
// value is computed once.
func sigmoidPrime(x float64) float64 {
	s := 1.0 / (1.0 + math.Exp(-x))
	return s * (1.0 - s)
}
// train learns the network parameters from inputs x and one-hot labels y.
// Weights and biases are initialized uniformly in [0, 1) and then updated
// in place by backpropagate; the trained matrices are stored on nn.
func (nn *neuralNet) train(x, y *mat.Dense) error {
	randSource := rand.NewSource(time.Now().UnixNano())
	randGen := rand.New(randSource)
	wHidden := mat.NewDense(nn.config.inputNeurons, nn.config.hiddenNeurons, nil)
	bHidden := mat.NewDense(1, nn.config.hiddenNeurons, nil)
	wOut := mat.NewDense(nn.config.hiddenNeurons, nn.config.outputNeurons, nil)
	bOut := mat.NewDense(1, nn.config.outputNeurons, nil)
	// Fill every parameter matrix with random values by writing straight
	// into the backing data slices.
	wHiddenRaw := wHidden.RawMatrix().Data
	bHiddenRaw := bHidden.RawMatrix().Data
	wOutRaw := wOut.RawMatrix().Data
	bOutRaw := bOut.RawMatrix().Data
	for _, param := range [][]float64{
		wHiddenRaw,
		bHiddenRaw,
		wOutRaw,
		bOutRaw,
	} {
		for i := range param {
			param[i] = randGen.Float64()
		}
	}
	output := new(mat.Dense)
	if err := nn.backpropagate(x, y, wHidden, bHidden, wOut, bOut, output); err != nil {
		return err
	}
	nn.wHidden = wHidden
	nn.bHidden = bHidden
	nn.wOut = wOut
	nn.bOut = bOut
	return nil
}
// backpropagate runs numEpochs epochs of full-batch gradient descent,
// updating all four parameter matrices in place and leaving the final
// feed-forward result in output.
func (nn *neuralNet) backpropagate(x, y, wHidden, bHidden, wOut, bOut, output *mat.Dense) error {
	for i := 0; i < nn.config.numEpochs; i++ {
		// Feed forward: hidden layer.
		hiddenLayerInput := new(mat.Dense)
		hiddenLayerInput.Mul(x, wHidden)
		addBHidden := func(_, col int, v float64) float64 { return v + bHidden.At(0, col) }
		hiddenLayerInput.Apply(addBHidden, hiddenLayerInput)
		hiddenLayerActivations := new(mat.Dense)
		applySigmoid := func(_, _ int, v float64) float64 { return sigmoid(v) }
		hiddenLayerActivations.Apply(applySigmoid, hiddenLayerInput)
		// Feed forward: output layer. (A leftover debug Println of x and
		// wHidden has been removed from this loop.)
		outputLayerInput := new(mat.Dense)
		outputLayerInput.Mul(hiddenLayerActivations, wOut)
		addBOut := func(_, col int, v float64) float64 { return v + bOut.At(0, col) }
		outputLayerInput.Apply(addBOut, outputLayerInput)
		output.Apply(applySigmoid, outputLayerInput)
		// Backpropagate the error.
		networkError := new(mat.Dense)
		networkError.Sub(y, output)
		slopeOutputLayer := new(mat.Dense)
		applySigmoidPrime := func(_, _ int, v float64) float64 { return sigmoidPrime(v) }
		slopeOutputLayer.Apply(applySigmoidPrime, output)
		slopeHiddenLayer := new(mat.Dense)
		slopeHiddenLayer.Apply(applySigmoidPrime, hiddenLayerActivations)
		dOutput := new(mat.Dense)
		dOutput.MulElem(networkError, slopeOutputLayer)
		errorAtHiddenLayer := new(mat.Dense)
		errorAtHiddenLayer.Mul(dOutput, wOut.T())
		dHiddenLayer := new(mat.Dense)
		dHiddenLayer.MulElem(errorAtHiddenLayer, slopeHiddenLayer)
		// Adjust output-layer parameters. BUG FIX: the previous code
		// stored the updated values into wOutAdj/bOutAdj (which were
		// then discarded), so wOut and bOut never changed across epochs;
		// update them in place like wHidden/bHidden below.
		wOutAdj := new(mat.Dense)
		wOutAdj.Mul(hiddenLayerActivations.T(), dOutput)
		wOutAdj.Scale(nn.config.learningRate, wOutAdj)
		wOut.Add(wOut, wOutAdj)
		bOutAdj, err := sumAlongAxis(0, dOutput)
		if err != nil {
			return err
		}
		bOutAdj.Scale(nn.config.learningRate, bOutAdj)
		bOut.Add(bOut, bOutAdj)
		// Adjust hidden-layer parameters.
		wHiddenAdj := new(mat.Dense)
		wHiddenAdj.Mul(x.T(), dHiddenLayer)
		wHiddenAdj.Scale(nn.config.learningRate, wHiddenAdj)
		wHidden.Add(wHidden, wHiddenAdj)
		bHiddenAdj, err := sumAlongAxis(0, dHiddenLayer)
		if err != nil {
			return err
		}
		bHiddenAdj.Scale(nn.config.learningRate, bHiddenAdj)
		bHidden.Add(bHidden, bHiddenAdj)
	}
	return nil
}
// sumAlongAxis collapses m along the given axis: axis 0 sums each column
// into a 1 x cols row vector, axis 1 sums each row into a rows x 1 column
// vector; any other axis is an error.
func sumAlongAxis(axis int, m *mat.Dense) (*mat.Dense, error) {
	rows, cols := m.Dims()
	switch axis {
	case 0:
		sums := make([]float64, cols)
		for c := range sums {
			sums[c] = floats.Sum(mat.Col(nil, c, m))
		}
		return mat.NewDense(1, cols, sums), nil
	case 1:
		sums := make([]float64, rows)
		for r := range sums {
			sums[r] = floats.Sum(mat.Row(nil, r, m))
		}
		return mat.NewDense(rows, 1, sums), nil
	default:
		return nil, errors.New("invalid axis, must be 0 or 1")
	}
}
// predict runs a single feed-forward pass over x using the trained
// parameters and returns the output-layer activations.
func (nn neuralNet) predict(x *mat.Dense) (*mat.Dense, error) {
	// Guard against use before training: the weight matrices are only
	// set by a successful train call, and gonum would panic on nil here.
	if nn.wHidden == nil || nn.bHidden == nil || nn.wOut == nil || nn.bOut == nil {
		return nil, errors.New("the neural net weights are empty; call train first")
	}
	output := new(mat.Dense)
	hiddenLayerInput := new(mat.Dense)
	hiddenLayerInput.Mul(x, nn.wHidden)
	addBHidden := func(_, col int, v float64) float64 { return v + nn.bHidden.At(0, col) }
	hiddenLayerInput.Apply(addBHidden, hiddenLayerInput)
	hiddenLayerActivations := new(mat.Dense)
	applySigmoid := func(_, _ int, v float64) float64 { return sigmoid(v) }
	hiddenLayerActivations.Apply(applySigmoid, hiddenLayerInput)
	outputLayerInput := new(mat.Dense)
	outputLayerInput.Mul(hiddenLayerActivations, nn.wOut)
	addBOut := func(_, col int, v float64) float64 { return v + nn.bOut.At(0, col) }
	outputLayerInput.Apply(addBOut, outputLayerInput)
	output.Apply(applySigmoid, outputLayerInput)
	return output, nil
}
// makeInputAndLabels reads one chat line per row from fileName, encodes
// each cleaned line as 28 character indices (zero-padded on the right),
// and pairs the rows with hard-coded one-hot labels.
//
// NOTE(review): the label data comes from the fixed 28-entry y array, but
// both result matrices are sized by the number of lines actually read —
// mat.NewDense panics unless the file contains exactly 28 lines. Confirm
// chats.txt stays in sync with y.
func makeInputAndLabels(fileName string) (*mat.Dense, *mat.Dense) {
	var X []string
	// y holds the class (0-6) of each of the 28 training lines.
	var y = [28]int{0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}
	file, err := os.Open(fileName)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		X = append(X, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	var nColumns = 28
	var nLabels = 7
	var inputsData []float64
	var labelsData []float64
	var cleanedTextArray []string
	for i := 0; i < len(X); i++ {
		cleanedTextArray = append(cleanedTextArray, clear_text(X[i]))
	}
	// Encode each cleaned line as character indices, right-padded with 0.
	// NOTE(review): lines longer than nColumns characters are NOT
	// truncated, which would also break the matrix dimensions — confirm
	// all inputs are short.
	for i := 0; i < len(cleanedTextArray); i++ {
		var cleanedText string = cleanedTextArray[i]
		var textInt []float64
		for j := 0; j < len(cleanedText); j++ {
			var char = string(cleanedText[j])
			textInt = append(textInt, float64(word_to_idx[char]))
		}
		if len(textInt) < nColumns {
			diff := nColumns - len(textInt)
			for i := 0; i < diff; i++ {
				textInt = append(textInt, 0)
			}
		}
		inputsData = append(inputsData, textInt...)
	}
	// One-hot encode the labels. NOTE(review): the `y[i] == nLabels`
	// branch can never fire (y values are 0-6, nLabels is 7) — it looks
	// like dead code.
	for i := 0; i < len(y); i++ {
		var labelHot []float64
		for j := 0; j < nLabels; j++ {
			if y[i] == j || y[i] == nLabels {
				labelHot = append(labelHot, 1)
			} else {
				labelHot = append(labelHot, 0)
			}
		}
		labelsData = append(labelsData, labelHot...)
	}
	inputs := mat.NewDense(len(cleanedTextArray), nColumns, inputsData)
	labels := mat.NewDense(len(cleanedTextArray), nLabels, labelsData)
	return inputs, labels
}
// clear_text normalizes a sentence for the classifier: it strips
// diacritics (NFD-decompose, drop combining marks, NFC-recompose),
// removes every character other than ASCII letters and '!', and
// lower-cases the result.
//
// Fixes: the constant pattern is compiled with MustCompile instead of
// Compile+log.Fatal, and the original two-pass "keep '?' in the class,
// then delete every '?'" is folded into a single equivalent pass
// (output is identical: no '?' ever survived).
func clear_text(text string) string {
	t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
	output, _, e := transform.String(t, text)
	if e != nil {
		panic(e)
	}
	// Ideally compiled once at package scope; kept here to preserve the
	// block boundary.
	text2 := regexp.MustCompile("[^a-zA-Z!]+").ReplaceAllString(output, "")
	return strings.ToLower(text2)
}
// word_to_idx maps each lowercase letter (plus "ñ") to a positive
// integer used to encode characters as model inputs. Index 0 is
// implicitly reserved for padding and for any character missing from
// the map (a map miss yields the zero value).
var word_to_idx = map[string]int{
	"a": 1,
	"b": 2,
	"c": 3,
	"d": 4,
	"e": 5,
	"f": 6,
	"g": 7,
	"h": 8,
	"i": 9,
	"j": 10,
	"k": 11,
	"l": 12,
	"m": 13,
	"n": 14,
	"ñ": 15,
	"o": 16,
	"p": 17,
	"q": 18,
	"r": 19,
	"s": 20,
	"t": 21,
	"u": 22,
	"v": 23,
	"w": 24,
	"x": 25,
	"y": 26,
	"z": 27,
}
|
package main
type article struct {
ID int json:"id"
Title string json:"title"
Content string json:"content"
}
var articleList = []article{
article{ID:1, Title:"Article 1", Content:"Article 1 body"},
article{ID:2, Title:"Article 2", Content:"Article 2 body"}
}
// getAllArticles returns the full in-memory article list. Callers share
// the backing array; mutating the result mutates the store.
func getAllArticles() []article {
	return articleList
}
package payserver
import (
"bytes"
"centerclient"
"common"
"crypto/md5"
"encoding/hex"
"encoding/xml"
"fmt"
"io/ioutil"
"logger"
"math/rand"
"net"
"net/http"
"proto"
"rpc"
"rpcplus"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
)
// PayService implements the WeChat payment server: it exposes RPC
// methods to the internal cluster and an HTTP callback endpoint to
// WeChat.
type PayService struct {
	pCachePool *common.CachePool         // redis cache pool for orders/items/statistics
	sl         *common.SimpleLockService // per-key lock, serializes operations per user/openid
	payUrl     string                    // public callback URL handed to WeChat as notify_url
}
// Redis table names used by the pay service. Note: ORDER_TALBE and
// PREODER are misspellings of "table"/"preorder"; they are kept as-is
// because renaming would break every reference in this package.
const (
	ORDER_TALBE = "order_table" // prepay orders awaiting the WeChat callback
	ITEM_TABLE  = "item_table"  // purchased items awaiting pickup by QueryPayInfo
	PREODER     = "pre_order"   // pending (not yet finished) orders keyed by openid
	STATISTIC   = "statistic"   // accumulated recharge total
)
// pPayService is the process-wide payment service singleton, assigned in CreatePayServer.
var pPayService *PayService
// createTransport returns an HTTP transport with timeouts configured
// (delegates to common.CreateTransport).
func createTransport() *http.Transport {
	return common.CreateTransport()
}
// CreatePayServer reads the pay-server configuration, constructs the
// singleton PayService, and blocks serving both the internal RPC
// listener and the public HTTP endpoint.
func CreatePayServer() {
	var cfg common.PaySereverCfg
	if err := common.ReadPayConfig(&cfg); err != nil {
		logger.Error("ReadPayConfig failed", err)
		return
	}
	pPayService = &PayService{
		pCachePool: common.NewCachePool(cfg.Maincache),
		sl:         common.CreateSimpleLock(),
		payUrl:     cfg.Host,
	}
	// Load designer/global configuration tables.
	//connector.LoadConfigFiles(common.GetDesignerDir())
	common.LoadGlobalConfig()
	wg := sync.WaitGroup{}
	wg.Add(2)
	// Internal (cluster-facing) RPC listener.
	go pPayService.initTcp(&cfg, &wg)
	// Public (WeChat-facing) HTTP listener.
	go pPayService.initHttp(&cfg, &wg)
	wg.Wait()
}
// initTcp listens on the internal address and serves RPC connections
// for the pay service until Accept fails. Each connection is served on
// its own goroutine with a panic guard.
//
// Fix: conn.Close() was previously called after ServeConn in the
// goroutine body, so a recovered panic skipped it and leaked the
// connection; it is now deferred.
func (self *PayService) initTcp(cfg *common.PaySereverCfg, wg *sync.WaitGroup) error {
	defer wg.Done()
	listener, err := net.Listen("tcp", cfg.InnerHost)
	if err != nil {
		logger.Error("Listening to: %s %s", cfg.InnerHost, " failed !!")
		return err
	}
	defer listener.Close()
	rpcServer := rpcplus.NewServer()
	rpcServer.Register(pPayService)
	for {
		conn, err := listener.Accept()
		if err != nil {
			logger.Error("payServer StartServices %s", err.Error())
			break
		}
		go func() {
			// Registered first so it runs last: the connection is closed
			// even when ServeConn panics.
			defer conn.Close()
			defer func() {
				if r := recover(); r != nil {
					logger.Info("payServer Rpc Runtime Error: %s", r)
					debug.PrintStack()
				}
			}()
			rpcServer.ServeConn(conn)
		}()
	}
	return nil
}
// initHttp registers the root handler and serves the public HTTP
// endpoint (the WeChat callback). ListenAndServe blocks and only
// returns on error.
func (self *PayService) initHttp(cfg *common.PaySereverCfg, wg *sync.WaitGroup) error {
	defer wg.Done()
	http.HandleFunc("/", createHandleFunc(self.handle))
	return http.ListenAndServe(cfg.Host, nil)
}
// handleCALLBACK is the signature of an HTTP handler wrapped by createHandleFunc.
type handleCALLBACK func(w http.ResponseWriter, r *http.Request)

// createHandleFunc wraps f with a recover guard so a panic inside the
// handler reports an error to the client instead of killing the server.
func createHandleFunc(f handleCALLBACK) handleCALLBACK {
	return func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			// Named rec (not r) to avoid shadowing the request.
			if rec := recover(); rec != nil {
				writeString(w, "serious err occurred :", fmt.Sprintf("handle http failed :%s", rec))
			}
		}()
		f(w, r)
	}
}
// Constants for the HTTP callback protocol. REQHEAD and PAYTABLE are
// not referenced in this file; presumably used elsewhere in the package.
const (
	REQHEAD    = "data_packet="
	SUCCESSMSG = "success"
	PAYTABLE   = "tb_all_pay"
)
// handle is the HTTP entry point for the WeChat payment callback. It
// parses the XML notification, verifies it against the cached prepay
// order, records the purchased item for later pickup (QueryPayInfo),
// acknowledges WeChat with an XML reply, and notifies the player. On
// any failure it answers ReturnCode "FAIL" so WeChat will retry.
func (self *PayService) handle(w http.ResponseWriter, r *http.Request) {
	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		logger.Error("handle ReadAll body err", err)
		return
	}
	logger.Info("################收到支付回调,原始数据:%v", string(b))
	// Default reply is FAIL until every check below passes.
	send := &stReturnWechat{}
	send.ReturnCode = "FAIL"
	rst := stWechatCallBack{}
	err = xml.Unmarshal(b, &rst)
	if err != nil {
		logger.Error("微信支付回调,解析xml出错:%v", string(b))
		writeXmlResult(w, send)
		return
	}
	// Fetch the cached order created by CreateOrder for verification.
	buf, err := common.Resis_getbuf(self.pCachePool, ORDER_TALBE, rst.OutTradeNo)
	if err != nil {
		logger.Error("微信支付回调, 取订单缓存失败, 订单号:%s, err:%s", rst.OutTradeNo, err)
		writeXmlResult(w, send)
		return
	}
	// err is necessarily nil here (checked above); a nil buf means the
	// order was already consumed, i.e. a duplicate callback.
	if buf == nil && err == nil {
		logger.Error("微信支付回调, 回调重复")
		send.ReturnMsg = "回调重复"
		writeXmlResult(w, send)
		return
	}
	orderInfo := &OrderInfo{}
	if err := common.GobDecode(buf, orderInfo); err != nil {
		logger.Error("微信支付回调, 解析订单缓存失败:%s", err)
		writeXmlResult(w, send)
		return
	}
	// Cache the not-yet-finished order (transactionId_itemId_uid_openId)
	// so QueryOrder can recover if the flow below fails part-way.
	complexId := rst.TransactionId + "_" + orderInfo.ItemId + "_" + orderInfo.Uid + "_" + orderInfo.OpenId
	err = common.Redis_setString(self.pCachePool, PREODER, rst.Openid, complexId)
	if err != nil {
		logger.Error("CreateOrder 缓存未完成订单失败 errorreq.Openid:%s", err, rst.Openid)
		writeXmlResult(w, send)
		return
	}
	// Payment-result notification pushed to the player at each exit path.
	payRst := &rpc.PayResultNotify{}
	payRst.SetResult(false)
	payRst.SetPartnerId(orderInfo.ItemId)
	logger.Info("################收到支付回调:%v", rst)
	if rst.ReturnCode != "SUCCESS" {
		logger.Error("微信支付回调, 支持失败,原因:%s", rst.ReturnMsg)
		send.ReturnCode = "FAIL"
		payRst.SetErrorDesc(send.ReturnCode)
		writeXmlResult(w, send)
		centerclient.SendPayResult2Player([]string{orderInfo.Uid}, payRst)
		return
	}
	if rst.ResulCode != "SUCCESS" {
		logger.Error("微信支付回调, 支持失败,code:%s 原因:%s", rst.ErrCode, rst.ErrCodeDes)
		send.ReturnCode = "FAIL"
		payRst.SetErrorDesc(send.ReturnCode)
		writeXmlResult(w, send)
		centerclient.SendPayResult2Player([]string{orderInfo.Uid}, payRst)
		return
	}
	// Verify the callback signature against our own computation.
	desiCfg := common.GetDesignerCfg()
	sign := self.gen_vertify_sign(&rst, desiCfg.CPKey)
	if sign != rst.Sign {
		send.ReturnMsg = "订单验证失败"
		logger.Error("订单验证失败,签名:%s, 签名wechat:%s", sign, rst.Sign)
		payRst.SetErrorDesc(send.ReturnMsg)
		writeXmlResult(w, send)
		centerclient.SendPayResult2Player([]string{orderInfo.Uid}, payRst)
		return
	}
	// Disabled merchant/openid cross-check, kept for reference:
	// if rst.MchId != orderInfo.PartnerId || rst.Openid != orderInfo.OpenId {
	// logger.Error("订单验证失败rst.Appid:%s, orderInfo.AppId:%s , rst.Openid:%s, orderInfo.OpendId:%s",
	// rst.MchId, orderInfo.PartnerId, rst.Openid, orderInfo.OpenId)
	// send.ReturnMsg = "订单验证失败"
	// payRst.SetErrorDesc(send.ReturnMsg)
	// writeXmlResult(w, send)
	// centerclient.SendPayResult2Player([]string{orderInfo.Uid}, payRst)
	// return
	// }
	// Persist the purchased item so QueryPayInfo can deliver it.
	err = common.Redis_setString(self.pCachePool, ITEM_TABLE, rst.Openid, orderInfo.ItemId)
	if err != nil {
		logger.Error("微信支付回调, 保存支付结果出错:uid, itemId:%s", orderInfo.Uid, orderInfo.ItemId)
		payRst.SetErrorDesc("保存充值结果失败")
		writeXmlResult(w, send)
		centerclient.SendPayResult2Player([]string{orderInfo.Uid}, payRst)
		return
	}
	send.ReturnCode = "SUCCESS"
	writeXmlResult(w, send)
	// Accumulate the recharge statistic.
	self.statistic(rst.TotalFee)
	// Drop the pending-order marker.
	err = common.Redis_del(self.pCachePool, PREODER, rst.Openid)
	if err != nil {
		logger.Error("删除未完成的订单失败,uid:%s, ", rst.Openid)
		return
	}
	// Drop the prepay order cache.
	err = common.Redis_del(self.pCachePool, ORDER_TALBE, rst.OutTradeNo)
	if err != nil {
		logger.Error("微信支付回调, 删除订单失败, 订单号:%s", rst.OutTradeNo)
		return
	}
	payRst.SetResult(true)
	centerclient.SendPayResult2Player([]string{orderInfo.Uid}, payRst)
}
// statistic accumulates total_fee (a decimal string from the WeChat
// callback) into the recharge-statistic counter kept in redis.
//
// Fix: the Atoi error was silently discarded, so a malformed fee was
// counted as 0; it is now logged and the update is skipped.
func (self *PayService) statistic(total_fee string) {
	logger.Info("statistic called total_fee:%s", total_fee)
	if total_fee == "" {
		return
	}
	fee, err := strconv.Atoi(total_fee)
	if err != nil {
		logger.Error("statistic Atoi err:%s", err)
		return
	}
	value, err := common.Redis_getInt(self.pCachePool, ORDER_TALBE, STATISTIC)
	if err != nil {
		logger.Error("statistic Redis_getInt err:%s", err)
		return
	}
	err = common.Redis_setInt(self.pCachePool, ORDER_TALBE, STATISTIC, fee+value)
	if err != nil {
		logger.Error("statistic Redis_setInt err:%s", err)
		return
	}
}
// GetRechargeStatistic is an RPC method returning the accumulated
// recharge total. A redis failure is logged but still returns nil, so
// the caller receives a zero value rather than an RPC error —
// presumably deliberate best-effort behavior; confirm with callers.
func (self *PayService) GetRechargeStatistic(req *proto.RechargeStatisticReq, rst *proto.RechargeStatisticRst) error {
	logger.Info("GetRechargeStatistic called")
	value, err := common.Redis_getInt(self.pCachePool, ORDER_TALBE, STATISTIC)
	if err != nil {
		logger.Error("GetRechargeStatistic Redis_getInt err:%s", err)
		return nil
	}
	rst.Value = value
	return nil
}
// CreateOrder is the player-facing RPC that creates a WeChat prepay
// order: it builds and signs the prepay request, posts it to WeChat,
// caches the resulting order in redis for three days, and fills rst
// with everything the client SDK needs to launch the payment.
//
// Fixes: the HTTP response was dereferenced before checking the Post
// error (nil-pointer crash on any transport failure); the body is now
// closed on every path; the Unmarshal failure log no longer claims the
// error came from ReadAll.
func (self *PayService) CreateOrder(req *proto.CreateOrder, rst *proto.CreateOrderRst) error {
	logger.Info("CreateOrder has been called...")
	self.sl.WaitLock(req.Uid)
	defer self.sl.WaitUnLock(req.Uid)
	client := &http.Client{
		Transport: createTransport(),
	}
	desiCfg := common.GetDesignerCfg()
	if desiCfg == nil {
		logger.Error("CreateOrder 获取desinger.json出错")
		return nil
	}
	// Build the local order record.
	orderInfo := &OrderInfo{}
	orderNum := common.GenUUIDWith32(0)
	orderInfo.OrderNum = orderNum
	orderInfo.Uid = req.Uid
	orderInfo.ItemId = req.ItemId
	orderInfo.CreateTime = uint32(time.Now().Unix())
	orderInfo.AppId = desiCfg.Appid
	orderInfo.PartnerId = desiCfg.Mchid
	orderInfo.OpenId = req.OpenId
	prepayReq := &WechatPrepayReq{
		Appid:          orderInfo.AppId,
		Mchid:          orderInfo.PartnerId,
		Noncestr:       strconv.Itoa(rand.Intn(1000000)),
		Body:           "泸州棋牌-购买道具",
		OutTradeNo:     orderNum,
		TotalFee:       req.Money,
		SpbillCreateIp: req.Ip,
		NotifyUrl:      self.payUrl,
		TradeType:      "APP",
	}
	prepayReq.Sign = self.gen_prepay_sign(prepayReq, desiCfg.CPKey)
	logger.Info("CreateOrder 签名为:%s", prepayReq.Sign)
	logger.Info("CreateOrder OrderInfo", prepayReq)
	// Serialize the prepay request as XML.
	body, err := xml.MarshalIndent(prepayReq, " ", " ")
	if err != nil {
		logger.Error("CreateOrder MarshalIndent error: %v", err)
		return err
	}
	buf := bytes.NewBuffer(body)
	// Submit the request to WeChat's prepay endpoint.
	url := desiCfg.WeChatPayPreOrder
	logger.Info("post url:", url)
	res, err := client.Post(url, "application/x-www-form-urlencoded", buf)
	if err != nil {
		logger.Error("CreateOrder client.Post error: %v", err)
		return err
	}
	defer res.Body.Close()
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		logger.Error("CreateOrder ioutil.ReadAll error: %v", err)
		return err
	}
	logger.Info("CreateOrder body info:%s", string(b))
	// Parse the XML reply.
	prepayRst := WechatPrepayRst{}
	if err := xml.Unmarshal(b, &prepayRst); err != nil {
		logger.Error("CreateOrder xml.Unmarshal error: %v", err)
		return nil
	}
	logger.Info("##########prepayRst:%v", prepayRst)
	if prepayRst.ReturnMsg != "OK" {
		logger.Error("CreateOrder FAILED, prepayRst.ReturnMsg:%s", prepayRst.ReturnMsg)
		return nil
	}
	if prepayRst.ResultCode != "SUCCESS" {
		logger.Error("CreateOrder FAILED, ErrorCode:%s, desc:%s", prepayRst.ErrCode, prepayRst.ErrCodeDes)
		return nil
	}
	orderInfo.PrepayId = prepayRst.PrepayId
	orderInfo.TimeStamp = strconv.Itoa(int(time.Now().Unix()))
	orderInfo.NonceSt = strconv.Itoa(rand.Intn(1000000))
	orderInfo.Sign = self.gen_prepay_sign2client(orderInfo, desiCfg.CPKey)
	logger.Info("#########订单信息:%v", orderInfo)
	newBuf, err := common.GobEncode(orderInfo)
	if err != nil {
		logger.Error("CreateOrder GobEncode error, req.Uid:%s, itemId:%s", err, req.Uid, req.ItemId)
		return err
	}
	// Cache the order (expires after 3 days) so the callback can verify it.
	err = common.Resis_setbuf(self.pCachePool, ORDER_TALBE, orderNum, newBuf)
	if err != nil {
		logger.Error("CreateOrder setBuf errorreq.Uid:%s, itemId:%s", err, req.Uid, req.ItemId)
		return err
	}
	err = common.Redis_setexpire(self.pCachePool, ORDER_TALBE, orderNum, "259200")
	if err != nil {
		logger.Error("CreateOrder setexpire errorreq.Uid:%s, itemId:%s", err, req.Uid, req.ItemId)
		return err
	}
	// Hand the client everything the WeChat SDK needs.
	rst.OrderNum = orderNum
	rst.Appid = orderInfo.AppId
	rst.Noncestr = orderInfo.NonceSt
	rst.Package = "Sign=WXPay"
	rst.Partnerid = orderInfo.PartnerId
	rst.Prepayid = orderInfo.PrepayId
	rst.Timestamp = orderInfo.TimeStamp
	rst.Sign = orderInfo.Sign
	return nil
}
// gen_prepay_sign builds the WeChat MD5 signature for the prepay
// request: parameters sorted by key, joined as k=v with '&', the API
// key appended, then upper-cased MD5 hex.
//
// Fix: the notify_url segment had been corrupted by HTML-entity
// mangling ("&not" rendered as "¬", producing "¬ify_url="); it is
// restored to "&notify_url=" as required by the WeChat sign algorithm.
func (self *PayService) gen_prepay_sign(info *WechatPrepayReq, key string) string {
	money := strconv.Itoa(int(info.TotalFee))
	stringA := "appid=" + info.Appid + "&body=" + info.Body +
		"&mch_id=" + info.Mchid + "&nonce_str=" + info.Noncestr +
		"&notify_url=" + info.NotifyUrl + "&out_trade_no=" + info.OutTradeNo +
		"&spbill_create_ip=" + info.SpbillCreateIp + "&total_fee=" + money +
		"&trade_type=" + info.TradeType
	stringSignTemp := stringA + "&key=" + key
	logger.Info("签名前:%s", stringSignTemp)
	sum := md5.Sum([]byte(stringSignTemp))
	return strings.ToUpper(hex.EncodeToString(sum[:]))
}
// gen_prepay_sign2client builds the second-stage MD5 signature handed
// back to the game client for invoking the WeChat SDK.
//
// Fix: the timestamp segment had been corrupted by HTML-entity
// mangling ("&times" rendered as "×", producing "×tamp="); it is
// restored to "&timestamp=" as required by the WeChat sign algorithm.
func (self *PayService) gen_prepay_sign2client(info *OrderInfo, key string) string {
	stringA := "appid=" + info.AppId + "&noncestr=" + info.NonceSt +
		"&package=" + "Sign=WXPay" + "&partnerid=" + info.PartnerId +
		"&prepayid=" + info.PrepayId + "&timestamp=" + info.TimeStamp
	stringSignTemp := stringA + "&key=" + key
	sum := md5.Sum([]byte(stringSignTemp))
	return strings.ToUpper(hex.EncodeToString(sum[:]))
}
// gen_query_order_sign builds the MD5 signature for the order-query
// request. (The struct field RransactionId is a typo for TransactionId
// in the project-declared type; it cannot be fixed here.)
func (self *PayService) gen_query_order_sign(info *stWechatPayQuery, key string) string {
	stringSignTemp := "appid=" + info.Appid + "&mch_id=" + info.MchId +
		"&nonce_str=" + info.NonceStr +
		"&transaction_id=" + info.RransactionId +
		"&key=" + key
	logger.Info("签名前:%s", stringSignTemp)
	sum := md5.Sum([]byte(stringSignTemp))
	return strings.ToUpper(hex.EncodeToString(sum[:]))
}
// appid
// bank_type
// cash_fee
// fee_type
// is_subscribe
// mch_id
// nonce_str
// openid
// out_trade_no
// result_code
// return_code
// time_end
// total_fee
// trade_type
// transaction_id
// <appid><![CDATA[wxac6228496497182c]]></appid>
// <bank_type><![CDATA[CCB_DEBIT]]></bank_type>
// <cash_fee><![CDATA[1]]></cash_fee>
// <fee_type><![CDATA[CNY]]></fee_type>
// <is_subscribe><![CDATA[N]]></is_subscribe>
// <mch_id><![CDATA[1422709602]]></mch_id>
// <nonce_str><![CDATA[498081]]></nonce_str>
// <openid><![CDATA[oplHewf0AO3U-Aq9wYgHOhK-OVXA]]></openid>
// <out_trade_no><![CDATA[00000100c81e34593cc96e59da8ec213]]></out_trade_no>
// <result_code><![CDATA[SUCCESS]]></result_code>
// <return_code><![CDATA[SUCCESS]]></return_code>
// <sign><![CDATA[BE5FFAD6EA6E388FC1491CBA5CC3DC5B]]></sign>
// <time_end><![CDATA[20170604225311]]></time_end>
// <total_fee>1</total_fee>
// <trade_type><![CDATA[APP]]></trade_type>
// <transaction_id><![CDATA[4005302001201706044312028606]]></transaction_id>
// gen_vertify_sign recomputes the MD5 signature of a WeChat callback
// from its fields (sorted by key, '&'-joined, API key appended) so it
// can be compared with the sign WeChat sent. ("vertify" is a typo for
// "verify" but callers use this name, so it is kept.)
func (self *PayService) gen_vertify_sign(info *stWechatCallBack, key string) string {
	parts := []string{
		"appid=" + info.Appid,
		"bank_type=" + info.BankType,
		"cash_fee=" + info.CashFee,
		"fee_type=" + info.FeeType,
		"is_subscribe=" + info.IsSubscribe,
		"mch_id=" + info.MchId,
		"nonce_str=" + info.NonceStr,
		"openid=" + info.Openid,
		"out_trade_no=" + info.OutTradeNo,
		"result_code=" + info.ResulCode,
		"return_code=" + info.ReturnCode,
		"time_end=" + info.TimeEnd,
		"total_fee=" + info.TotalFee,
		"trade_type=" + info.TradeType,
		"transaction_id=" + info.TransactionId,
		"key=" + key,
	}
	stringSignTemp := strings.Join(parts, "&")
	sum := md5.Sum([]byte(stringSignTemp))
	return strings.ToUpper(hex.EncodeToString(sum[:]))
}
// QueryOrder checks with WeChat whether a player's pending order (left
// behind by a partially-failed callback) actually succeeded; if so it
// records the item, cleans up the caches, and notifies the player.
//
// Fixes: the HTTP response was dereferenced before checking the Post
// error (nil-pointer crash on any transport failure); the body is now
// closed via defer; the split pending-order id is length-checked before
// indexing; the MarshalIndent failure log no longer says "CreateOrder".
func (self *PayService) QueryOrder(openId string) {
	client := &http.Client{
		Transport: createTransport(),
	}
	logger.Info("查询订单,QueryOrder:%s", openId)
	complexId, err := common.Redis_getString(self.pCachePool, PREODER, openId)
	if err != nil {
		logger.Error("QueryOrder 查询订单 common.Redis_getString error:", err, openId)
		return
	}
	if complexId == "" {
		logger.Info("此玩家没有未完成的订单:%s", openId)
		return
	}
	// Layout written by handle: transactionId_itemId_uid_openId.
	ids := strings.Split(complexId, "_")
	if len(ids) < 4 {
		logger.Error("QueryOrder malformed pending-order entry:%s", complexId)
		return
	}
	transactionId := ids[0]
	desiCfg := common.GetDesignerCfg()
	if desiCfg == nil {
		logger.Error("查询订单 获取desinger.json出错")
		return
	}
	// A pending order exists; ask WeChat for its state.
	prepayReq := &stWechatPayQuery{
		Appid:         desiCfg.Appid,
		MchId:         desiCfg.Mchid,
		NonceStr:      strconv.Itoa(rand.Intn(1000000)),
		RransactionId: transactionId,
	}
	prepayReq.Sign = self.gen_query_order_sign(prepayReq, desiCfg.CPKey)
	logger.Info("QueryOrder 签名为:%s", prepayReq.Sign)
	body, err := xml.MarshalIndent(prepayReq, " ", " ")
	if err != nil {
		logger.Error("QueryOrder MarshalIndent error: %v", err)
		return
	}
	bufBody := bytes.NewBuffer(body)
	// Submit the query.
	url := desiCfg.WeChatQueryUrl
	logger.Info("post url:", url)
	res, err := client.Post(url, "application/x-www-form-urlencoded", bufBody)
	if err != nil {
		logger.Error("QueryOrder client.Post error: %v", err)
		return
	}
	defer res.Body.Close()
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		logger.Error("QueryOrder ioutil.ReadAll error: %v", err)
		return
	}
	logger.Info("QueryOrder body info:%s", string(b))
	logger.Info("################查询订单,原始数据:%v", string(b))
	rst := stWechatCallBack{}
	err = xml.Unmarshal(b, &rst)
	if err != nil {
		logger.Error("查询订单,解析xml出错:%v", string(b))
		return
	}
	logger.Info("################收到支付回调:%v", rst)
	if rst.ReturnCode != "SUCCESS" {
		logger.Error("查询订单, 支持失败,原因:%s", rst.ReturnMsg)
		return
	}
	if rst.ResulCode != "SUCCESS" {
		logger.Error("查询订单失败,code:%s 原因:%s", rst.ErrCode, rst.ErrCodeDes)
		return
	}
	// Disabled order-cache cross-check, kept for reference:
	// buf, err := common.Resis_getbuf(self.pCachePool, ORDER_TALBE, rst.OutTradeNo)
	// if err != nil {
	// logger.Error("查询订单, 取订单缓存失败, 订单号:%s, err:%s", rst.OutTradeNo, err)
	// return
	// }
	// if buf == nil && err == nil {
	// logger.Error("查询订单, 回调重复")
	// return
	// }
	// orderInfo := &OrderInfo{}
	// if err := common.GobDecode(buf, orderInfo); err != nil {
	// logger.Error("查询订单, 解析订单缓存失败:%s", err)
	// return
	// }
	// Signature check — result only logged, not enforced (the return is
	// deliberately commented out in the original).
	sign := self.gen_vertify_sign(&rst, desiCfg.CPKey)
	if rst.Sign != sign {
		logger.Error("订单验证失败,签名:%s, 签名wechat:%s", sign, rst.Sign)
		// return
	}
	// if rst.Openid != ids[3] {
	// logger.Error("订单验证失败 rst.Openid:%s orderInfo.OpenId:%s", rst.Openid, ids[3])
	// return
	// }
	// Persist the purchased item so QueryPayInfo can deliver it.
	err = common.Redis_setString(self.pCachePool, ITEM_TABLE, rst.Openid, ids[1])
	if err != nil {
		logger.Error("查询订单, 保存支付结果出错:openId:%s, itemId:%s", rst.Openid, ids[1])
		return
	}
	// Drop the prepay order cache.
	err = common.Redis_del(self.pCachePool, ORDER_TALBE, rst.OutTradeNo)
	if err != nil {
		logger.Error("查询订单, 删除订单失败, 订单号:%s", rst.OutTradeNo)
		return
	}
	// Drop the pending-order marker.
	err = common.Redis_del(self.pCachePool, PREODER, rst.Openid)
	if err != nil {
		logger.Error("删除未完成的订单失败,uid:%s, ", rst.Openid)
		return
	}
	// Notify the player of the successful payment.
	payRst := &rpc.PayResultNotify{}
	payRst.SetResult(true)
	payRst.SetPartnerId(ids[1])
	centerclient.SendCommonNotify2S([]string{ids[2]}, payRst, "PayResultNotify")
}
// QueryPayInfo is the player-facing RPC that delivers a purchased item
// at most once: if a completed order is cached its item id is returned
// and deleted; otherwise any pending order is re-checked via QueryOrder.
func (self *PayService) QueryPayInfo(req *proto.QueryPayInfo, rst *proto.QueryPayInfoRst) error {
	logger.Info("QueryPayInfo called..........")
	self.sl.WaitLock(req.OpenId)
	defer self.sl.WaitUnLock(req.OpenId)

	rst.ItemId = ""
	itemId, err := common.Redis_getString(self.pCachePool, ITEM_TABLE, req.OpenId)
	if err != nil {
		logger.Error("QueryPayInfo common.Redis_getString error:", err, req.OpenId)
		return err
	}
	if itemId == "" {
		// No completed order cached; check whether a pending one succeeded.
		self.QueryOrder(req.OpenId)
		return nil
	}
	// Consume the cached item before handing it out.
	if err := common.Redis_del(self.pCachePool, ITEM_TABLE, req.OpenId); err != nil {
		logger.Error("QueryPayInfo common.Redis_del error:", err, req.OpenId)
		return err
	}
	rst.ItemId = itemId
	return nil
}
// DeletePayInfo removes any cached purchased-item record for the given
// openid under the per-key lock.
func (self *PayService) DeletePayInfo(req *proto.QueryPayInfo, rst *proto.CommonRst) error {
	logger.Info("DeletePayInfo called..........", req.OpenId)
	self.sl.WaitLock(req.OpenId)
	defer self.sl.WaitUnLock(req.OpenId)

	if err := common.Redis_del(self.pCachePool, ITEM_TABLE, req.OpenId); err != nil {
		logger.Error("DeletePayInfo common.Redis_del error:", err, req.OpenId)
		return err
	}
	return nil
}
|
package util
import (
	"log"
	"strconv"
)
// DB_Config holds the MySQL connection settings. (Underscore names are
// non-idiomatic Go but are kept: external packages reference these
// fields.)
type DB_Config struct {
	Db_host     string // database host
	Db_port     string // database port
	Db_user     string // database user
	Db_password string // database password
	Db_database string // database name
	Db_max_open int    // max open connections
	Db_max_idle int    // max idle connections
}
// Server_Config holds the SSO/OAuth endpoint URLs used by the service.
type Server_Config struct {
	SSO                  string // SSO base URL
	SSO_Login            string // SSO login endpoint
	SSO_Service_Validate string // SSO ticket-validation endpoint
	OAuth                string // OAuth base URL
	OAuth_CAS_Check      string // OAuth CAS check endpoint
}
// Redis_Config holds the redis connection address.
type Redis_Config struct {
	Host string
	Port string
}
// Package-level configuration singletons, populated by init below.
var DB = new(DB_Config)
var Server = new(Server_Config)
var Redis = new(Redis_Config)

// LOG_FILE is the log output path (Windows-specific; presumably a
// deployment assumption — confirm before running elsewhere).
const LOG_FILE = "D:/oauth.log"
// init populates the hard-coded configuration and logs it at startup.
//
// Fix: string(DB.Db_max_open) converted the int to its Unicode code
// point (string(100) == "d"), not its decimal text; strconv.Itoa is the
// correct conversion.
//
// NOTE(review): the DB password is written to the log here — confirm
// this is acceptable before shipping.
func init() {
	DB.Db_host = "127.0.0.1"
	DB.Db_port = "3306"
	DB.Db_user = "root"
	DB.Db_password = "1234"
	DB.Db_database = "oauth"
	DB.Db_max_open = 100
	DB.Db_max_idle = 20
	Server.SSO = "http://test.yourhost.com:5000"
	Server.SSO_Login = "http://test.yourhost.com:5000/login"
	Server.SSO_Service_Validate = "http://test.yourhost.com:5000/serviceValidate"
	Server.OAuth = "http://test.yourhost.com:3000"
	Server.OAuth_CAS_Check = "http://test.yourhost.com:3000/cas_check"
	Redis.Host = "127.0.0.1"
	Redis.Port = "6379"
	log.Println("DB_CONFIG:" + DB.Db_host)
	log.Println("DB_CONFIG:" + DB.Db_port)
	log.Println("DB_CONFIG:" + DB.Db_user)
	log.Println("DB_CONFIG:" + DB.Db_password)
	log.Println("DB_CONFIG:" + DB.Db_database)
	log.Println("DB_CONFIG:" + strconv.Itoa(DB.Db_max_open))
	log.Println("DB_CONFIG:" + strconv.Itoa(DB.Db_max_idle))
	log.Println("SERVER_CONFIG:" + Server.SSO)
	log.Println("SERVER_CONFIG:" + Server.SSO_Login)
	log.Println("SERVER_CONFIG:" + Server.SSO_Service_Validate)
	log.Println("SERVER_CONFIG:" + Server.OAuth)
	log.Println("SERVER_CONFIG:" + Server.OAuth_CAS_Check)
	log.Println("REDIS_CONFIG:" + Redis.Host)
	log.Println("REDIS_CONFIG:" + Redis.Port)
}
|
package service // import "yunion.io/x/onecloud/pkg/cloudid/service"
|
package backup
import (
"time"
snapshotv1 "github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
harvesterv1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
)
// isBackupReady reports whether the backup has been marked ready to use.
func isBackupReady(backup *harvesterv1.VirtualMachineBackup) bool {
	if backup.Status == nil || backup.Status.ReadyToUse == nil {
		return false
	}
	return *backup.Status.ReadyToUse
}
// isBackupProgressing reports whether the backup is still in progress:
// no recorded error and not yet ready to use.
func isBackupProgressing(backup *harvesterv1.VirtualMachineBackup) bool {
	if vmBackupError(backup) != nil {
		return false
	}
	status := backup.Status
	return status == nil || status.ReadyToUse == nil || !*status.ReadyToUse
}
// isBackupError reports whether the backup has a recorded error.
func isBackupError(backup *harvesterv1.VirtualMachineBackup) bool {
	return vmBackupError(backup) != nil
}
// vmBackupError returns the backup's recorded error, or nil when the
// status is unset or error-free.
func vmBackupError(vmBackup *harvesterv1.VirtualMachineBackup) *harvesterv1.Error {
	if status := vmBackup.Status; status != nil {
		return status.Error
	}
	return nil
}
// newReadyCondition builds a BackupConditionReady condition with the
// given status and message, stamped with the current (mockable) time.
func newReadyCondition(status corev1.ConditionStatus, message string) harvesterv1.Condition {
	return harvesterv1.Condition{
		Type:               harvesterv1.BackupConditionReady,
		Status:             status,
		Message:            message,
		LastTransitionTime: currentTime().Format(time.RFC3339),
	}
}
// newProgressingCondition builds a BackupConditionProgressing condition
// with the given status and message, stamped with the current
// (mockable) time.
func newProgressingCondition(status corev1.ConditionStatus, message string) harvesterv1.Condition {
	return harvesterv1.Condition{
		Type:               harvesterv1.BackupConditionProgressing,
		Status:             status,
		Message:            message,
		LastTransitionTime: currentTime().Format(time.RFC3339),
	}
}
// updateBackupCondition upserts condition c into the backup's status
// conditions (reason changes are ignored when comparing).
func updateBackupCondition(ss *harvesterv1.VirtualMachineBackup, c harvesterv1.Condition) {
	ss.Status.Conditions = updateCondition(ss.Status.Conditions, c, false)
}
// updateCondition upserts c into conditions, matching by Type. An
// existing entry is replaced only when its Status (or, when
// includeReason is set, its Reason) differs; otherwise it is left
// untouched so LastTransitionTime is preserved.
func updateCondition(conditions []harvesterv1.Condition, c harvesterv1.Condition, includeReason bool) []harvesterv1.Condition {
	for i := range conditions {
		if conditions[i].Type != c.Type {
			continue
		}
		changed := conditions[i].Status != c.Status
		if includeReason && conditions[i].Reason != c.Reason {
			changed = true
		}
		if changed {
			conditions[i] = c
		}
		return conditions
	}
	return append(conditions, c)
}
// translateError converts a CSI VolumeSnapshotError into the harvester
// Error type; nil maps to nil.
func translateError(e *snapshotv1.VolumeSnapshotError) *harvesterv1.Error {
	if e == nil {
		return nil
	}
	return &harvesterv1.Error{
		Message: e.Message,
		Time:    e.Time,
	}
}
// currentTime returns the current time; it is a variable so tests can
// substitute a fixed clock.
var currentTime = func() *metav1.Time {
	t := metav1.Now()
	return &t
}
|
package common
// LogoText is the ASCII-art logo printed at startup. The raw string
// below must be preserved byte-for-byte; it is rendered verbatim.
var LogoText = `
 ____________ _____________ _____________ _ _____________
___ _/ / / / / _________/ / /
/ ________/ _/ ____ / / /_________ /____ ____/
_ ___/ /_______ / / / / /________ / / /
/ / / /___/ / _ __/ / _/ /
/ _______/ / ____ / _________/ / _ __/ /
_/ / __ / / / / / _ _ _/ / /
__ /____/ /____/ /___/ /_____________/ /____/
`
// HelpTextPage is the full help text shown by the "fast help" command,
// listing every available FAST CLI subcommand. Rendered verbatim.
var HelpTextPage = `
 ____________ _____________ _____________ _ _____________
___ _/ / / / / _________/ / /
/ ________/ _/ ____ / / /_________ /____ ____/
_ ___/ /_______ / / / / /________ / / /
/ / / /___/ / _ __/ / _/ /
/ _______/ / ____ / _________/ / _ __/ /
_/ / __ / / / / / _ _ _/ / /
__ /____/ /____/ /___/ /_____________/ /____/
The first open-source Function-as-a-Service (FaaS) platform written in Go.
Please refer below for available FAST cli.
- fast help
This "help" command will show this help page.
- fast create {your_module_name}
This "create" command will create a .go file with default code as its content
- fast build {your_module_name}
This "build" command will export the .go file into .so file
- fast rm {your_module_name}
This "rm" command will remove both the .go and .so files
- fast start
This "start" command will start the FAST server
`
// DefaultModuleFile is the Go source template written by "fast create";
// the {module_name} and {module_title} placeholders are substituted
// with the user's module name. The template text is emitted verbatim.
var DefaultModuleFile = `package main
import (
"net/http"
)
type {module_name} struct{}
func (m *{module_name}) Call() (interface{}, error) {
response := make(map[string]interface{})
response["code"] = http.StatusOK
response["data"] = m
return response, nil
}
var {module_title} {module_name}
`
|
package main
// IRun dispatches one of the tcp-buffer machine's numbered goroutine
// entry points. The numeric indices are generated identifiers (this
// file appears machine-generated); each case starts the corresponding
// worker goroutine. Unknown indices are reported via _FpfNex.
func (___Vtbm *_TtcpBufMachine) IRun(___Vidx int) {
	switch ___Vidx {
	case 1500101:
		// init: prefer the registered callback, fall back to the default
		// initializer when none is set.
		if nil == ___Vtbm.tbmCBinit {
			go _FtcpBufMachine__1500101x__init(___Vtbm)
		} else {
			go ___Vtbm.tbmCBinit(___Vtbm)
		}
	case 1500201:
		go ___Vtbm._FtcpBufMachine__1500201x__chan_rece__default()
	case 1500301:
		go ___Vtbm._FtcpBufMachine__1500301x__timegap_timeout_delete()
	case 1500302:
		go ___Vtbm._FtcpBufMachine__1500302x__timegap_bufSendTunnelCheck()
	default:
		_FpfNex(" 834821 09 : unknown IRun : %d ", ___Vidx)
	} // switch ___Vidx
}
// _FtcpBufMachine__1500101x__init allocates the machine's channels and
// the tunnel buffer array (capacity depends on the node role), then
// starts the receive and timer worker goroutines via _Frun.
func _FtcpBufMachine__1500101x__init(___Vtbm *_TtcpBufMachine) {
	___Vtbm.tbmCHtcpLocal2RemoteBI = make(chan []byte, 50)
	___Vtbm.tbmCHtcpLocal2RemoteCmdI = make(chan [17]byte, 50)
	___Vtbm.tbmChCheckTunnelTimeOut = make(chan byte, 50)
	___Vtbm.tbmChCheckLocal2RemoteGap = make(chan byte, 50)
	// Tunnel capacity by node role.
	switch _VS.RoleName {
	case "Cn":
		___Vtbm.tbmBufArr.tbaCntMax = 100 // Cn : 100 tunnel
	case "Dn": // _TtcpBufferArrX
		___Vtbm.tbmBufArr.tbaCntMax = 1000 // Dn : 1000 tunnel
	case "Fn":
		// do nothing.
	default:
		_FpfNex(" 834821 03 : unknown Role ")
	}
	___Vtbm.tbmBufArr.tbaCntFree = ___Vtbm.tbmBufArr.tbaCntMax
	___Vtbm.tbmBufArr.tbaMbuftunnel = make([]_TtcpBuftunnel, ___Vtbm.tbmBufArr.tbaCntMax)
	___Vtbm.tbmBufArr.tbaMtid = make(map[[16]byte]int)
	_Fsleep(_T1s)
	go _Frun(___Vtbm, 1500201) // _FtcpBufMachine__1500201x__chan_rece__default
	go _Frun(___Vtbm, 1500301) // _FtcpBufMachine__1500301x__timegap_timeout_delete
	go _Frun(___Vtbm, 1500302) // _FtcpBufMachine__1500302x__timegap_bufSendTunnelCheck
}
|
package main
import (
"log"
"Week02/service"
)
// main fetches user 1 from the service and logs either the user info
// or an HTTP-500 marker on failure.
func main() {
	svc := service.New()
	info, err := svc.GetUserInfo(1)
	if err != nil {
		log.Println("HTTP 500")
		return
	}
	log.Println(info)
}
|
package show
import "store"
func ShowEmployeeByID(id int, idEmpMap *map[int](store.Employee)){
empl := (*idEmpMap)[id]
if empl.There == true {empl.PrintName()}
} |
// Copyright (c) 2018 The MATRIX Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
package matrixstate
import (
"github.com/MatrixAINetwork/go-matrix/common"
"github.com/MatrixAINetwork/go-matrix/log"
"github.com/MatrixAINetwork/go-matrix/mc"
"github.com/MatrixAINetwork/go-matrix/params/manversion"
)
// logInfo is the log module tag for this package.
const logInfo = "matrix state"

// Per-protocol-version state managers, built once in init. ("manger" is
// a typo for "manager" but the names are kept for consistency.)
var mangerAlpha *Manager
var mangerBeta *Manager
var mangerGamma *Manager
var mangerDelta *Manager
var mangerAIMine *Manager
var mangerZeta *Manager

// versionOpt reads/writes the version key itself.
var versionOpt MatrixOperator
// init builds one Manager per supported protocol version plus the
// version operator.
func init() {
	mangerAlpha = newManger(manversion.VersionAlpha)
	mangerBeta = newManger(manversion.VersionBeta)
	mangerGamma = newManger(manversion.VersionGamma)
	mangerDelta = newManger(manversion.VersionDelta)
	mangerAIMine = newManger(manversion.VersionAIMine)
	mangerZeta = newManger(manversion.VersionZeta)
	versionOpt = newVersionInfoOpt()
}
// MatrixOperator reads and writes one keyed value in the matrix state
// trie.
type MatrixOperator interface {
	KeyHash() common.Hash                           // hash of the state key this operator manages
	GetValue(st StateDB) (interface{}, error)       // decode the value from state
	SetValue(st StateDB, value interface{}) error   // encode the value into state
}
// Manager maps state keys to their operators for one protocol version.
type Manager struct {
	version   string                    // protocol version this manager serves
	operators map[string]MatrixOperator // state key -> operator
}
// GetManager returns the state manager registered for the given
// protocol version, or nil (after logging) for an unknown version.
func GetManager(version string) *Manager {
	var m *Manager
	switch version {
	case manversion.VersionAlpha:
		m = mangerAlpha
	case manversion.VersionBeta:
		m = mangerBeta
	case manversion.VersionGamma:
		m = mangerGamma
	case manversion.VersionDelta:
		m = mangerDelta
	case manversion.VersionAIMine:
		m = mangerAIMine
	case manversion.VersionZeta:
		m = mangerZeta
	default:
		log.Error(logInfo, "get Manger err", "version not exist", "version", version)
		return nil
	}
	return m
}
// Version returns the protocol version this manager serves.
func (self *Manager) Version() string {
	return self.version
}
// FindOperator returns the operator registered for key, or
// ErrOptNotExist (after a warning log) when the key is unknown to this
// manager's version.
func (self *Manager) FindOperator(key string) (MatrixOperator, error) {
	if opt, exist := self.operators[key]; exist {
		return opt, nil
	}
	log.Warn(logInfo, "find operator err", "not exist", "key", key, "version", self.version)
	return nil, ErrOptNotExist
}
// newManger builds the operator table for one protocol version.
// NOTE(review): "Manger" looks like a typo for "Manager", but the name is used
// consistently across this file, so it is kept as-is.
// The four cases are largely copy-pasted; the known deltas between them are
// called out inline below.
func newManger(version string) *Manager {
	switch version {
	case manversion.VersionAlpha:
		// Alpha: baseline operator set. Uses the single-coin
		// newPreMinerTxsRewardOpt (later versions switch to the multi-coin one).
		return &Manager{
			version: version,
			operators: map[string]MatrixOperator{
				mc.MSKeyBroadcastTx:             newBroadcastTxOpt(),
				mc.MSKeyTopologyGraph:           newTopologyGraphOpt(),
				mc.MSKeyElectGraph:              newELectGraphOpt(),
				mc.MSKeyElectOnlineState:        newELectOnlineStateOpt(),
				mc.MSKeyBroadcastInterval:       newBroadcastIntervalOpt(),
				mc.MSKeyElectGenTime:            newElectGenTimeOpt(),
				mc.MSKeyElectMinerNum:           newElectMinerNumOpt(),
				mc.MSKeyElectConfigInfo:         newElectConfigInfoOpt(),
				mc.MSKeyElectBlackList:          newElectBlackListOpt(),
				mc.MSKeyElectWhiteList:          newElectWhiteListOpt(),
				mc.MSKeyElectWhiteListSwitcher:  newElectWhiteListSwitcherOpt(),
				mc.MSKeyAccountBroadcasts:       newBroadcastAccountsOpt(),
				mc.MSKeyAccountInnerMiners:      newInnerMinerAccountsOpt(),
				mc.MSKeyAccountFoundation:       newFoundationAccountOpt(),
				mc.MSKeyAccountVersionSupers:    newVersionSuperAccountsOpt(),
				mc.MSKeyAccountBlockSupers:      newBlockSuperAccountsOpt(),
				mc.MSKeyAccountMultiCoinSupers:  newMultiCoinSuperAccountsOpt(),
				mc.MSKeyAccountSubChainSupers:   newSubChainSuperAccountsOpt(),
				mc.MSKeyVIPConfig:               newVIPConfigOpt(),
				mc.MSKeyPreBroadcastRoot:        newPreBroadcastRootOpt(),
				mc.MSKeyLeaderConfig:            newLeaderConfigOpt(),
				mc.MSKeyMinHash:                 newMinHashOpt(),
				mc.MSKeySuperBlockCfg:           newSuperBlockCfgOpt(),
				mc.MSKeyBlkRewardCfg:            newBlkRewardCfgOpt(),
				mc.MSKeyTxsRewardCfg:            newTxsRewardCfgOpt(),
				mc.MSKeyInterestCfg:             newInterestCfgOpt(),
				mc.MSKeyLotteryCfg:              newLotteryCfgOpt(),
				mc.MSKeySlashCfg:                newSlashCfgOpt(),
				mc.MSKeyPreMinerBlkReward:       newPreMinerBlkRewardOpt(),
				mc.MSKeyPreMinerTxsReward:       newPreMinerTxsRewardOpt(),
				mc.MSKeyUpTimeNum:               newUpTimeNumOpt(),
				mc.MSKeyLotteryNum:              newLotteryNumOpt(),
				mc.MSKeyLotteryAccount:          newLotteryAccountOpt(),
				mc.MSKeyInterestCalcNum:         newInterestCalcNumOpt(),
				mc.MSKeyInterestPayNum:          newInterestPayNumOpt(),
				mc.MSKeySlashNum:                newSlashNumOpt(),
				mc.MSKeyBlkCalc:                 newBlkCalcOpt(),
				mc.MSKeyTxsCalc:                 newTxsCalcOpt(),
				mc.MSKeyInterestCalc:            newInterestCalcOpt(),
				mc.MSKeyLotteryCalc:             newLotteryCalcOpt(),
				mc.MSKeySlashCalc:               newSlashCalcOpt(),
				mc.MSTxpoolGasLimitCfg:          newTxpoolGasLimitOpt(),
				mc.MSCurrencyConfig:             newCurrencyPackOpt(),
				mc.MSAccountBlackList:           newAccountBlackListOpt(),
				mc.MSKeyBlockProduceStatsStatus: newBlockProduceStatsStatusOpt(),
				mc.MSKeyBlockProduceSlashCfg:    newBlockProduceSlashCfgOpt(),
				mc.MSKeyBlockProduceStats:       newBlockProduceStatsOpt(),
				mc.MSKeyBlockProduceBlackList:   newBlockProduceBlackListOpt(),
			},
		}
	case manversion.VersionBeta:
		// Beta: identical to Alpha except MSKeyPreMinerTxsReward now uses the
		// multi-coin operator (newPreMinerMultiCoinTxsRewardOpt).
		return &Manager{
			version: version,
			operators: map[string]MatrixOperator{
				mc.MSKeyBroadcastTx:             newBroadcastTxOpt(),
				mc.MSKeyTopologyGraph:           newTopologyGraphOpt(),
				mc.MSKeyElectGraph:              newELectGraphOpt(),
				mc.MSKeyElectOnlineState:        newELectOnlineStateOpt(),
				mc.MSKeyBroadcastInterval:       newBroadcastIntervalOpt(),
				mc.MSKeyElectGenTime:            newElectGenTimeOpt(),
				mc.MSKeyElectMinerNum:           newElectMinerNumOpt(),
				mc.MSKeyElectConfigInfo:         newElectConfigInfoOpt(),
				mc.MSKeyElectBlackList:          newElectBlackListOpt(),
				mc.MSKeyElectWhiteList:          newElectWhiteListOpt(),
				mc.MSKeyElectWhiteListSwitcher:  newElectWhiteListSwitcherOpt(),
				mc.MSKeyAccountBroadcasts:       newBroadcastAccountsOpt(),
				mc.MSKeyAccountInnerMiners:      newInnerMinerAccountsOpt(),
				mc.MSKeyAccountFoundation:       newFoundationAccountOpt(),
				mc.MSKeyAccountVersionSupers:    newVersionSuperAccountsOpt(),
				mc.MSKeyAccountBlockSupers:      newBlockSuperAccountsOpt(),
				mc.MSKeyAccountMultiCoinSupers:  newMultiCoinSuperAccountsOpt(),
				mc.MSKeyAccountSubChainSupers:   newSubChainSuperAccountsOpt(),
				mc.MSKeyVIPConfig:               newVIPConfigOpt(),
				mc.MSKeyPreBroadcastRoot:        newPreBroadcastRootOpt(),
				mc.MSKeyLeaderConfig:            newLeaderConfigOpt(),
				mc.MSKeyMinHash:                 newMinHashOpt(),
				mc.MSKeySuperBlockCfg:           newSuperBlockCfgOpt(),
				mc.MSKeyBlkRewardCfg:            newBlkRewardCfgOpt(),
				mc.MSKeyTxsRewardCfg:            newTxsRewardCfgOpt(),
				mc.MSKeyInterestCfg:             newInterestCfgOpt(),
				mc.MSKeyLotteryCfg:              newLotteryCfgOpt(),
				mc.MSKeySlashCfg:                newSlashCfgOpt(),
				mc.MSKeyPreMinerBlkReward:       newPreMinerBlkRewardOpt(),
				mc.MSKeyPreMinerTxsReward:       newPreMinerMultiCoinTxsRewardOpt(),
				mc.MSKeyUpTimeNum:               newUpTimeNumOpt(),
				mc.MSKeyLotteryNum:              newLotteryNumOpt(),
				mc.MSKeyLotteryAccount:          newLotteryAccountOpt(),
				mc.MSKeyInterestCalcNum:         newInterestCalcNumOpt(),
				mc.MSKeyInterestPayNum:          newInterestPayNumOpt(),
				mc.MSKeySlashNum:                newSlashNumOpt(),
				mc.MSKeyBlkCalc:                 newBlkCalcOpt(),
				mc.MSKeyTxsCalc:                 newTxsCalcOpt(),
				mc.MSKeyInterestCalc:            newInterestCalcOpt(),
				mc.MSKeyLotteryCalc:             newLotteryCalcOpt(),
				mc.MSKeySlashCalc:               newSlashCalcOpt(),
				mc.MSTxpoolGasLimitCfg:          newTxpoolGasLimitOpt(),
				mc.MSCurrencyConfig:             newCurrencyPackOpt(),
				mc.MSAccountBlackList:           newAccountBlackListOpt(),
				mc.MSKeyBlockProduceStatsStatus: newBlockProduceStatsStatusOpt(),
				mc.MSKeyBlockProduceSlashCfg:    newBlockProduceSlashCfgOpt(),
				mc.MSKeyBlockProduceStats:       newBlockProduceStatsOpt(),
				mc.MSKeyBlockProduceBlackList:   newBlockProduceBlackListOpt(),
			},
		}
	case manversion.VersionGamma:
		// Gamma: Beta's table plus the MSKeySelMinerNum entry.
		return &Manager{
			version: version,
			operators: map[string]MatrixOperator{
				mc.MSKeyBroadcastTx:             newBroadcastTxOpt(),
				mc.MSKeyTopologyGraph:           newTopologyGraphOpt(),
				mc.MSKeyElectGraph:              newELectGraphOpt(),
				mc.MSKeyElectOnlineState:        newELectOnlineStateOpt(),
				mc.MSKeyBroadcastInterval:       newBroadcastIntervalOpt(),
				mc.MSKeyElectGenTime:            newElectGenTimeOpt(),
				mc.MSKeyElectMinerNum:           newElectMinerNumOpt(),
				mc.MSKeyElectConfigInfo:         newElectConfigInfoOpt(),
				mc.MSKeyElectBlackList:          newElectBlackListOpt(),
				mc.MSKeyElectWhiteList:          newElectWhiteListOpt(),
				mc.MSKeyElectWhiteListSwitcher:  newElectWhiteListSwitcherOpt(),
				mc.MSKeyAccountBroadcasts:       newBroadcastAccountsOpt(),
				mc.MSKeyAccountInnerMiners:      newInnerMinerAccountsOpt(),
				mc.MSKeyAccountFoundation:      newFoundationAccountOpt(),
				mc.MSKeyAccountVersionSupers:    newVersionSuperAccountsOpt(),
				mc.MSKeyAccountBlockSupers:      newBlockSuperAccountsOpt(),
				mc.MSKeyAccountMultiCoinSupers:  newMultiCoinSuperAccountsOpt(),
				mc.MSKeyAccountSubChainSupers:   newSubChainSuperAccountsOpt(),
				mc.MSKeyVIPConfig:               newVIPConfigOpt(),
				mc.MSKeyPreBroadcastRoot:        newPreBroadcastRootOpt(),
				mc.MSKeyLeaderConfig:            newLeaderConfigOpt(),
				mc.MSKeyMinHash:                 newMinHashOpt(),
				mc.MSKeySuperBlockCfg:           newSuperBlockCfgOpt(),
				mc.MSKeyBlkRewardCfg:            newBlkRewardCfgOpt(),
				mc.MSKeyTxsRewardCfg:            newTxsRewardCfgOpt(),
				mc.MSKeyInterestCfg:             newInterestCfgOpt(),
				mc.MSKeyLotteryCfg:              newLotteryCfgOpt(),
				mc.MSKeySlashCfg:                newSlashCfgOpt(),
				mc.MSKeyPreMinerBlkReward:       newPreMinerBlkRewardOpt(),
				mc.MSKeyPreMinerTxsReward:       newPreMinerMultiCoinTxsRewardOpt(),
				mc.MSKeyUpTimeNum:               newUpTimeNumOpt(),
				mc.MSKeyLotteryNum:              newLotteryNumOpt(),
				mc.MSKeyLotteryAccount:          newLotteryAccountOpt(),
				mc.MSKeyInterestCalcNum:         newInterestCalcNumOpt(),
				mc.MSKeyInterestPayNum:          newInterestPayNumOpt(),
				mc.MSKeySlashNum:                newSlashNumOpt(),
				mc.MSKeyBlkCalc:                 newBlkCalcOpt(),
				mc.MSKeyTxsCalc:                 newTxsCalcOpt(),
				mc.MSKeyInterestCalc:            newInterestCalcOpt(),
				mc.MSKeyLotteryCalc:             newLotteryCalcOpt(),
				mc.MSKeySlashCalc:               newSlashCalcOpt(),
				mc.MSTxpoolGasLimitCfg:          newTxpoolGasLimitOpt(),
				mc.MSCurrencyConfig:             newCurrencyPackOpt(),
				mc.MSAccountBlackList:           newAccountBlackListOpt(),
				mc.MSKeyBlockProduceStatsStatus: newBlockProduceStatsStatusOpt(),
				mc.MSKeyBlockProduceSlashCfg:    newBlockProduceSlashCfgOpt(),
				mc.MSKeyBlockProduceStats:       newBlockProduceStatsOpt(),
				mc.MSKeyBlockProduceBlackList:   newBlockProduceBlackListOpt(),
				mc.MSKeySelMinerNum:             newSelMinerNumOpt(),
			},
		}
	case manversion.VersionDelta, manversion.VersionAIMine, manversion.VersionZeta:
		// Delta/AIMine/Zeta share one table: Gamma's entries plus difficulty
		// bounds, block-duration status, AI block reward, selected-validator
		// reward keys, base-power slash/stat keys, dynamic polling info and
		// currency header.
		return &Manager{
			version: version,
			operators: map[string]MatrixOperator{
				mc.MSKeyBroadcastTx:             newBroadcastTxOpt(),
				mc.MSKeyTopologyGraph:           newTopologyGraphOpt(),
				mc.MSKeyElectGraph:              newELectGraphOpt(),
				mc.MSKeyElectOnlineState:        newELectOnlineStateOpt(),
				mc.MSKeyBroadcastInterval:       newBroadcastIntervalOpt(),
				mc.MSKeyElectGenTime:            newElectGenTimeOpt(),
				mc.MSKeyElectMinerNum:           newElectMinerNumOpt(),
				mc.MSKeyElectConfigInfo:         newElectConfigInfoOpt(),
				mc.MSKeyElectBlackList:          newElectBlackListOpt(),
				mc.MSKeyElectWhiteList:          newElectWhiteListOpt(),
				mc.MSKeyElectWhiteListSwitcher:  newElectWhiteListSwitcherOpt(),
				mc.MSKeyAccountBroadcasts:       newBroadcastAccountsOpt(),
				mc.MSKeyAccountInnerMiners:      newInnerMinerAccountsOpt(),
				mc.MSKeyAccountFoundation:       newFoundationAccountOpt(),
				mc.MSKeyAccountVersionSupers:    newVersionSuperAccountsOpt(),
				mc.MSKeyAccountBlockSupers:      newBlockSuperAccountsOpt(),
				mc.MSKeyAccountMultiCoinSupers:  newMultiCoinSuperAccountsOpt(),
				mc.MSKeyAccountSubChainSupers:   newSubChainSuperAccountsOpt(),
				mc.MSKeyVIPConfig:               newVIPConfigOpt(),
				mc.MSKeyPreBroadcastRoot:        newPreBroadcastRootOpt(),
				mc.MSKeyLeaderConfig:            newLeaderConfigOpt(),
				mc.MSKeyMinHash:                 newMinHashOpt(),
				mc.MSKeySuperBlockCfg:           newSuperBlockCfgOpt(),
				mc.MSKeyMinimumDifficulty:       newMinDiffcultyOpt(),
				mc.MSKeyMaximumDifficulty:       newMaxDiffcultyOpt(),
				mc.MSKeyReelectionDifficulty:    newReelectionDiffcultyOpt(),
				mc.MSKeyBlockDurationStatus:     newBlockDurationOpt(),
				mc.MSKeyBlkRewardCfg:            newBlkRewardCfgOpt(),
				mc.MSKeyAIBlkRewardCfg:          newAIBlkRewardCfgOpt(),
				mc.MSKeyTxsRewardCfg:            newTxsRewardCfgOpt(),
				mc.MSKeyInterestCfg:             newInterestCfgOpt(),
				mc.MSKeyLotteryCfg:              newLotteryCfgOpt(),
				mc.MSKeySlashCfg:                newSlashCfgOpt(),
				mc.MSKeyPreMinerBlkReward:       newPreMinerBlkRewardOpt(),
				mc.MSKeyPreMinerTxsReward:       newPreMinerMultiCoinTxsRewardOpt(),
				mc.MSKeyUpTimeNum:               newUpTimeNumOpt(),
				mc.MSKeyLotteryNum:              newLotteryNumOpt(),
				mc.MSKeyLotteryAccount:          newLotteryAccountOpt(),
				mc.MSKeyInterestCalcNum:         newInterestCalcNumOpt(),
				mc.MSKeyInterestPayNum:          newInterestPayNumOpt(),
				mc.MSKeySlashNum:                newSlashNumOpt(),
				mc.MSKeySelMinerNum:             newSelMinerNumOpt(),
				mc.MSKeyBLKSelValidatorNum:      newSelValidatorBLKNumOpt(),
				mc.MSKeyBLKSelValidator:         newValidatorBLKSelRewardOpt(),
				mc.MSKeyTXSSelValidatorNum:      newSelValidatorTXSNumOpt(),
				mc.MSKeyTXSSelValidator:         newValidatorTXSSelRewardOpt(),
				mc.MSKeyBlkCalc:                 newBlkCalcOpt(),
				mc.MSKeyTxsCalc:                 newTxsCalcOpt(),
				mc.MSKeyInterestCalc:            newInterestCalcOpt(),
				mc.MSKeyLotteryCalc:             newLotteryCalcOpt(),
				mc.MSKeySlashCalc:               newSlashCalcOpt(),
				mc.MSTxpoolGasLimitCfg:          newTxpoolGasLimitOpt(),
				mc.MSCurrencyConfig:             newCurrencyPackOpt(),
				mc.MSAccountBlackList:           newAccountBlackListOpt(),
				mc.MSKeyBlockProduceStatsStatus: newBlockProduceStatsStatusOpt(),
				mc.MSKeyBlockProduceSlashCfg:    newBlockProduceSlashCfgOpt(),
				mc.MSKeyBlockProduceStats:       newBlockProduceStatsOpt(),
				mc.MSKeyBlockProduceBlackList:   newBlockProduceBlackListOpt(),
				mc.MSKeyBasePowerStatsStatus:    newBasePowerStatsStatusOpt(),
				mc.MSKeyBasePowerSlashCfg:       newBasePowerSlashCfgOpt(),
				mc.MSKeyBasePowerStats:          newBasePowerStatsOpt(),
				mc.MSKeyBasePowerBlackList:      newBasePowerBlackListOpt(),
				mc.MSKeyElectDynamicPollingInfo: newDynamicPollingOpt(),
				mc.MSCurrencyHeader:             newCurrencyHeaderCfgOpt(),
			},
		}
	default:
		log.Error(logInfo, "创建管理类", "失败", "版本", version)
		return nil
	}
}
|
package controller
import (
"encoding/json"
"fmt"
"gin-use/configs"
"gin-use/src/global"
"gin-use/src/model/response"
"gin-use/src/util/consul"
"gin-use/src/util/snowflake"
"net/http"
"reflect"
"time"
"github.com/ddliu/go-httpclient"
"github.com/gin-gonic/gin"
)
// resp is the package-level buffer the test helper decodes remote responses
// into; shared mutable state, not safe for concurrent use.
var resp *response.Resp
// Health reports service liveness together with the node's local IP and the
// snowflake identity (worker/datacenter IDs) derived from a freshly generated ID.
// @Summary 健康检查接口
// @Description 服务是否启动正常检查
// @Tags 监测服务
// @Accept application/json
// @Produce application/json
// @Param name query string false "用户名"
// @Success 200
// @Router /health [get]
func Health(c *gin.Context) {
	localIP := configs.GetLocalIp()
	timestamp := time.Now()
	flakeID := snowflake.GenerateId()
	workerID, datacenterID := snowflake.GetDeviceID(flakeID)
	payload := map[string]interface{}{
		"ip":           localIP,
		"Time":         timestamp,
		"id":           flakeID,
		"datacenterid": datacenterID,
		"workerid":     workerID,
	}
	ResponseHttpOK("ok", "请求成功", payload, c)
}
// Response writes a JSON API reply with the given HTTP status code, business
// code, message and payload.
func Response(httpCode int, code string, msg string, data interface{}, c *gin.Context) {
	// A nil interface would serialize as JSON null; substitute an empty object
	// so clients always receive an object in the data field. reflect.TypeOf
	// is nil exactly when the interface itself is nil (a typed nil pointer
	// passes through, matching the original behavior).
	if reflect.TypeOf(data) == nil {
		data = map[string]interface{}{}
	}
	c.JSON(httpCode, response.Resp{code, msg, data})
}
// ResponseHttpOK writes a JSON API reply with HTTP status 200. It delegates
// to Response, which already normalizes a nil data value to an empty object,
// removing the duplicated nil-check logic that used to live here.
func ResponseHttpOK(code string, msg string, data interface{}, c *gin.Context) {
	Response(http.StatusOK, code, msg, data, c)
}
func test() {
// 从consul中发现服务
xichengCommon := consul.FindServer("xicheng-common", "")
global.Logger.Info("嘿,我能调用了 xicheng-common服务, xichengCommon:%s", xichengCommon)
api := xichengCommon + "/api/health"
res, err := httpclient.Get(api)
if err != nil {
global.Logger.Errorf("http-client, err:%v", err)
}
bodyString, err := res.ToString()
if err != nil {
fmt.Println("--------err----", err)
}
fmt.Println("--------bodyString----", bodyString)
json.Unmarshal([]byte(string(bodyString)), &resp)
fmt.Println("------------", resp.Code)
} |
package webhook
const (
	// InjectionInstanceLabel can be set in a Namespace and indicates the corresponding DynaKube object assigned to it.
	InjectionInstanceLabel = "dynakube.internal.dynatrace.com/instance"
	// AnnotationDynatraceInjected is set to "true" by the webhook to Pods to indicate that it has been injected.
	AnnotationDynatraceInjected = "dynakube.dynatrace.com/injected"
	// AnnotationDynatraceInject is set to "false" on the Pod to indicate that does not want any injection.
	AnnotationDynatraceInject = "dynatrace.com/inject"
	// OneAgentPrefix is the annotation-prefix segment shared by all OneAgent annotations below.
	OneAgentPrefix = "oneagent"
	// AnnotationOneAgentInject can be set at pod level to enable/disable OneAgent injection.
	AnnotationOneAgentInject = OneAgentPrefix + ".dynatrace.com/inject"
	// AnnotationOneAgentInjected is set by the webhook once OneAgent injection has been applied.
	AnnotationOneAgentInjected = OneAgentPrefix + ".dynatrace.com/injected"
	// DataIngestPrefix is the annotation-prefix segment shared by the data-ingest annotations below.
	DataIngestPrefix = "data-ingest"
	// AnnotationDataIngestInject can be set at pod level to enable/disable data-ingest injection.
	AnnotationDataIngestInject = DataIngestPrefix + ".dynatrace.com/inject"
	// AnnotationDataIngestInjected is set by the webhook once data-ingest injection has been applied.
	AnnotationDataIngestInjected = DataIngestPrefix + ".dynatrace.com/injected"
	// AnnotationFlavor can be set on a Pod to configure which code modules flavor to download. It's set to "default"
	// if not set.
	AnnotationFlavor = "oneagent.dynatrace.com/flavor"
	// AnnotationTechnologies can be set on a Pod to configure which code module technologies to download. It's set to
	// "all" if not set.
	AnnotationTechnologies = "oneagent.dynatrace.com/technologies"
	// AnnotationInstallPath can be set on a Pod to configure on which directory the OneAgent will be available from,
	// defaults to DefaultInstallPath if not set.
	AnnotationInstallPath = "oneagent.dynatrace.com/install-path"
	// AnnotationInstallerUrl can be set on a Pod to configure the installer url for downloading the agent
	// defaults to the PaaS installer download url of your tenant
	AnnotationInstallerUrl = "oneagent.dynatrace.com/installer-url"
	// AnnotationFailurePolicy can be set on a Pod to control what the init container does on failures. When set to
	// "fail", the init container will exit with error code 1. Defaults to "silent".
	AnnotationFailurePolicy = "oneagent.dynatrace.com/failure-policy"
	// DefaultInstallPath is the default directory to install the app-only OneAgent package.
	DefaultInstallPath = "/opt/dynatrace/oneagent-paas"
	// SecretCertsName is the name of the secret where the webhook certificates are stored.
	SecretCertsName = "dynatrace-webhook-certs"
	// DeploymentName is the name used for the Deployment of any webhooks and WebhookConfiguration objects.
	DeploymentName = "dynatrace-webhook"
	// WebhookContainerName is the container name of the webhook itself inside its Deployment.
	WebhookContainerName = "webhook"
	// InstallContainerName is the name used for the install container
	InstallContainerName = "install-oneagent"
)
|
package do
import (
"database/sql"
"time"
)
// UserDo is the database data object for a user row; `ddb` tags map each
// field to its column name.
type UserDo struct {
	ID       string       `ddb:"id"`
	Name     string       `ddb:"name"`
	Password sql.RawBytes `ddb:"password"` // raw column bytes — NOTE(review): RawBytes is only valid until the next Scan; confirm the mapper copies it
	Email    string       `ddb:"email"`
	Version  int          `ddb:"version"` // optimistic-locking version counter, presumably — confirm against the DAO
	CTime    time.Time    `ddb:"ctime"`   // creation time
	MTime    time.Time    `ddb:"mtime"`   // last-modification time
}
|
package models
import "testing"
// TestNewEntityIDFromString parses a plain username entity ID and checks each
// component field.
func TestNewEntityIDFromString(t *testing.T) {
	eid, err := NewEntityIDFromString("@user@cadmium.org")
	if err != nil {
		t.Fatal("error must be null")
	}
	ok := eid.Attr == "" &&
		eid.Type == UsernameType &&
		eid.ServerPart == "cadmium.org" &&
		eid.LocalPart == "user"
	if !ok {
		t.FailNow()
	}
}
// TestNewEntityIDFromStringWithAttr parses a third-party-ID entity string
// carrying an msisdn attribute and checks each component field.
func TestNewEntityIDFromStringWithAttr(t *testing.T) {
	eid, err := NewEntityIDFromString("%msisdn:18002003040@cadmium.org")
	if err != nil {
		t.Fatal("error must be null")
	}
	ok := eid.Attr == "msisdn" &&
		eid.Type == ThirdPIDType &&
		eid.ServerPart == "cadmium.org" &&
		eid.LocalPart == "18002003040"
	if !ok {
		t.FailNow()
	}
}
// TestNewEntityIDFromStringWithEmailAttr parses a third-party-ID entity string
// with an email attribute whose local part contains dots and underscores.
func TestNewEntityIDFromStringWithEmailAttr(t *testing.T) {
	eid, err := NewEntityIDFromString("%email:abslimit_netwhood.online@cadmium.org")
	if err != nil {
		t.Fatal("error must be null")
	}
	ok := eid.Attr == "email" &&
		eid.Type == ThirdPIDType &&
		eid.ServerPart == "cadmium.org" &&
		eid.LocalPart == "abslimit_netwhood.online"
	if !ok {
		t.Fatal(eid.String())
	}
}
// TestNewEntityIDFromStringWithOnlyServerPart parses a bare server name and
// verifies the result is marked server-only with the right server part.
func TestNewEntityIDFromStringWithOnlyServerPart(t *testing.T) {
	str := "cadmium.org"
	eid, err := NewEntityIDFromString(str)
	if err != nil {
		t.Fatal("error must be null")
	}
	// BUG FIX: the original joined the checks with &&, so the test only failed
	// when BOTH OnlyServerPart was false AND ServerPart was wrong; either
	// violation alone must fail.
	if !eid.OnlyServerPart || eid.ServerPart != "cadmium.org" {
		t.Fatal(eid.String())
	}
}
|
package spotifaux
import "C"
import "math"
// Analysis constants — at SAMPLE_RATE 16 kHz these correspond to a 50 ms FFT,
// a 25 ms analysis window and a 10 ms hop. TODO confirm intended durations.
const SS_FFT_LENGTH = 800
const WindowLength = 400
const Hop = 160
const SAMPLE_RATE = 16000
// SoundSpotter holds the matching state used when rendering matched audio.
type SoundSpotter struct {
	ChosenFeatures []int
	CqtN           int // number of constant-Q coefficients (automatic)
	InShingles     [][]float64
	ShingleSize    int // shingle length in hops; Output renders Hop*ShingleSize samples
}
// Output renders the audio for the winning match: it reads Hop*ShingleSize
// frames from fileName starting at the winner's hop offset, scales them so the
// output power follows inPower, hard-limits to ±1.12 and attenuates by 0.8.
// A winner of -1 yields a silent (zero) buffer.
func (s *SoundSpotter) Output(fileName string, winner int, inPower float64) ([]float64, error) {
	outputLength := Hop * s.ShingleSize
	outputBuffer := make([]float64, outputLength) // fix size at constructor ?
	if winner > -1 {
		sf, err := NewSoundFile(fileName)
		if err != nil {
			// BUG FIX: this used to panic even though the function already
			// returns an error; propagate it like every other failure path.
			return nil, err
		}
		defer sf.Close()
		_, err = sf.Seek(int64(winner * Hop))
		if err != nil {
			return nil, err
		}
		buf := make([]float64, outputLength)
		_, err = sf.ReadFrames(buf)
		if err != nil {
			return nil, err
		}
		// Mean power of the matched excerpt.
		dbPower := 0.0
		for _, val := range buf {
			dbPower += math.Pow(val, 2)
		}
		dbPower /= float64(len(buf))
		// Envelope follow factor is alpha * sqrt(env1/env2) + (1-alpha)
		// sqrt(env2) has already been calculated, only take sqrt(env1) here
		envFollow := 1.0
		alpha := envFollow*math.Sqrt(inPower/dbPower) + (1.0 - envFollow)
		for p := 0; p < outputLength; p++ {
			// Scale, hard-limit to ±1.12, then attenuate.
			output := alpha * buf[p]
			if output > 1.12 {
				output = 1.12
			} else if output < -1.12 {
				output = -1.12
			}
			outputBuffer[p] = output * 0.8
		}
	}
	return outputBuffer, nil
}
|
package solutions
// MaskPair caches a word's letter bitmask (bit i set iff letter 'a'+i occurs)
// together with the word's length, so pairwise checks avoid rescanning words.
type MaskPair struct {
	mask   int
	length int
}
// maxProduct returns the largest product len(a)*len(b) over all pairs of
// words sharing no common letters; 0 when no such pair exists.
func maxProduct(words []string) int {
	// Precompute each word's letter bitmask and length once.
	type wordInfo struct {
		bits int
		size int
	}
	infos := make([]wordInfo, 0, len(words))
	for _, w := range words {
		bits := 0
		for k := 0; k < len(w); k++ {
			bits |= 1 << int(w[k]-'a')
		}
		infos = append(infos, wordInfo{bits: bits, size: len(w)})
	}
	best := 0
	for a := 0; a < len(infos); a++ {
		for b := a + 1; b < len(infos); b++ {
			// Disjoint letter sets iff the masks share no bits.
			if infos[a].bits&infos[b].bits == 0 && infos[a].size*infos[b].size > best {
				best = infos[a].size * infos[b].size
			}
		}
	}
	return best
}
// getBitMask packs the set of lowercase letters occurring in s into an int:
// bit i is set iff the byte 'a'+i appears in s.
func getBitMask(s string) int {
	mask := 0
	for _, ch := range []byte(s) {
		mask |= 1 << int(ch-'a')
	}
	return mask
}
|
package Model
// Referal is a referral code record tied to a student (murid) account.
type Referal struct {
	Id          int    `form:"id" json:"id"`
	ReferalCode string `form:"referal_code" json:"referal_code"`
	MuridID     int    `form:"murid_id" json:"murid_id"`
	Used        bool   `form:"used" json:"used"` // whether the code has already been redeemed
}

// ResponseReferal is the API envelope for a list of referral records.
type ResponseReferal struct {
	Status  int       `json:"status"`
	Message string    `json:"message"`
	Data    []Referal `json:"data"`
}

// ResponseReferall is the API envelope for a single referral record.
type ResponseReferall struct {
	Status  int     `json:"status"`
	Message string  `json:"message"`
	Data    Referal `json:"data"`
}
|
package hnsvc_test
import (
stdlog "log"
"testing"
"github.com/stretchr/testify/assert"
"github.com/wenerme/uo/pkg/hnsvc"
)
// TestBasic smoke-tests every HackerNewsService endpoint against the live API:
// item and user lookup by known IDs, then each story/update listing.
// NOTE(review): this is an integration test that needs network access.
func TestBasic(t *testing.T) {
	// os.Setenv("HTTP_PROXY", "socks5://127.0.0.1:8888")
	s := &hnsvc.HackerNewsService{}
	{
		// Item 8863 is the well-known "Dropbox" YC application post.
		it, err := s.GetItem(8863)
		assert.NoError(t, err)
		assert.Equal(t, 8863, it.ID)
		assert.Equal(t, "My YC app: Dropbox - Throw away your USB drive", it.Title)
	}
	{
		v, err := s.GetUser("wener")
		assert.NoError(t, err)
		assert.Equal(t, "wener", v.ID)
	}
	{
		v, err := s.MaxItemID()
		assert.NoError(t, err)
		assert.Greater(t, v, 100000)
		stdlog.Printf("Max Item ID %v", v)
	}
	{
		v, err := s.TopStoryIds()
		assert.NoError(t, err)
		assert.NotEmpty(t, v)
	}
	{
		v, err := s.NewsStoryIds()
		assert.NoError(t, err)
		assert.NotEmpty(t, v)
	}
	{
		v, err := s.BestStoryIds()
		assert.NoError(t, err)
		assert.NotEmpty(t, v)
	}
	{
		// Presumably the "Ask HN" listing — the method name looks truncated;
		// confirm against the hnsvc package.
		v, err := s.AstStoryIds()
		assert.NoError(t, err)
		assert.NotEmpty(t, v)
	}
	{
		v, err := s.ShowStoryIds()
		assert.NoError(t, err)
		assert.NotEmpty(t, v)
	}
	{
		v, err := s.JobStoryIds()
		assert.NoError(t, err)
		assert.NotEmpty(t, v)
	}
	{
		v, err := s.Updates()
		assert.NoError(t, err)
		assert.NotEmpty(t, v.Items)
		assert.NotEmpty(t, v.Profiles)
		stdlog.Printf("Updates %#v", v)
	}
}
|
package routers
import (
v1 "deepgo/download/api/v1"
"deepgo/download/middleware"
"deepgo/download/pkg/setting"
"github.com/gin-gonic/gin"
"net/http"
)
// InitRouter builds the gin engine: logging/recovery middleware, CORS, the
// /api/v1 group, a root redirect to the static pages and a /test probe.
func InitRouter() *gin.Engine {
	r := gin.New()
	r.Use(gin.Logger())
	r.Use(gin.Recovery())
	gin.SetMode(setting.RunMode)
	// CORS must be registered before the routes it should apply to.
	r.Use(middleware.Cors())
	apiv1 := r.Group("/api/v1")
	{
		// List downloadable software.
		apiv1.GET("/software", v1.GetSoftware)
	}
	r.GET("/", func(c *gin.Context) {
		c.Redirect(http.StatusMovedPermanently, "static/html")
	})
	r.GET("/test", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "test",
		})
	})
	// BUG FIX: a second r.Use(middleware.Cors()) used to appear here. gin only
	// applies Use() to routes registered afterwards, so that call affected
	// nothing — removed as dead, misleading code.
	return r
}
|
package putils
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/tkanos/gonfig"
)
// Configuration describes one simulated IoT sensor: its emission delay,
// sensor type/ID, airport code (AITA), and the MQTT-style TOPIC/HOST/PORT
// target it publishes to. Loaded from fakeiot_wind_config.json by GetConfig.
type Configuration struct {
	Delay  int    // delay between emissions — presumably milliseconds; confirm against callers
	Type   string // sensor type label
	Sensor string // sensor identifier
	AITA   string // airport code (see Aita below)
	TOPIC  string
	PORT   int
	HOST   string
}
// GetConfig loads configs/fakeiot_wind_config.json (relative to the current
// working directory) into a Configuration. It panics if the working directory
// cannot be determined or the file cannot be parsed.
func GetConfig() Configuration {
	var cfg Configuration
	wd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	cfgPath := filepath.Join(wd, "configs", "fakeiot_wind_config.json")
	if err = gonfig.GetConf(cfgPath, &cfg); err != nil {
		panic(err)
	}
	return cfg
}
// SetConfig assembles a Configuration from the individual values, echoes it
// to stdout, and returns it.
func SetConfig(delay int, sensorType string, sensorID string, aita string, topic string, host string, port int) Configuration {
	cfg := Configuration{
		Delay:  delay,
		Type:   sensorType,
		Sensor: sensorID,
		AITA:   aita,
		TOPIC:  topic,
		PORT:   port,
		HOST:   host,
	}
	fmt.Println(cfg)
	return cfg
}
// TimeToDate will convert a golang `time.Now()` to a YYYY-MM-DD date
func TimeToDate(t time.Time) string {
return fmt.Sprintf("%d-%02d-%02d", t.Year(), t.Month(), t.Day())
}
// Sleep <ms> milliseconds
func Sleep(ms int) {
time.Sleep(time.Duration(ms) * time.Millisecond)
}
// Aita is a list of airport codes (IATA-style, three letters), sorted
// alphabetically; mirrors the codes in AitaFull.
var Aita = []string{"AMS", "ATL", "BKK", "CAN", "CDG", "DEL", "DEN", "DFW", "DXB", "FRA", "HKG", "HND", "ICN", "JFK", "LAX", "LHR", "ORD", "PEK", "PVG", "SIN"}
// Airport is an airport's data: its code plus human-readable name.
type Airport struct {
	Aita string // three-letter airport code
	Name string // full airport name
}
// AitaFull is a list of airport codes with its full name; same codes and
// order as Aita. Element type is elided per gofmt -s.
var AitaFull = []Airport{
	{"AMS", "Amsterdam Airport Schiphol"},
	{"ATL", "Hartsfield–Jackson Atlanta International Airport"},
	{"BKK", "Suvarnabhumi Airport"},
	{"CAN", "Guangzhou Baiyun International Airport"},
	{"CDG", "Paris Charles de Gaulle Airport"},
	{"DEL", "Indira Gandhi International Airport"},
	{"DEN", "Denver International Airport"},
	{"DFW", "Dallas/Fort Worth International Airport"},
	{"DXB", "Dubai International Airport"},
	{"FRA", "Frankfurt am Main Airport"},
	{"HKG", "Hong Kong International Airport"},
	{"HND", "Tokyo International Airport"},
	{"ICN", "Incheon International Airport"},
	{"JFK", "John F. Kennedy International Airport"},
	{"LAX", "Los Angeles International Airport"},
	{"LHR", "Heathrow Airport"},
	{"ORD", "O'Hare International Airport"},
	{"PEK", "Beijing Capital International Airport"},
	{"PVG", "Shanghai Pudong International Airport"},
	{"SIN", "Singapore Changi Airport"},
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rbdutils
import (
"fmt"
"github.com/ceph/go-ceph/rados"
"github.com/ceph/go-ceph/rbd"
"yunion.io/x/pkg/errors"
)
// SCluster wraps an established rados connection to a Ceph cluster.
type SCluster struct {
	conn *rados.Conn
}
// withCluster runs doFunc against the underlying rados connection and shuts
// the connection down afterwards.
// NOTE(review): Shutdown is deferred on EVERY call, which makes a SCluster
// effectively single-use — verify callers create a fresh cluster per operation.
func (self *SCluster) withCluster(doFunc func(*rados.Conn) (interface{}, error)) (interface{}, error) {
	defer self.conn.Shutdown()
	return doFunc(self.conn)
}
// SPool identifies one storage pool inside a cluster.
type SPool struct {
	name    string
	cluster *SCluster
}
// withIOContext opens an IO context on this pool and runs doFunc with it,
// going through withCluster for connection lifecycle handling.
func (self *SPool) withIOContext(doFunc func(*rados.IOContext) (interface{}, error)) (interface{}, error) {
	return self.cluster.withCluster(func(conn *rados.Conn) (interface{}, error) {
		ioctx, err := conn.OpenIOContext(self.name)
		if err != nil {
			return nil, errors.Wrapf(err, "OpenIOContext(%s)", self.name)
		}
		return doFunc(ioctx)
	})
}
// GetCluster returns the cluster this pool belongs to.
func (self *SPool) GetCluster() *SCluster {
	return self.cluster
}
// NewCluster builds a rados connection configured with the given monitor host
// and auth key (each skipped when empty) plus fixed operation timeouts, then
// connects and wraps it in a SCluster.
func NewCluster(monHost, key string) (*SCluster, error) {
	conn, err := rados.NewConn()
	if err != nil {
		return nil, err
	}
	stringOpts := map[string]string{"mon_host": monHost, "key": key}
	for name, val := range stringOpts {
		if len(val) == 0 {
			continue
		}
		if err = conn.SetConfigOption(name, val); err != nil {
			return nil, errors.Wrapf(err, "SetConfigOption %s %s", name, val)
		}
	}
	timeoutOpts := map[string]int64{
		"rados_osd_op_timeout": 20 * 60,
		"rados_mon_op_timeout": 5,
		"client_mount_timeout": 2 * 60,
	}
	for name, secs := range timeoutOpts {
		if err = conn.SetConfigOption(name, fmt.Sprintf("%d", secs)); err != nil {
			return nil, errors.Wrapf(err, "SetConfigOption %s %d", name, secs)
		}
	}
	if err = conn.Connect(); err != nil {
		return nil, errors.Wrapf(err, "conn.Connect")
	}
	return &SCluster{conn: conn}, nil
}
// GetPool returns a handle for the named pool. It never fails; the error
// return exists for interface symmetry with the other accessors.
func (self *SCluster) GetPool(name string) (*SPool, error) {
	return &SPool{name: name, cluster: self}, nil
}
// ListPools returns the names of all pools in the cluster.
func (self *SCluster) ListPools() ([]string, error) {
	pools, err := self.withCluster(func(conn *rados.Conn) (interface{}, error) {
		return conn.ListPools()
	})
	if err != nil {
		return nil, errors.Wrapf(err, "ListPools")
	}
	return pools.([]string), nil
}
// GetClusterStats returns cluster-wide usage statistics.
func (self *SCluster) GetClusterStats() (rados.ClusterStat, error) {
	stat, err := self.withCluster(func(conn *rados.Conn) (interface{}, error) {
		return conn.GetClusterStats()
	})
	if err != nil {
		return rados.ClusterStat{}, errors.Wrapf(err, "GetClusterStats")
	}
	return stat.(rados.ClusterStat), nil
}
// GetFSID returns the cluster's unique FSID string.
func (self *SCluster) GetFSID() (string, error) {
	fsid, err := self.withCluster(func(conn *rados.Conn) (interface{}, error) {
		return conn.GetFSID()
	})
	if err != nil {
		return "", errors.Wrapf(err, "GetFSID")
	}
	return fsid.(string), nil
}
// DeletePool removes the named pool from the cluster.
// Note: errors.Wrapf returns nil when err is nil, so success still yields nil.
func (self *SCluster) DeletePool(pool string) error {
	_, err := self.withCluster(func(conn *rados.Conn) (interface{}, error) {
		return nil, conn.DeletePool(pool)
	})
	return errors.Wrapf(err, "DeletePool")
}
// cmdOutput carries a monitor command's result: the output buffer plus the
// status/info line returned alongside it.
type cmdOutput struct {
	Buffer string
	Info   string
}
// MonCommand sends a monitor command (JSON-encoded args) to the cluster and
// returns its output buffer and status line.
func (self *SCluster) MonCommand(args []byte) (cmdOutput, error) {
	result := cmdOutput{}
	_, err := self.withCluster(func(conn *rados.Conn) (interface{}, error) {
		buffer, info, err := conn.MonCommand(args)
		if err != nil {
			return nil, errors.Wrapf(err, "MonCommand")
		}
		result.Buffer = string(buffer)
		result.Info = info
		return nil, nil
	})
	// BUG FIX: the outer wrap used to say "DeletePool" (copy/paste from
	// DeletePool); label the error with the actual operation.
	return result, errors.Wrapf(err, "MonCommand")
}
// ListImages returns the names of all rbd images in this pool.
func (self *SPool) ListImages() ([]string, error) {
	images, err := self.withIOContext(func(ioctx *rados.IOContext) (interface{}, error) {
		return rbd.GetImageNames(ioctx)
	})
	if err != nil {
		return nil, errors.Wrapf(err, "GetImageNames")
	}
	return images.([]string), nil
}
|
/*
Copyright 2021 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events
import (
"context"
"testing"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
// validEventsFile is a captured skaffold events log covering one complete dev
// loop (build → deploy → status check → "Update succeeded") plus the start of
// a second iteration; used as the happy-path fixture below.
var validEventsFile = `{"timestamp":"2021-08-11T19:19:41.711480752Z","event":{"metaEvent":{"entry":"Starting Skaffold: \u0026{Version:v1.29.0 ConfigVersion:skaffold/v2beta20 GitVersion: GitCommit:39371bb996a3c39c3d4fa8749cabe173c5f45b3a BuildDate:2021-08-02T17:52:01Z GoVersion:go1.14.14 Compiler:gc Platform:linux/amd64 User:}","metadata":{"build":{"numberOfArtifacts":1,"builders":[{"type":"DOCKER","count":1}],"type":"LOCAL"},"deploy":{"deployers":[{"type":"HELM","count":1}],"cluster":"MINIKUBE"}}}}}
{"timestamp":"2021-08-11T19:19:41.756663171Z","event":{"devLoopEvent":{"status":"In Progress"}},"entry":"Update initiated"}
{"timestamp":"2021-08-11T19:19:41.763416940Z","event":{"buildEvent":{"artifact":"skaffold-helm","status":"In Progress"}},"entry":"Build started for artifact skaffold-helm"}
{"timestamp":"2021-08-11T19:19:45.685909133Z","event":{"buildEvent":{"artifact":"skaffold-helm","status":"Complete"}},"entry":"Build completed for artifact skaffold-helm"}
{"timestamp":"2021-08-11T19:19:45.686277380Z","event":{"deployEvent":{"status":"In Progress"}},"entry":"Deploy started"}
{"timestamp":"2021-08-11T19:19:46.624504850Z","event":{"deployEvent":{"status":"Complete"}},"entry":"Deploy completed"}
{"timestamp":"2021-08-11T19:19:46.624550647Z","event":{"statusCheckEvent":{"status":"Started"}},"entry":"Status check started"}
{"timestamp":"2021-08-11T19:19:48.719236104Z","event":{"resourceStatusCheckEvent":{"resource":"deployment/skaffold-helm","status":"Succeeded","message":"Succeeded","statusCode":"STATUSCHECK_SUCCESS"}},"entry":"Resource deployment/skaffold-helm status completed successfully"}
{"timestamp":"2021-08-11T19:19:48.719307379Z","event":{"statusCheckEvent":{"status":"Succeeded"}},"entry":"Status check succeeded"}
{"timestamp":"2021-08-11T19:19:48.734465633Z","event":{"devLoopEvent":{"status":"Succeeded"}},"entry":"Update succeeded"}
{"timestamp":"2021-08-11T19:19:51.740854617Z","event":{"devLoopEvent":{"iteration":1,"status":"In Progress"}},"entry":"Update initiated"}
{"timestamp":"2021-08-11T19:19:51.744239521Z","event":{"buildEvent":{"artifact":"skaffold-helm","status":"In Progress"}},"entry":"Build started for artifact skaffold-helm"}
{"timestamp":"2021-08-11T19:19:55.757451860Z","event":{"buildEvent":{"artifact":"skaffold-helm","status":"Complete"}},"entry":"Build completed for artifact skaffold-helm"}
{"timestamp":"2021-08-11T19:19:55.757928417Z","event":{"deployEvent":{"status":"In Progress"}},"entry":"Deploy started"}
{"timestamp":"2021-08-11T19:19:56.728808748Z","event":{"deployEvent":{"status":"Complete"}},"entry":"Deploy completed"}
{"timestamp":"2021-08-11T19:19:56.728840707Z","event":{"statusCheckEvent":{"status":"Started"}},"entry":"Status check started"}
{"timestamp":"2021-08-11T19:20:00.823570232Z","event":{"resourceStatusCheckEvent":{"resource":"deployment/skaffold-helm","status":"Succeeded","message":"Succeeded","statusCode":"STATUSCHECK_SUCCESS"}},"entry":"Resource deployment/skaffold-helm status completed successfully"}
{"timestamp":"2021-08-11T19:20:00.823640653Z","event":{"statusCheckEvent":{"status":"Succeeded"}},"entry":"Status check succeeded"}
{"timestamp":"2021-08-11T19:20:00.823857159Z","event":{"devLoopEvent":{"iteration":1,"status":"Succeeded"}},"entry":"Update succeeded"}
`
// invalidEventsFile is deliberately unparseable; used as the error-path fixture.
var invalidEventsFile = `invalid-events-file`
// TestParseEventDuration verifies ParseEventDuration accepts a valid skaffold
// events file (yielding consistent per-phase timing slices) and rejects a
// malformed one.
func TestParseEventDuration(t *testing.T) {
	tests := []struct {
		description      string
		eventsFileText   string
		shouldErr        bool
		expectedDevLoops int
	}{
		{
			description:      "valid events file",
			eventsFileText:   validEventsFile,
			expectedDevLoops: 1,
		},
		{
			description:    "invalid events file",
			eventsFileText: invalidEventsFile,
			shouldErr:      true,
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			fp := t.TempFile("TestParseEventDuration-", []byte(test.eventsFileText))
			devLoopTimes, err := ParseEventDuration(context.Background(), fp)
			t.CheckError(test.shouldErr, err)
			if !test.shouldErr {
				// BUG FIX: the original joined the two comparisons with &&, so
				// the check only failed when BOTH deploy and status-check slice
				// lengths mismatched the build slice; any single mismatch must
				// fail.
				if len(devLoopTimes.InnerBuildTimes) != len(devLoopTimes.InnerDeployTimes) || len(devLoopTimes.InnerBuildTimes) != len(devLoopTimes.InnerStatusCheckTimes) {
					t.Fatalf("expected devLoopTimes arrays to have same lengths")
				}
				t.CheckDeepEqual(len(devLoopTimes.InnerBuildTimes), test.expectedDevLoops)
			}
		})
	}
}
|
package redis
import (
"fmt"
"session-sample/server/config"
"github.com/go-redis/redis/v8"
)
// Connection builds a redis client addressed at the host/port taken from the
// application config. The client connects lazily; no I/O happens here.
func Connection() *redis.Client {
	opts := &redis.Options{
		Addr: fmt.Sprintf("%s:%s", config.RedisHost, config.RedisPort),
	}
	return redis.NewClient(opts)
}
|
package forms_test
import (
. "github.com/d11wtq/bijou/runtime"
"github.com/d11wtq/bijou/test"
"strings"
"testing"
)
// TestMacroReturnsAMacro checks that evaluating (macro (x)) produces a *Macro
// whose Params, Body, and Env come from the form and the evaluating env.
func TestMacroReturnsAMacro(t *testing.T) {
	env := test.FakeEnv()
	form := test.NewList(Symbol("macro"), test.NewList(Symbol("x")))
	v, err := form.Eval(env)
	if err != nil {
		t.Fatalf(`expected err == nil, got %s`, err)
	}
	if v.Type() != MacroType {
		t.Fatalf(`expected v.Type() == MacroType, got %s`, v.Type())
	}
	macro := v.(*Macro)
	if macro.Params != form.Tail().Head() {
		t.Fatalf(`expected macro.Params == form.Tail().Head(), got %s`, macro.Params)
	}
	if macro.Body != form.Tail().Tail() {
		t.Fatalf(`expected macro.Body == form.Tail().Tail(), got %s`, macro.Body)
	}
	if macro.Env != env {
		t.Fatalf(`expected macro.Env == env, got %s`, macro.Env)
	}
}
// TestMacroValidatesParameterListPresence asserts that (macro) with no
// parameter list evaluates to an error mentioning the missing params.
func TestMacroValidatesParameterListPresence(t *testing.T) {
	errmsg := "missing param"
	form := test.NewList(Symbol("macro"))
	v, err := form.Eval(test.FakeEnv())
	if err == nil {
		t.Fatalf(`expected err != nil, got nil`)
	}
	if !strings.Contains(strings.ToLower(err.Error()), errmsg) {
		t.Fatalf(`expected err to match "%s", got %s`, errmsg, err)
	}
	if v != nil {
		t.Fatalf(`expected v == nil, got %s`, v)
	}
}
// TestMacroValidatesParameterListType asserts that a non-list parameter
// position, e.g. (macro 1), is rejected with an "invalid param" error.
func TestMacroValidatesParameterListType(t *testing.T) {
	errmsg := "invalid param"
	form := test.NewList(Symbol("macro"), Int(1))
	v, err := form.Eval(test.FakeEnv())
	if err == nil {
		t.Fatalf(`expected err != nil, got nil`)
	}
	if !strings.Contains(strings.ToLower(err.Error()), errmsg) {
		t.Fatalf(`expected err to match "%s", got %s`, errmsg, err)
	}
	if v != nil {
		t.Fatalf(`expected v == nil, got %s`, v)
	}
}
// TestMacroValidatesVariadicIsLastParameter asserts that the variadic marker
// (&) followed by more than one name is rejected.
func TestMacroValidatesVariadicIsLastParameter(t *testing.T) {
	errmsg := "variadic"
	params := test.NewList(Symbol("&"), Symbol("x"), Symbol("y"))
	form := test.NewList(Symbol("macro"), params)
	v, err := form.Eval(test.FakeEnv())
	if err == nil {
		t.Fatalf(`expected err != nil, got nil`)
	}
	if !strings.Contains(strings.ToLower(err.Error()), errmsg) {
		t.Fatalf(`expected err to match "%s", got %s`, errmsg, err)
	}
	if v != nil {
		t.Fatalf(`expected v == nil, got %s`, v)
	}
}
// TestMacroAllowsEmptyVariadic checks that a bare variadic marker, (macro (&)),
// is accepted and carried through as the macro's parameter list.
func TestMacroAllowsEmptyVariadic(t *testing.T) {
	env := test.FakeEnv()
	form := test.NewList(Symbol("macro"), test.NewList(Symbol("&")))
	v, err := form.Eval(env)
	if err != nil {
		t.Fatalf(`expected err == nil, got %s`, err)
	}
	if v.Type() != MacroType {
		t.Fatalf(`expected v.Type() == MacroType, got %s`, v.Type())
	}
	macro := v.(*Macro)
	if macro.Params != form.Tail().Head() {
		t.Fatalf(`expected macro.Params == form.Tail().Head(), got %s`, macro.Params)
	}
}
|
package main
// Struct declaration syntax:
// type identifier struct {
//     field1 type1
//     field2 type2
//     ...
// }
// main is intentionally empty; this file only documents struct syntax.
func main() {
}
|
package main
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"path"
"strings"
"syscall"
"time"
"github.com/Clever/analytics-util/analyticspipeline"
discovery "github.com/Clever/discovery-go"
"github.com/Clever/s3-to-redshift/v3/logger"
redshift "github.com/Clever/s3-to-redshift/v3/redshift"
s3filepath "github.com/Clever/s3-to-redshift/v3/s3filepath"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
multierror "github.com/hashicorp/go-multierror"
"github.com/kardianos/osext"
env "github.com/segmentio/go-env"
)
var (
	// Optional Redshift connection settings; main defaults these to
	// localhost:5439 when unset.
	host = os.Getenv("REDSHIFT_HOST")
	port = os.Getenv("REDSHIFT_PORT")
	// Required settings: env.MustGet aborts startup when any is missing.
	dbName = env.MustGet("REDSHIFT_DB")
	user = env.MustGet("REDSHIFT_USER")
	pwd = env.MustGet("REDSHIFT_PASSWORD")
	redshiftRoleARN = env.MustGet("REDSHIFT_ROLE_ARN")
	cleanupWorker = env.MustGet("CLEANUP_WORKER")
	// payloadForSignalFx holds a subset of the job payload that
	// we want to alert on as a dimension in SignalFx.
	// This is necessary because we would like to selectively group
	// on job parameters - schema but not date, for instance, since
	// logging the date would overwhelm SignalFx
	payloadForSignalFx string
	// gearmanAdminURL is assembled in init() from the GEARMAN_ADMIN_* env vars.
	gearmanAdminURL string
)
// init assembles the credentialed gearman-admin endpoint URL from required
// environment variables; a missing variable aborts startup via env.MustGet.
func init() {
	gearmanAdminUser := env.MustGet("GEARMAN_ADMIN_USER")
	gearmanAdminPass := env.MustGet("GEARMAN_ADMIN_PASS")
	gearmanAdminPath := env.MustGet("GEARMAN_ADMIN_PATH")
	gearmanAdminURL = generateServiceEndpoint(gearmanAdminUser, gearmanAdminPass, gearmanAdminPath)
}
// generateServiceEndpoint resolves the gearman-admin host/port and protocol
// via service discovery and returns a credentialed URL of the form
// proto://user:pass@hostPort/path. Exits the process if discovery fails.
func generateServiceEndpoint(user, pass, path string) string {
	const service = "gearman-admin"
	hostPort, err := discovery.HostPort(service, "http")
	if err != nil {
		log.Fatal(err)
	}
	proto, err := discovery.Proto(service, "http")
	if err != nil {
		log.Fatal(err)
	}
	return fmt.Sprintf("%s://%s:%s@%s%s", proto, user, pass, hostPort, path)
}
// fatalIfErr is a no-op for nil errors; otherwise it reports the job as
// failed to SignalFx and panics with msg plus the underlying error.
func fatalIfErr(err error, msg string) {
	if err == nil {
		return
	}
	logger.JobFinishedEvent(payloadForSignalFx, false)
	panic(fmt.Sprintf("%s: %s", msg, err)) // TODO: kayvee
}
// getMapKeys returns the keys of m in unspecified order; used when reporting
// the set of supported input values.
func getMapKeys(m map[string]bool) []string {
	out := make([]string, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	return out
}
// Rounds down a dateTime to a granularity
// For instance, 11:50AM will be truncated to 11:00AM
// if given a granularity of an hour
func truncateDate(date time.Time, granularity string) time.Time {
switch granularity {
case "hour":
return date.Truncate(time.Hour)
default:
// Round down to day granularity by default
return date.Truncate(24 * time.Hour)
}
}
// Calculates whether or not input data (s3) is more stale than target data (Redshift)
// Expects:
// - inputDataDate corresponds to the s3 data timestamp of the job
// - targetDataDate is the maximum timestamp of the DB table
// - granularity indicating how often data snapshots are recorded in the target
//
// NOTE(review): this function mutates *targetDataDate in place (the negated
// timezone offset is folded into the caller's value), so callers that later
// read *targetDataDate — e.g. main's "Recent data already exists" log — see
// the shifted time. Confirm this is intentional before refactoring.
func isInputDataStale(inputDataDate time.Time, targetDataDate *time.Time,
	granularity string, targetDataLoc *time.Location,
) bool {
	// If target table has no data, then input data is fresh by default
	if targetDataDate == nil {
		return false
	}
	// Handle comparison for target data in a different time zone (ex. PT):
	// the zone's offset is negated and added, normalizing the target
	// timestamp to UTC wall-clock terms before truncation.
	_, offsetSec := targetDataDate.In(targetDataLoc).Zone()
	offsetDuration, err := time.ParseDuration(fmt.Sprintf("%vs", -1*offsetSec))
	fatalIfErr(err, "isInputDataStale was unable to parse offset duration")
	*targetDataDate = targetDataDate.Add(offsetDuration)
	// We truncate the timestamps to make the comparison at the correct granularity
	// i.e. input data lagging by two hours is considered stale when granularity is hourly,
	// but it can still be considered fresh when the granularity is daily.
	return truncateDate(*targetDataDate, granularity).After(truncateDate(inputDataDate, granularity))
}
// getRegionForBucket looks up the region name for the given bucket
func getRegionForBucket(name string) (string, error) {
// Any region will work for the region lookup, but the request MUST use
// PathStyle
config := aws.NewConfig().WithRegion("us-west-1").WithS3ForcePathStyle(true)
session := session.New()
client := s3.New(session, config)
params := s3.GetBucketLocationInput{
Bucket: aws.String(name),
}
resp, err := client.GetBucketLocation(¶ms)
if err != nil {
return "", fmt.Errorf("failed to get location for bucket '%s', %s", name, err)
}
if resp.LocationConstraint == nil {
// "US Standard", returns an empty region. So return any region in the US
return "us-east-1", nil
}
return *resp.LocationConstraint, nil
}
// runCopy loads one S3 data file (or manifest) into Redshift inside a single
// transaction: optionally TRUNCATE, create or update the target table, COPY
// the data in, record the load in the latency-info table, then queue a
// vacuum-analyze job via gearman-admin. It errors out if the target table
// differs from the input config (different distkey, etc).
func runCopy(
	db *redshift.Redshift, inputConf s3filepath.S3File, inputTable redshift.Table, targetTable *redshift.Table,
	truncate, gzip bool, delimiter, timeGranularity, targetTimeZone, streamStart, streamEnd string,
) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	// TRUNCATE for dimension tables, but not fact tables
	if truncate && targetTable != nil {
		log.Println("truncating table!")
		if err := db.Truncate(tx, inputConf.Schema, inputTable.Name); err != nil {
			return fmt.Errorf("err running truncate table: %s", err)
		}
	}
	if targetTable == nil {
		// Table doesn't exist yet; create it from the input config.
		if err := db.CreateTable(tx, inputTable); err != nil {
			return fmt.Errorf("err running create table: %s", err)
		}
	} else {
		var start, end time.Time
		var err error
		if timeGranularity == "stream" {
			start, err = time.Parse("2006-01-02T15:04:05", streamStart)
			if err != nil {
				return err
			}
			end, err = time.Parse("2006-01-02T15:04:05", streamEnd)
			if err != nil {
				return err
			}
		} else {
			start, end = startEndFromGranularity(inputConf.DataDate, timeGranularity, targetTimeZone)
		}
		// To prevent duplicates, clear away any existing data within a certain time range as the data date
		// (that is, sharing the same data date up to a certain time granularity)
		if err := db.TruncateInTimeRange(tx, inputConf.Schema, inputTable.Name, inputTable.Meta.DataDateColumn, start, end); err != nil {
			return fmt.Errorf("err truncating data for data refresh: %s", err)
		}
		if err := db.UpdateTable(tx, inputTable, *targetTable); err != nil {
			return fmt.Errorf("err running update table: %s", err)
		}
	}
	// COPY direct into it, ok to do since we're in a transaction
	// can't switch on file ending as manifest files b/c
	// manifest files obscure the underlying file types
	// instead just pass the delimiter along even if it's null
	if err := db.Copy(tx, inputConf, delimiter, true, gzip); err != nil {
		return fmt.Errorf("err running copy: %s", err)
	}
	// Update the latency info table so we have an easier record of the last update.
	// BUG FIX: targetTable is nil on the create-table path above; the original
	// dereferenced it unconditionally and panicked for brand-new tables. Fall
	// back to the input table's definition (same schema/name) in that case.
	latencyTable := inputTable
	if targetTable != nil {
		latencyTable = *targetTable
	}
	if err := db.UpdateLatencyInfo(tx, latencyTable); err != nil {
		return fmt.Errorf("err updating latency info: %s", err)
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("err committing transaction: %s", err)
	}
	// There's a good chance we've deleted some data in the table here (e.g. a stream load,
	// truncate, or update historical set that exists). Run a vacuum to clear out the old data.
	// Only one vacuum can be run at a time, so we're going to throw this over the wall to
	// redshift-vacuum and use gearman-admin as a queueing service.
	if len(gearmanAdminURL) == 0 {
		log.Fatalf("Unable to post vacuum-analyze job to %s", cleanupWorker)
	} else {
		log.Println("Submitting job to Gearman admin")
		// N.B. We need to pass backslashes to escape the quotation marks as required
		// by Golang's os.Args for command line arguments
		cleanupArgs := map[string]string{
			"targets": inputConf.Schema + `."` + inputTable.Name + `"`,
			"vacuum_mode": "delete",
			// If we truncated, analyze will run regardless since 100% of the rows have changed. Otherwise,
			// only analyze if we've changed enough rows (threshold > 1%)
			"analyze_mode": "full",
			"analyze_threshold": "1",
		}
		payload, err := json.Marshal(cleanupArgs)
		if err != nil {
			log.Fatalf("Error creating new payload: %s", err)
		}
		client := &http.Client{}
		endpoint := gearmanAdminURL + fmt.Sprintf("/%s", cleanupWorker)
		req, err := http.NewRequest("POST", endpoint, bytes.NewReader(payload))
		if err != nil {
			log.Fatalf("Error creating new request: %s", err)
		}
		req.Header.Add("Content-Type", "text/plain")
		_, err = client.Do(req)
		if err != nil {
			log.Fatalf("Error submitting job: %s", err)
		}
	}
	return nil
}
func startEndFromGranularity(t time.Time, granularity string, targetTimezone string) (time.Time, time.Time) {
// Rotate time if in PT
log.Print(targetTimezone)
if targetTimezone != "UTC" {
ptLoc, err := time.LoadLocation(targetTimezone)
fatalIfErr(err, "startEndFromGranularity was unable to load timezone")
_, ptOffsetSec := t.In(ptLoc).Zone()
ptOffsetDuration, err := time.ParseDuration(fmt.Sprintf("%vs", ptOffsetSec))
fatalIfErr(err, "startEndFromGranularity was unable to parse offset duration")
t = t.Add(ptOffsetDuration)
}
var duration time.Duration
if granularity == "day" {
duration = time.Hour * 24
} else {
duration = time.Hour
}
start := t.UTC().Truncate(duration)
end := start.Add(duration)
return start, end
}
// payload is the job configuration decoded by analyticspipeline from the
// worker invocation; the config tags name the incoming parameters. Defaults
// are supplied in main before decoding.
type payload struct {
	InputSchemaName string `config:"schema"`
	InputTables string `config:"tables"`
	InputBucket string `config:"bucket,required"`
	Truncate bool `config:"truncate"`
	Force bool `config:"force"`
	DataDate string `config:"date,required"`
	ConfigFile string `config:"config"`
	GZip bool `config:"gzip"`
	Delimiter string `config:"delimiter"`
	TimeGranularity string `config:"granularity,required"`
	StreamStart string `config:"streamStart"`
	StreamEnd string `config:"streamEnd"`
	TargetTimezone string `config:"timezone"`
	SkipLoad bool `config:"skipLoad"`
}
// This worker finds the latest file in s3 and uploads it to redshift
// If the destination table does not exist, the worker creates it
// If the destination table lacks columns, the worker creates those as well
// The worker also uses a column in the data to figure out whether the s3 data is
// newer than what already exists.
func main() {
	// Route logs according to the kvconfig.yml shipped next to the binary.
	dir, err := osext.ExecutableFolder()
	if err != nil {
		log.Fatal(err)
	}
	err = logger.SetGlobalRouting(path.Join(dir, "kvconfig.yml"))
	if err != nil {
		log.Fatal(err)
	}
	flags := payload{ // Specifying defaults:
		InputSchemaName: "mongo_raw",
		InputTables: "",
		InputBucket: "",
		Truncate: false,
		Force: false,
		DataDate: "",
		ConfigFile: "",
		GZip: true,
		Delimiter: "",
		TimeGranularity: "day",
		StreamStart: "",
		StreamEnd: "",
		TargetTimezone: "UTC",
		SkipLoad: false,
	}
	// Decode the job payload over the defaults above.
	nextPayload, err := analyticspipeline.AnalyticsWorker(&flags)
	if err != nil {
		log.Fatalf("err: %#v", err)
	}
	defer analyticspipeline.PrintPayload(nextPayload)
	// If we're to skip the load, do it early. Don't print out the schema or job finished info.
	// This wasn't a job that we did anything for.
	if flags.SkipLoad {
		// Proceed directly to next job. Do not pass go. Do not collect logs.
		return
	}
	payloadForSignalFx = fmt.Sprintf("--schema %s", flags.InputSchemaName)
	defer logger.JobFinishedEvent(payloadForSignalFx, true)
	if flags.DataDate == "" {
		logger.JobFinishedEvent(payloadForSignalFx, false)
		panic("No date provided")
	}
	// verify that timeGranularity is a supported value. for convenience,
	// we use the convention that granularities must be valid PostgreSQL dateparts
	// (see: http://www.postgresql.org/docs/8.1/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC)
	supportedGranularities := map[string]bool{"hour": true, "day": true, "stream": true}
	if !supportedGranularities[flags.TimeGranularity] {
		logger.JobFinishedEvent(payloadForSignalFx, false)
		panic(fmt.Sprintf("Unsupported granularity, must be one of %v", getMapKeys(supportedGranularities)))
	}
	// verify that targetTimezone is a supported Golang location (i.e. "America/Los_Angeles")
	targetDataLocation, err := time.LoadLocation(flags.TargetTimezone)
	fatalIfErr(err, fmt.Sprintf("unable to load timezone '%s'", flags.TargetTimezone))
	awsRegion, locationErr := getRegionForBucket(flags.InputBucket)
	fatalIfErr(locationErr, "error getting location for bucket "+flags.InputBucket)
	// use an custom bucket type for testablitity
	bucket := s3filepath.S3Bucket{
		Name: flags.InputBucket,
		Region: awsRegion,
		RedshiftRoleARN: redshiftRoleARN}
	timeout := 60 // can parameterize later if this is an issue
	if host == "" {
		host = "localhost"
	}
	if port == "" {
		port = "5439"
	}
	// Cancel in-flight SQL queries when the container is signalled.
	ctx, cancel := context.WithCancel(context.Background())
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Signal(syscall.SIGTERM))
	go func() {
		for range c {
			// sfncli will send signals to our container
			// we should gracefully terminate any running SQL queries
			cancel()
		}
	}()
	db, err := redshift.NewRedshift(ctx, host, port, dbName, user, pwd, timeout)
	fatalIfErr(err, "error getting redshift instance")
	var copyErrors error
	// for each table passed in - likely we could goroutine this out
	for _, t := range strings.Split(flags.InputTables, ",") {
		log.Printf("attempting to run on schema: %s table: %s", flags.InputSchemaName, t)
		// override most recent data file
		parsedInputDate, err := time.Parse(time.RFC3339, flags.DataDate)
		fatalIfErr(err, fmt.Sprintf("issue parsing date: %s", flags.DataDate))
		inputConf, err := s3filepath.CreateS3File(s3filepath.S3PathChecker{}, bucket, flags.InputSchemaName, t, flags.ConfigFile, parsedInputDate)
		fatalIfErr(err, "Issue getting data file from s3")
		inputTable, err := db.GetTableFromConf(*inputConf) // allow passing explicit config later
		fatalIfErr(err, "Issue getting table from input")
		// figure out what the current state of the table is to determine if the table is already up to date
		targetTable, targetDataDate, err := db.GetTableMetadata(inputConf.Schema, inputConf.Table, inputTable.Meta.DataDateColumn)
		if err != nil {
			fatalIfErr(err, "Error getting existing latest table metadata") // use fatalIfErr to stay the same
		}
		// unless --force, don't update unless input data is new
		// NOTE(review): isInputDataStale shifts *targetDataDate in place, so
		// the log line below prints the shifted timestamp.
		if flags.TimeGranularity != "stream" && isInputDataStale(parsedInputDate, targetDataDate, flags.TimeGranularity, targetDataLocation) {
			if flags.Force == false {
				log.Printf("Recent data already exists in db: %s", *targetDataDate)
				continue
			}
			log.Printf("Forcing update of inputTable: %s", inputConf.Table)
		}
		// Failed tables are collected rather than aborting, so one bad table
		// doesn't block the rest of the batch.
		if err := runCopy(
			db, *inputConf, *inputTable, targetTable, flags.Truncate, flags.GZip, flags.Delimiter,
			flags.TimeGranularity, flags.TargetTimezone, flags.StreamStart, flags.StreamEnd,
		); err != nil {
			log.Printf("error running copy for table %s: %s", t, err)
			copyErrors = multierror.Append(copyErrors, err)
		} else {
			// DON'T NEED TO CREATE VIEWS - will be handled by the refresh script
			log.Printf("done with table: %s.%s", inputConf.Schema, t)
		}
	}
	if copyErrors != nil {
		log.Fatalf("error loading tables: %s", copyErrors)
	}
}
|
package main
import "fmt"
// taumBirthday returns the combined purchase cost of the two gift kinds,
// substituting the conversion price (other kind's unit price plus z) for a
// kind's own unit price whenever the branch conditions make that applicable.
func taumBirthday(b, w, bc, wc, z int) (output int) {
	// Start from the plain per-kind costs; a branch below may override them.
	costA, costB := b*bc, w*wc
	switch {
	case bc > wc && z < bc-wc:
		costA = b * (wc + z)
		costB = w * wc
	case wc > bc && z < wc-bc:
		costA = b * bc
		costB = w * (bc + z)
	}
	output = costA + costB
	return
}
// main prints the cost for one sample input set.
func main() {
	fmt.Println(taumBirthday(5, 9, 2, 3, 4))
}
|
package leetcode
import "fmt"
/*
* @lc app=leetcode id=443 lang=golang
*
* [443] String Compression
*/
// @lc code=start
// BytePair pairs a byte with a run count.
// NOTE(review): not referenced by compress below — apparently a leftover from
// an earlier implementation; confirm before removing.
type BytePair struct {
	c byte
	count int
}
// compress run-length encodes chars in place (LeetCode 443): each run of a
// repeated byte is rewritten as the byte followed by its decimal count, with
// the count omitted for runs of length 1 (e.g. "aabbccc" -> "a2b2c3").
// Returns the length of the compressed prefix of chars.
func compress(chars []byte) int {
	// Case1: empty or single-byte input is already compressed.
	if len(chars) <= 1 {
		return len(chars)
	}
	// Case2: normal
	sentryIdx := 0         // start index of the current run
	sentryChar := chars[0] // byte value of the current run
	writeIdx := 0          // next write position of the in-place output
	// Iterate one index past the end so the final run gets flushed.
	for i := 0; i <= len(chars); i++ {
		if i == len(chars) || chars[i] != sentryChar {
			chars[writeIdx] = sentryChar
			writeIdx++
			// BUG FIX: only append a count for runs longer than one; the
			// original condition (i > sentryIdx) was true for every run and
			// appended "1" after single characters.
			if i-sentryIdx > 1 {
				stringCount := fmt.Sprintf("%d", i-sentryIdx)
				for j := 0; j < len(stringCount); j++ {
					chars[writeIdx] = stringCount[j]
					writeIdx++
				}
			}
			// BUG FIX: the original read chars[i] unconditionally here, which
			// panicked with index out of range on the final (i == len) pass.
			if i == len(chars) {
				break
			}
			sentryChar = chars[i]
			sentryIdx = i
		}
	}
	return writeIdx
}
// @lc code=end
|
package dappauth
import (
"bytes"
"context"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"math/big"
"strings"
"github.com/ethereum/go-ethereum"
ethAbi "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
ethCrypto "github.com/ethereum/go-ethereum/crypto"
)
// mockContract is a test double simulating an on-chain identity contract:
// CallContract answers the ERC-1271 isValidSignature selector (0x1626ba7e)
// against authorizedKey, and CodeAt is unsupported.
type mockContract struct {
	address common.Address
	authorizedKey *ecdsa.PublicKey // key whose address counts as authorized; nil means none
	errorIsValidSignature bool // when true, the signature check returns a dummy error
}
// CodeAt is not supported by this mock; only CallContract is implemented.
func (m *mockContract) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
	err := fmt.Errorf("CodeAt not supported")
	return nil, err
}
// CallContract dispatches the simulated call on its 4-byte method selector;
// only the ERC-1271 isValidSignature selector (0x1626ba7e) is recognized.
func (m *mockContract) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
	selector := hex.EncodeToString(call.Data[:4])
	args := call.Data[4:]
	if selector == "1626ba7e" {
		return m._1626ba7e(args)
	}
	return nil, fmt.Errorf("Unexpected method %v", selector)
}
// "IsValidSignature" method call
func (m *mockContract) _1626ba7e(methodParams []byte) ([]byte, error) {
// TODO: refactor out of method
const definition = `[
{ "name" : "mixedBytes", "constant" : true, "type": "function", "outputs": [{ "name": "a", "type": "bytes32" }, { "name": "b", "type": "bytes" } ] }]`
abi, err := ethAbi.JSON(strings.NewReader(definition))
if err != nil {
return nil, err
}
data := [32]byte{}
sig := []byte{}
mixedBytes := []interface{}{&data, &sig}
err = abi.UnpackIntoInterface(&mixedBytes, "mixedBytes", methodParams)
if err != nil {
return nil, err
}
if m.errorIsValidSignature {
return nil, errors.New("Dummy error")
}
// split to 65 bytes (130 hex) chunks
multiSigs := chunk65Bytes(sig)
expectedAuthrorisedSig := multiSigs[0][:]
expectedAuthrorisedSig[64] -= 27 // Transform V from 27/28 to 0/1 according to the yellow paper
dataErc191Hash := erc191MessageHash(data[:], m.address)
recoveredKey, err := ethCrypto.SigToPub(dataErc191Hash, expectedAuthrorisedSig)
if err != nil {
return nil, err
}
if m.authorizedKey == nil {
return _false()
}
recoveredAddress := ethCrypto.PubkeyToAddress(*recoveredKey)
authorizedKeyAddr := ethCrypto.PubkeyToAddress(*m.authorizedKey)
if bytes.Compare(authorizedKeyAddr.Bytes(), recoveredAddress.Bytes()) == 0 {
return _true()
}
return _false()
}
// _true returns the ABI-encoded ERC-1271 success value: the 4-byte magic
// 0x1626ba7e left-aligned in a 32-byte word.
func _true() ([]byte, error) {
	const magicWord = "1626ba7e00000000000000000000000000000000000000000000000000000000"
	return hex.DecodeString(magicWord)
}
// _false returns the ABI-encoded ERC-1271 failure value: an all-zero
// 32-byte word.
func _false() ([]byte, error) {
	const zeroWord = "0000000000000000000000000000000000000000000000000000000000000000"
	return hex.DecodeString(zeroWord)
}
// erc191MessageHash computes the ERC-191 (version byte 0x00) digest:
// keccak256(0x19 || 0x00 || validator address || message).
func erc191MessageHash(msg []byte, address common.Address) []byte {
	payload := append([]byte{25, 0}, address.Bytes()...) // 25 == 0x19
	payload = append(payload, msg...)
	return ethCrypto.Keccak256(payload)
}
// chunk65Bytes splits b into consecutive 65-byte chunks (the length of one
// ECDSA signature). The final chunk is zero-padded when len(b) is not a
// multiple of 65; an empty input yields no chunks.
func chunk65Bytes(b []byte) [][65]byte {
	const chunkSize = 65
	var chunks [][65]byte
	for start := 0; start < len(b); start += chunkSize {
		var chunk [65]byte
		// copy stops at whichever of the chunk or the remaining input is
		// shorter, leaving any tail bytes zero.
		copy(chunk[:], b[start:])
		chunks = append(chunks, chunk)
	}
	return chunks
}
|
package conversion
import "fmt"
import "strconv"
import "strings"
// Int2String drains input, forwarding each int as its decimal string form on
// output, then closes output.
func Int2String(input chan int, output chan string) {
	for n := range input {
		output <- strconv.FormatInt(int64(n), 10)
	}
	close(output)
}
// String2Int drains input, parsing each string as a base-10 int and sending
// it on output; unparseable values are skipped with a console message.
// Closes output once input is exhausted.
func String2Int(input chan string, output chan int) {
	for s := range input {
		n, err := strconv.Atoi(s)
		if err != nil {
			fmt.Println("could not parse")
			continue
		}
		output <- n
	}
	close(output)
}
// String2IntArray reads comma-separated integer lists from input, parses each
// into a []int (unparseable elements are skipped with a console message), and
// sends the result on output. Closes output once input is exhausted.
func String2IntArray(input chan string, output chan []int) {
	for line := range input {
		parts := strings.Split(line, ",")
		parsed := []int{}
		for _, part := range parts {
			n, err := strconv.Atoi(part)
			if err != nil {
				fmt.Println("could not parse")
				continue
			}
			parsed = append(parsed, n)
		}
		output <- parsed
	}
	close(output)
}
// IntArray2String reads []int values from input and sends each as a
// comma-joined decimal string on output. Closes output once input is
// exhausted.
func IntArray2String(input chan []int, output chan string) {
	for nums := range input {
		parts := make([]string, 0, len(nums))
		for _, n := range nums {
			parts = append(parts, strconv.Itoa(n))
		}
		output <- strings.Join(parts, ",")
	}
	close(output)
}
|
package schemas
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestDecode checks that Decode routes raw JSON by its "schemaType" field:
// "POST" yields a typed Post with structured PostData, while "ANY" keeps the
// data payload as a generic map.
func TestDecode(t *testing.T) {
	tests := []struct {
		name string
		data string
		want interface{}
	}{
		{
			name: "post",
			data: `{
				"id": "id-1",
				"source": { "id": "1", "name": "source name" },
				"schemaType": "POST",
				"data": {
					"title": "my title",
					"link": "my link"
				}
			}`,
			want: NewPost(
				"id-1",
				Source{ID: "1", Name: "source name"},
				PostData{Title: "my title", Link: "my link"},
			),
		},
		{
			name: "any",
			data: `{
				"id": "id-1",
				"source": { "id": "1", "name": "source name" },
				"schemaType": "ANY",
				"data": {
					"title": "my title",
					"link": "my link"
				}
			}`,
			want: NewAny(
				"id-1",
				Source{ID: "1", Name: "source name"},
				map[string]interface{}{
					"title": "my title",
					"link": "my link",
				},
			),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			data := []byte(tt.data)
			result, err := Decode(data)
			require.NoError(t, err)
			require.Equal(t, tt.want, result)
		})
	}
}
|
package main
// _TloginGenerator holds the state and callbacks for the UDP login/connection
// procedure. Callback fields may be nil, in which case the named default
// implementation is used.
type _TloginGenerator struct {
	lgSrvDownInfoLX *_TsrvDownInfo // server info settings, updated via Github, used for connection attempts
	lgCB850101init func(*_TloginGenerator) // init procedure; nil selects the default _FudpDecode__800101x__init__tryUdpLogin__default
	lgCB850201chRece func(*_TloginGenerator) // channel receive handler; nil selects the default _Flogin__800201x__chan_in__default
	lgCB850301ConnGen func(*_TloginGenerator) // connection generator; nil selects the default _FloginGen__800301x__connGen__default
	lgCHuConnPortLO *chan _TudpConnPort // outbound channel: data to be sent by nodeS goes here and is distributed to one node
	lgCHdataMachineIdI chan _TdataMachinEid // inbound machine-id channel (a former lgCHconnectSuccI chan bool — connect/login succeeded true/false — is commented out)
	lgConnected bool // true once the login/connect handshake has succeeded
}
|
/*
* aggro
*
* Functions for querying data from the RIPEstat API
*
* Copyright (c) 2017 Noora Halme - please see LICENSE.md
*
*/
package main
import (
"errors"
"fmt"
"net/http"
"io/ioutil"
"strings"
"encoding/json"
)
// getCountryPrefixes fetches the IPv4/IPv6 prefixes registered to the country
// matching the ISO-3166 alpha-2 code cc via the RIPEstat country-resource-list
// API, appending them to ipv4Slice/ipv6Slice. Returns the number of IPv4 and
// IPv6 prefixes appended. A non-"ok" API status yields (0, 0, nil), matching
// the original behavior.
func getCountryPrefixes(cc string, ipv4Slice *[]string, ipv6Slice *[]string) (int, int, error) {
	url := fmt.Sprintf("https://stat.ripe.net/data/country-resource-list/data.json?resource=%s&v4_format=prefix", cc)
	res, err := http.Get(url)
	if err != nil {
		return 0, 0, errors.New("HTTP request failed")
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return 0, 0, errors.New("Reading document body failed")
	}
	var data map[string]interface{}
	if err := json.Unmarshal(body, &data); err != nil {
		return 0, 0, errors.New("JSON parsing failed")
	}
	if data["status"] != "ok" {
		return 0, 0, nil
	}
	// BUG FIX: the original chained type assertions without the comma-ok form
	// and panicked on any unexpected document shape; fail gracefully instead.
	payload, ok := data["data"].(map[string]interface{})
	if !ok {
		return 0, 0, errors.New("unexpected JSON structure")
	}
	resources, ok := payload["resources"].(map[string]interface{})
	if !ok {
		return 0, 0, errors.New("unexpected JSON structure")
	}
	ann4, ann6 := 0, 0
	if v4, ok := resources["ipv4"].([]interface{}); ok {
		for _, p := range v4 {
			if s, ok := p.(string); ok {
				*ipv4Slice = append(*ipv4Slice, s)
				ann4++
			}
		}
	}
	if v6, ok := resources["ipv6"].([]interface{}); ok {
		for _, p := range v6 {
			if s, ok := p.(string); ok {
				*ipv6Slice = append(*ipv6Slice, s)
				ann6++
			}
		}
	}
	return ann4, ann6, nil
}
// getASPrefixes fetches all announced prefixes for the given AS number via the
// RIPEstat announced-prefixes API, splitting them by address family into
// ipv4Slice/ipv6Slice (a ':' in the prefix marks IPv6). Returns the number of
// IPv4 and IPv6 prefixes appended. A non-"ok" API status yields (0, 0, nil),
// matching the original behavior.
func getASPrefixes(as int, ipv4Slice *[]string, ipv6Slice *[]string) (int, int, error) {
	url := fmt.Sprintf("https://stat.ripe.net//data/announced-prefixes/data.json?resource=AS%d", as)
	res, err := http.Get(url)
	if err != nil {
		return 0, 0, errors.New("HTTP request failed")
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return 0, 0, errors.New("Reading document body failed")
	}
	var data map[string]interface{}
	if err := json.Unmarshal(body, &data); err != nil {
		return 0, 0, errors.New("JSON parsing failed")
	}
	if data["status"] != "ok" {
		return 0, 0, nil
	}
	// BUG FIX: the original chained type assertions without the comma-ok form
	// and panicked on any unexpected document shape; fail gracefully instead.
	payload, ok := data["data"].(map[string]interface{})
	if !ok {
		return 0, 0, errors.New("unexpected JSON structure")
	}
	prefixList, ok := payload["prefixes"].([]interface{})
	if !ok {
		return 0, 0, errors.New("unexpected JSON structure")
	}
	ann4, ann6 := 0, 0
	for _, entry := range prefixList {
		m, ok := entry.(map[string]interface{})
		if !ok {
			continue
		}
		prefix, ok := m["prefix"].(string)
		if !ok {
			continue
		}
		if strings.ContainsRune(prefix, ':') {
			*ipv6Slice = append(*ipv6Slice, prefix)
			ann6++
		} else {
			*ipv4Slice = append(*ipv4Slice, prefix)
			ann4++
		}
	}
	return ann4, ann6, nil
}
|
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_pluralize verifies the singular form at count 1 and the plural form at
// counts other than 1, including zero.
func Test_pluralize(t *testing.T) {
	assert.Equal(t, "migration", pluralize("migration", 1))
	assert.Equal(t, "migrations", pluralize("migration", 2))
	assert.Equal(t, "migrations", pluralize("migration", 0))
}
|
package main
import (
"/home/ubuntu/proj/helloworld/hello"
"micode.be.xiaomi.com/systech/soa/thrift"
)
// HelloWorld is a stub implementation of the HelloService thrift handler; it
// currently returns zero values (nil result, nil error) for every call.
// NOTE(review): the hello import above uses an absolute filesystem path, which
// is not a valid Go import path — confirm the intended module path.
func (p *HelloServiceHandler) HelloWorld(ctx *thrift.XContext, name string) (r *hello.Result_, errRet error) {
	return
}
|
package historian_test
//
// Copyright (c) 2019 ARM Limited.
//
// SPDX-License-Identifier: MIT
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
import (
"fmt"
. "devicedb/historian"
. "devicedb/storage"
. "devicedb/util"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Historian test suite: exercises time-, serial-, source- and data-filtered
// queries (and purging) against a log of synthetic events.
var _ = Describe("Historian", func() {
	var (
		storageEngine StorageDriver
		historian     *Historian
	)

	// Fresh storage engine and historian per spec.
	// NewHistorian args are (storage, 101, 0, 1000) — presumably
	// (driver, next log serial, forgotten serial, purge batch size);
	// TODO confirm against the Historian constructor.
	BeforeEach(func() {
		storageEngine = MakeNewStorageDriver()
		storageEngine.Open()
		historian = NewHistorian(storageEngine, 101, 0, 1000)
	})

	AfterEach(func() {
		storageEngine.Close()
	})

	Context("There are 100 logged events of 4 different varieties in the history from three different sources", func() {
		// Fixture: events i = 0..99 with Timestamp i, SourceID source-(i%3),
		// Type type-(i%4) and Data data-(i>>4). Every expectation below is
		// derived from this scheme.
		BeforeEach(func() {
			for i := 0; i < 100; i += 1 {
				historian.LogEvent(&Event{
					Timestamp: uint64(i),
					SourceID:  fmt.Sprintf("source-%d", (i % 3)),
					Type:      fmt.Sprintf("type-%d", (i % 4)),
					Data:      fmt.Sprintf("data-%d", i >> 4),
				})
			}
		})

		Describe("performing a query filtering only by time range", func() {
			It("should return only events that happened between [50, 100) after the purge", func() {
				// Purge drops events with Timestamp < 50; log size halves but
				// the serial counter is unaffected.
				Expect(historian.Purge(&HistoryQuery{ Before: 50 })).Should(BeNil())
				iter, err := historian.Query(&HistoryQuery{ })
				Expect(err).Should(BeNil())
				Expect(historian.LogSize()).Should(Equal(uint64(50)))
				Expect(historian.LogSerial()).Should(Equal(uint64(101)))
				for i := 50; i < 100; i += 1 {
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})

			It("should return only events that happened between [50, 100)", func() {
				// MinSerial filter: the loop starts at timestamp 49, which
				// implies serials are assigned from 1 (serial 50 == event with
				// timestamp 49) — NOTE(review): confirm serial numbering.
				var minSerial uint64 = 50
				iter, err := historian.Query(&HistoryQuery{ MinSerial: &minSerial })
				Expect(err).Should(BeNil())
				Expect(historian.LogSize()).Should(Equal(uint64(100)))
				Expect(historian.LogSerial()).Should(Equal(uint64(101)))
				for i := 49; i < 100; i += 1 {
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})

			It("should return ten events from least to most recent when a time range between [0, 10) is queried and an ascending order is specified implictly", func() {
				iter, err := historian.Query(&HistoryQuery{
					After: 0,
					Before: 10,
				})
				Expect(err).Should(BeNil())
				for i := 0; i < 10; i += 1 {
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})

			It("should return ten events from least to most recent when a time range between [0, 10) is queried and an ascending order is specified explicitly", func() {
				iter, err := historian.Query(&HistoryQuery{
					After: 0,
					Before: 10,
					Order: "asc",
				})
				Expect(err).Should(BeNil())
				for i := 0; i < 10; i += 1 {
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})

			It("should return ten events from most to least recent when a time range between [0, 10) is queried and a descending order is specified", func() {
				iter, err := historian.Query(&HistoryQuery{
					After: 0,
					Before: 10,
					Order: "desc",
				})
				Expect(err).Should(BeNil())
				// Same ten events, iterated in reverse timestamp order.
				for i := 9; i >= 0; i -= 1 {
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})
		})

		Describe("performing a query filtering only by source", func() {
			It("should return all events from source-0 in ascending order", func() {
				iter, err := historian.Query(&HistoryQuery{
					Sources: []string{ "source-0" },
				})
				Expect(err).Should(BeNil())
				// source-0 events are exactly those with i % 3 == 0.
				for i := 0; i < 100; i += 1 {
					if i % 3 != 0 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})

			It("should return all events from source-0 in ascending order", func() {
				iter, err := historian.Query(&HistoryQuery{
					Sources: []string{ "source-0" },
					Order: "asc",
				})
				Expect(err).Should(BeNil())
				for i := 0; i < 100; i += 1 {
					if i % 3 != 0 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})

			It("should return all events from source-0 in descending order", func() {
				iter, err := historian.Query(&HistoryQuery{
					Sources: []string{ "source-0" },
					Order: "desc",
				})
				Expect(err).Should(BeNil())
				for i := 99; i >= 0; i -= 1 {
					if i % 3 != 0 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})

			It("should return all events from source-0 and source-2 in ascending order", func() {
				// Multi-source queries iterate per source (all of source-0,
				// then all of source-2), not interleaved by timestamp — the
				// two sequential loops below reflect that.
				iter, err := historian.Query(&HistoryQuery{
					Sources: []string{ "source-0", "source-2" },
				})
				Expect(err).Should(BeNil())
				for i := 0; i < 100; i += 1 {
					if i % 3 != 0 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				for i := 0; i < 100; i += 1 {
					if i % 3 != 2 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})

			It("should return all events from source-0 and source-2 in ascending order", func() {
				// Same as the previous spec but with the Sources list reversed:
				// iteration order is still source-0 first, i.e. independent of
				// the order sources are listed in the query.
				iter, err := historian.Query(&HistoryQuery{
					Sources: []string{ "source-2", "source-0" },
				})
				Expect(err).Should(BeNil())
				for i := 0; i < 100; i += 1 {
					if i % 3 != 0 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				for i := 0; i < 100; i += 1 {
					if i % 3 != 2 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})
		})

		Describe("performing a query filtering by source and time", func() {
			It("should return all events from source-0 and source-2 in ascending order between times [0, 50)", func() {
				iter, err := historian.Query(&HistoryQuery{
					Sources: []string{ "source-0", "source-2" },
					After: 0,
					Before: 50,
				})
				Expect(err).Should(BeNil())
				for i := 0; i < 50; i += 1 {
					if i % 3 != 0 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				for i := 0; i < 50; i += 1 {
					if i % 3 != 2 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})
		})

		Describe("performing a query filtering by source, time and data", func() {
			It("should return all events from source-0 and source-2 in ascending order between times [0, 50) whose data is data-0", func() {
				var data = "data-0"
				iter, err := historian.Query(&HistoryQuery{
					Sources: []string{ "source-0", "source-2" },
					After: 0,
					Before: 50,
					Data: &data,
				})
				Expect(err).Should(BeNil())
				// Data == "data-0" corresponds to i >> 4 == 0, i.e. i in [0, 16).
				for i := 0; i < 50; i += 1 {
					if i % 3 != 0 || i >> 4 != 0 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				for i := 0; i < 50; i += 1 {
					if i % 3 != 2 || i >> 4 != 0 {
						continue
					}
					Expect(iter.Next()).Should(BeTrue())
					Expect(iter.Error()).Should(BeNil())
					Expect(iter.Event()).Should(Not(BeNil()))
					Expect(iter.Event().Timestamp).Should(Equal(uint64(i)))
					Expect(iter.Event().SourceID).Should(Equal(fmt.Sprintf("source-%d", (i % 3))))
					Expect(iter.Event().Type).Should(Equal(fmt.Sprintf("type-%d", (i % 4))))
					Expect(iter.Event().Data).Should(Equal(fmt.Sprintf("data-%d", (i >> 4))))
				}
				Expect(iter.Next()).Should(BeFalse())
				Expect(iter.Error()).Should(BeNil())
				Expect(iter.Event()).Should(BeNil())
			})
		})
	})
})
|
package helper
import (
"github.com/docker/cli/cli/streams"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/loft-sh/devspace/pkg/devspace/build/builder/restart"
"github.com/loft-sh/devspace/pkg/util/kubeconfig"
logpkg "github.com/loft-sh/devspace/pkg/util/log"
dockerterm "github.com/moby/term"
"github.com/sirupsen/logrus"
"io"
"os"
"path/filepath"
"strings"
"github.com/docker/cli/cli/command/image/build"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/archive"
"github.com/loft-sh/devspace/pkg/devspace/config/localcache"
"github.com/loft-sh/devspace/pkg/devspace/config/versions/latest"
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
dockerclient "github.com/loft-sh/devspace/pkg/devspace/docker"
"github.com/loft-sh/devspace/pkg/devspace/kubectl"
"github.com/loft-sh/devspace/pkg/util/hash"
"github.com/loft-sh/utils/pkg/command"
"github.com/pkg/errors"
"gopkg.in/yaml.v3"
)
var (
	// stdout is docker's terminal-aware standard output stream; it is used as
	// the build output writer when logging through the global logger instance
	// (see CreateContextStream). The stdin and stderr streams are discarded.
	_, stdout, _ = dockerterm.StdStreams()
)
// BuildHelper is the helper class to store common functionality used by both the docker and kaniko builder
type BuildHelper struct {
	ImageConf *latest.Image // image configuration this helper builds

	DockerfilePath string // resolved path to the Dockerfile
	ContextPath    string // resolved build context directory

	EngineName string   // name of the build engine (e.g. docker or kaniko), used for logging
	ImageName  string   // image name without tag
	ImageTags  []string // tags to apply to the built image

	Entrypoint []string // optional entrypoint override from the image config
	Cmd        []string // optional cmd override from the image config
}
// BuildHelperInterface is the interface the build helper uses to build an image.
// Implementations receive absolute context/Dockerfile paths plus optional
// entrypoint/cmd overrides and perform the engine-specific build.
type BuildHelperInterface interface {
	BuildImage(ctx devspacecontext.Context, absoluteContextPath string, absoluteDockerfilePath string, entrypoint []string, cmd []string) error
}
// NewBuildHelper creates a new build helper for a certain engine.
//
// It resolves the Dockerfile and context paths from the image configuration
// and captures the entrypoint/cmd overrides. The returned helper is used by
// both the docker and kaniko builders.
func NewBuildHelper(ctx devspacecontext.Context, engineName string, imageConf *latest.Image, imageTags []string) *BuildHelper {
	dockerfilePath, contextPath := GetDockerfileAndContext(ctx, imageConf)

	// Entrypoint/Cmd come straight from the image config. A nil slice simply
	// means "no override", so the previous explicit nil-checks before copying
	// were redundant and have been dropped.
	return &BuildHelper{
		ImageConf:      imageConf,
		DockerfilePath: dockerfilePath,
		ContextPath:    contextPath,
		ImageName:      imageConf.Image,
		ImageTags:      imageTags,
		EngineName:     engineName,
		Entrypoint:     imageConf.Entrypoint,
		Cmd:            imageConf.Cmd,
	}
}
// Build builds a new image by delegating to the engine-specific builder
// implementation and logs progress around the build.
func (b *BuildHelper) Build(ctx devspacecontext.Context, imageBuilder BuildHelperInterface) error {
	ctx.Log().Infof("Building image '%s:%s' with engine '%s'", b.ImageName, b.ImageTags[0], b.EngineName)

	// The actual build work is done by the injected builder.
	if err := imageBuilder.BuildImage(ctx, b.ContextPath, b.DockerfilePath, b.Entrypoint, b.Cmd); err != nil {
		return err
	}

	ctx.Log().Done("Done processing image '" + b.ImageName + "'")
	return nil
}
// ShouldRebuild determines if the image should be rebuilt.
//
// It compares hashes of the Dockerfile, the image configuration, the
// entrypoint/cmd overrides and (unless disabled) the build context against
// the values stored in the local image cache, updates the cache when a
// rebuild is due, and returns whether a rebuild is required.
func (b *BuildHelper) ShouldRebuild(ctx devspacecontext.Context, forceRebuild bool) (bool, error) {
	// Cache lookup error is deliberately ignored: a missing entry yields a
	// zero-value imageCache whose empty Tag forces a rebuild below.
	imageCache, _ := ctx.Config().LocalCache().GetImageCache(b.ImageConf.Name)
	// if rebuild strategy is always, we return here
	if b.ImageConf.RebuildStrategy == latest.RebuildStrategyAlways {
		ctx.Log().Infof("Rebuild image %s because strategy is always rebuild", imageCache.ImageName)
		return true, nil
	}
	// Hash dockerfile
	_, err := os.Stat(b.DockerfilePath)
	if err != nil {
		return false, errors.Errorf("Dockerfile %s missing: %v", b.DockerfilePath, err)
	}
	dockerfileHash, err := hash.Directory(b.DockerfilePath)
	if err != nil {
		return false, errors.Wrap(err, "hash dockerfile")
	}
	// Hash image config
	configStr, err := yaml.Marshal(*b.ImageConf)
	if err != nil {
		return false, errors.Wrap(err, "marshal image config")
	}
	imageConfigHash := hash.String(string(configStr))
	// Hash entrypoint: entrypoint and cmd strings are concatenated into a
	// single string and hashed together; empty means "no override".
	entrypointHash := ""
	if len(b.Entrypoint) > 0 {
		for _, str := range b.Entrypoint {
			entrypointHash += str
		}
	}
	if len(b.Cmd) > 0 {
		for _, str := range b.Cmd {
			entrypointHash += str
		}
	}
	if entrypointHash != "" {
		entrypointHash = hash.String(entrypointHash)
	}
	// only rebuild Docker image when Dockerfile or context has changed since latest build
	mustRebuild := imageCache.Tag == "" || imageCache.DockerfileHash != dockerfileHash || imageCache.ImageConfigHash != imageConfigHash || imageCache.EntrypointHash != entrypointHash
	// Log the first reason that triggered the rebuild (mirrors the condition above).
	if imageCache.Tag == "" {
		ctx.Log().Infof("Rebuild image %s because tag is missing", imageCache.ImageName)
	} else if imageCache.DockerfileHash != dockerfileHash {
		ctx.Log().Infof("Rebuild image %s because dockerfile has changed", imageCache.ImageName)
	} else if imageCache.ImageConfigHash != imageConfigHash {
		ctx.Log().Infof("Rebuild image %s because image config has changed", imageCache.ImageName)
	} else if imageCache.EntrypointHash != entrypointHash {
		ctx.Log().Infof("Rebuild image %s because entrypoint has changed", imageCache.ImageName)
	}
	// Build a client for the kube context used during the previous deploy so
	// we can tell whether it pointed at a local cluster.
	var lastContextClient kubectl.Client
	if ctx.Config().LocalCache().GetLastContext() != nil {
		lastContextClient, err = kubectl.NewClientFromContext(
			ctx.Config().LocalCache().GetLastContext().Context,
			ctx.Config().LocalCache().GetLastContext().Namespace,
			false,
			kubeconfig.NewLoader(),
		)
		if err != nil {
			return false, err
		}
	}
	// Okay this check verifies if the previous deploy context was local kubernetes context where we didn't push the image and now have a kubernetes context where we probably push
	// or use another docker client (e.g. minikube <-> docker-desktop)
	if !mustRebuild &&
		ctx.KubeClient() != nil &&
		ctx.Config().LocalCache().GetLastContext() != nil &&
		ctx.Config().LocalCache().GetLastContext().Context != ctx.KubeClient().CurrentContext() &&
		kubectl.IsLocalKubernetes(lastContextClient) {
		mustRebuild = true
		ctx.Log().Infof("Rebuild image %s because previous build was local kubernetes", imageCache.ImageName)
		// Record the new context so the same rebuild isn't triggered again next run.
		ctx.Config().LocalCache().SetLastContext(&localcache.LastContextConfig{
			Namespace: ctx.KubeClient().Namespace(),
			Context:   ctx.KubeClient().CurrentContext(),
		})
	}
	// Check if should consider context path changes for rebuilding
	if b.ImageConf.RebuildStrategy != latest.RebuildStrategyIgnoreContextChanges {
		// Hash context path
		contextDir, relDockerfile, err := build.GetContextFromLocalDir(b.ContextPath, b.DockerfilePath)
		if err != nil {
			return false, errors.Wrap(err, "get context from local dir")
		}
		relDockerfile = archive.CanonicalTarNameForPath(relDockerfile)
		// .dockerignore exclusions are respected when hashing the context.
		excludes, err := ReadDockerignore(contextDir, relDockerfile)
		if err != nil {
			return false, errors.Errorf("Error reading .dockerignore: %v", err)
		}
		contextHash, err := hash.DirectoryExcludes(contextDir, excludes, false)
		if err != nil {
			return false, errors.Errorf("Error hashing %s: %v", contextDir, err)
		}
		if !mustRebuild && imageCache.ContextHash != contextHash {
			ctx.Log().Infof("Rebuild image %s because build context has changed", imageCache.ImageName)
		}
		mustRebuild = mustRebuild || imageCache.ContextHash != contextHash
		// TODO: This is not an ideal solution since there can be the issue that the user runs
		// devspace dev & the generated.yaml is written without ContextHash and on a subsequent
		// devspace deploy the image would be rebuild, because the ContextHash was empty and is
		// now different. However in this case it is probably better to save the context hash computing
		// time during devspace dev instead of always hashing the context path.
		if forceRebuild || mustRebuild {
			imageCache.ContextHash = contextHash
		}
	}
	// Persist the fresh hashes only when a rebuild will actually happen.
	if forceRebuild || mustRebuild {
		imageCache.DockerfileHash = dockerfileHash
		imageCache.ImageConfigHash = imageConfigHash
		imageCache.EntrypointHash = entrypointHash
	}
	ctx.Config().LocalCache().SetImageCache(b.ImageConf.Name, imageCache)
	return mustRebuild, nil
}
// IsImageAvailableLocally reports whether the cached image tag is present in
// the local docker daemon's image list. When no docker CLI is available the
// check is skipped and the image is reported as available so no rebuild is
// forced.
func (b *BuildHelper) IsImageAvailableLocally(ctx devspacecontext.Context, dockerClient dockerclient.Client) (bool, error) {
	// Probe for docker by running "docker buildx"; a failure means docker is
	// not usable here, so skip the availability check entirely.
	if err := command.Command(ctx.Context(), ctx.WorkingDir(), ctx.Environ(), nil, nil, nil, "docker", "buildx"); err != nil {
		return true, nil
	}

	imageCache, _ := ctx.Config().LocalCache().GetImageCache(b.ImageConf.Name)
	wantedTag := imageCache.ResolveImage() + ":" + imageCache.Tag

	images, err := dockerClient.DockerAPIClient().ImageList(ctx.Context(), types.ImageListOptions{})
	if err != nil {
		return false, err
	}

	// Scan every repo tag known to the daemon for an exact match.
	for _, img := range images {
		for _, repoTag := range img.RepoTags {
			if repoTag == wantedTag {
				return true, nil
			}
		}
	}
	return false, nil
}
// CreateContextStream creates a new context stream that includes the correct docker context, (modified) dockerfile and inject helper
// if needed.
//
// It returns, in order: the tar build-context reader (wrapped in a progress
// reader), the output writer, the docker out-stream, the final image build
// options, and an error. On error the writer is still returned so callers can
// close it. NOTE(review): the deferred dockerfileCtx.Close()/os.RemoveAll run
// when this function returns, i.e. before the returned body reader is
// consumed — presumably safe because the tar context is built in memory
// before return; confirm against the archive/AddDockerfileToBuildContext
// implementations.
func (b *BuildHelper) CreateContextStream(contextPath, dockerfilePath string, entrypoint, cmd []string, log logpkg.Logger) (io.Reader, io.WriteCloser, *streams.Out, *types.ImageBuildOptions, error) {
	// Buildoptions: copy the configurable knobs from the image config.
	options := &types.ImageBuildOptions{}
	if b.ImageConf.BuildArgs != nil {
		options.BuildArgs = b.ImageConf.BuildArgs
	}
	if b.ImageConf.Target != "" {
		options.Target = b.ImageConf.Target
	}
	if b.ImageConf.Network != "" {
		options.NetworkMode = b.ImageConf.Network
	}
	// Determine output writer: raw stdout for the global logger, otherwise an
	// info-level writer from the given logger.
	var writer io.WriteCloser
	if log == logpkg.GetInstance() {
		writer = logpkg.WithNopCloser(stdout)
	} else {
		writer = log.Writer(logrus.InfoLevel, false)
	}
	contextDir, relDockerfile, err := build.GetContextFromLocalDir(contextPath, dockerfilePath)
	if err != nil {
		return nil, writer, nil, nil, err
	}
	// Dockerfile is out of context: if the Dockerfile lives outside the build
	// context it must be passed to docker separately (dockerfileCtx).
	var dockerfileCtx *os.File
	if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
		// Dockerfile is outside of build-context; read the Dockerfile and pass it as dockerfileCtx
		dockerfileCtx, err = os.Open(dockerfilePath)
		if err != nil {
			return nil, writer, nil, nil, errors.Errorf("unable to open Dockerfile: %v", err)
		}
		defer dockerfileCtx.Close()
	}
	// And canonicalize dockerfile name to a platform-independent one
	authConfigs, _ := dockerclient.GetAllAuthConfigs()
	relDockerfile = archive.CanonicalTarNameForPath(relDockerfile)
	excludes, err := ReadDockerignore(contextDir, relDockerfile)
	if err != nil {
		return nil, writer, nil, nil, err
	}
	if err := build.ValidateContextDirectory(contextDir, excludes); err != nil {
		return nil, writer, nil, nil, errors.Errorf("Error checking context: '%s'", err)
	}
	// Tar the context directory; files are chowned to root inside the archive.
	buildCtx, err := archive.TarWithOptions(contextDir, &archive.TarOptions{
		ExcludePatterns: excludes,
		ChownOpts:       &idtools.Identity{UID: 0, GID: 0},
	})
	if err != nil {
		return nil, writer, nil, nil, err
	}
	// Check if we should overwrite entrypoint
	injectRestartHelper := b.ImageConf.InjectRestartHelper || b.ImageConf.InjectLegacyRestartHelper
	if len(entrypoint) > 0 || len(cmd) > 0 || injectRestartHelper || len(b.ImageConf.AppendDockerfileInstructions) > 0 {
		// RewriteDockerfile writes a modified copy; dockerfilePath now points
		// at that temporary file (removed via the deferred RemoveAll below).
		dockerfilePath, err = RewriteDockerfile(dockerfilePath, entrypoint, cmd, b.ImageConf.AppendDockerfileInstructions, options.Target, injectRestartHelper, log)
		if err != nil {
			return nil, writer, nil, nil, err
		}
		// Check if dockerfile is out of context, then we use the docker way to replace the dockerfile
		if dockerfileCtx != nil {
			// We will add it to the build context
			dockerfileCtx, err = os.Open(dockerfilePath)
			if err != nil {
				return nil, writer, nil, nil, errors.Errorf("unable to open Dockerfile: %v", err)
			}
			defer dockerfileCtx.Close()
		} else {
			// We will add it to the build context
			overwriteDockerfileCtx, err := os.Open(dockerfilePath)
			if err != nil {
				return nil, writer, nil, nil, errors.Errorf("unable to open Dockerfile: %v", err)
			}
			buildCtx, err = OverwriteDockerfileInBuildContext(overwriteDockerfileCtx, buildCtx, relDockerfile)
			if err != nil {
				return nil, writer, nil, nil, errors.Errorf("Error overwriting %s: %v", relDockerfile, err)
			}
		}
		// Clean up the temporary directory holding the rewritten Dockerfile.
		defer os.RemoveAll(filepath.Dir(dockerfilePath))
		// inject the build script
		if injectRestartHelper {
			var helperScript string
			var err error
			if b.ImageConf.InjectRestartHelper {
				helperScript, err = restart.LoadRestartHelper(b.ImageConf.RestartHelperPath)
				if err != nil {
					return nil, writer, nil, nil, errors.Wrap(err, "load restart helper")
				}
			} else if b.ImageConf.InjectLegacyRestartHelper {
				helperScript, err = restart.LoadLegacyRestartHelper(b.ImageConf.RestartHelperPath)
				if err != nil {
					return nil, writer, nil, nil, errors.Wrap(err, "load legacy restart helper")
				}
			}
			buildCtx, err = InjectBuildScriptInContext(helperScript, buildCtx)
			if err != nil {
				return nil, writer, nil, nil, errors.Wrap(err, "inject build script into context")
			}
		}
	}
	// replace Dockerfile if it was added from stdin or a file outside the build-context, and there is archive context
	if dockerfileCtx != nil && buildCtx != nil {
		buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx)
		if err != nil {
			return nil, writer, nil, nil, err
		}
	}
	// Which tags to build
	tags := []string{}
	for _, tag := range b.ImageTags {
		tags = append(tags, b.ImageName+":"+tag)
	}
	// Setup an upload progress bar
	outStream := streams.NewOut(writer)
	progressOutput := streamformatter.NewProgressOutput(outStream)
	body := progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
	buildOptions := &types.ImageBuildOptions{
		Tags:        tags,
		Dockerfile:  relDockerfile,
		BuildArgs:   options.BuildArgs,
		Target:      options.Target,
		NetworkMode: options.NetworkMode,
		AuthConfigs: authConfigs,
	}
	return body, writer, outStream, buildOptions, nil
}
|
/*
Copyright (c) 2019 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package objectstore
import (
"context"
"github.com/pkg/errors"
"go.opencensus.io/trace"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic/dynamicinformer"
kcache "k8s.io/client-go/tools/cache"
"github.com/vmware/octant/internal/cluster"
"github.com/vmware/octant/internal/log"
"github.com/vmware/octant/pkg/store"
)
// WatchOpt is a functional option for configuring a Watch.
type WatchOpt func(*Watch)
// Watch is a cache which watches the cluster for updates to known objects. It wraps a dynamic cache
// by default. Since the cache knows about all cluster updates, a majority of operations for listing
// and getting objects can happen in local memory instead of requiring a network request.
type Watch struct {
	// initFactoryFunc creates a namespace-scoped dynamic informer factory.
	initFactoryFunc func(context.Context, cluster.ClientInterface, string) (dynamicinformer.DynamicSharedInformerFactory, error)
	// initBackendFunc creates the backing object store (dynamic cache by default).
	initBackendFunc func(watch *Watch) (store.Store, error)
	client          cluster.ClientInterface
	// stopCh is closed when the bootstrap context is canceled; informers and
	// the handleUpdates goroutines stop on it.
	stopCh     <-chan struct{}
	cancelFunc context.CancelFunc
	// factories caches informer factories per namespace ("" is the default,
	// cluster-wide factory).
	factories *factoriesCache
	// watchedGVKs tracks which (namespace, GVK) pairs already have watches.
	watchedGVKs *watchedGVKsCache
	// cachedObjects holds the locally cached objects per namespace and GVK.
	cachedObjects *cachedObjectsCache
	handlers      map[string]map[schema.GroupVersionKind]watchEventHandler

	backendObjectStore store.Store
	onClientUpdate     chan store.Store
	updateFns          []store.UpdateFn
}

// Compile-time check that Watch satisfies store.Store.
var _ store.Store = (*Watch)(nil)
// initWatchBackend builds the default backend store for a Watch: a
// DynamicCache whose informer-factory lookup is routed through the Watch's
// own per-namespace factory cache, so both share factories.
func initWatchBackend(w *Watch) (store.Store, error) {
	backendObjectStore, err := NewDynamicCache(w.client, w.stopCh, func(d *DynamicCache) {
		d.initFactoryFunc = func(ctx context.Context, client cluster.ClientInterface, namespace string) (dynamicinformer.DynamicSharedInformerFactory, error) {
			factory, ok := w.factories.get(namespace)
			if !ok {
				// Same fallback as Watch.createEventHandler: when cluster-wide
				// watch access is denied (HasAccess returns an error), create a
				// namespace-scoped factory; otherwise reuse the default ("")
				// factory. Note the inner assignment reuses the if-scoped err.
				if err := w.HasAccess(ctx, store.Key{Namespace: metav1.NamespaceAll}, "watch"); err != nil {
					factory, err = w.initFactoryFunc(ctx, w.client, namespace)
					if err != nil {
						return nil, err
					}
				} else {
					factory, ok = w.factories.get("")
					if !ok {
						return nil, errors.New("no default DynamicInformerFactory found")
					}
				}
			}
			// Cache whichever factory was resolved for future lookups.
			w.factories.set(namespace, factory)
			return factory, nil
		}
	})
	if err != nil {
		return nil, errors.Wrap(err, "initial dynamic cache")
	}
	return backendObjectStore, nil
}
// NewWatch create an instance of new watch. By default, it will create a dynamic cache as its
// backend. Options are applied before the watch is bootstrapped.
func NewWatch(ctx context.Context, client cluster.ClientInterface, options ...WatchOpt) (*Watch, error) {
	w := &Watch{
		initFactoryFunc: initDynamicSharedInformerFactory,
		initBackendFunc: initWatchBackend,
		client:          client,
		factories:       initFactoriesCache(),
		watchedGVKs:     initWatchedGVKsCache(),
		cachedObjects:   initCachedObjectsCache(),
		handlers:        make(map[string]map[schema.GroupVersionKind]watchEventHandler),
		onClientUpdate:  make(chan store.Store, 10),
	}

	// Let callers override defaults before anything is started.
	for _, opt := range options {
		opt(w)
	}

	err := w.bootstrap(ctx, false)
	if err != nil {
		return nil, err
	}
	return w, nil
}
// bootstrap (re)initializes the watch: it wires up the stop channel, ensures a
// default informer factory and a backend store exist, and starts a namespace
// watcher. When forceBackendInit is true all caches and the backend are
// rebuilt from scratch (used after a cluster client update, presumably —
// confirm against callers outside this chunk).
func (w *Watch) bootstrap(ctx context.Context, forceBackendInit bool) error {
	logger := log.From(ctx)
	logger.With("backend-init", forceBackendInit).Debugf("bootstrapping")
	if forceBackendInit {
		// Drop all cached factories/objects/handlers so nothing stale survives
		// the backend re-init.
		w.factories = initFactoriesCache()
		w.watchedGVKs = initWatchedGVKsCache()
		w.cachedObjects = initCachedObjectsCache()
		w.handlers = make(map[string]map[schema.GroupVersionKind]watchEventHandler)
	}
	// Derive a cancelable context; its Done channel doubles as the stop
	// channel for informers and update goroutines.
	ctx, cancel := context.WithCancel(ctx)
	w.cancelFunc = cancel
	w.stopCh = ctx.Done()
	// Ensure the default (cluster-wide, "") factory exists before the backend
	// is created, since the backend's factory lookup falls back to it.
	if _, ok := w.factories.get(""); !ok {
		factory, err := w.initFactoryFunc(ctx, w.client, "")
		if err != nil {
			return errors.Wrap(err, "initialize dynamic shared informer factory")
		}
		w.factories.set("", factory)
	}
	if w.backendObjectStore == nil || forceBackendInit {
		backendObjectStore, err := w.initBackendFunc(w)
		if err != nil {
			return errors.Wrap(err, "initial dynamic cache")
		}
		w.backendObjectStore = backendObjectStore
	}
	// Watch namespaces so the cache can react to namespace changes.
	nsKey := store.Key{APIVersion: "v1", Kind: "Namespace"}
	nsHandler := &nsUpdateHandler{
		watch:  w,
		logger: log.From(ctx),
	}
	if err := w.Watch(ctx, nsKey, nsHandler); err != nil {
		return errors.Wrap(err, "create namespace watcher")
	}
	return nil
}
// HasAccess reports whether the given verb is permitted on objects identified
// by key, delegating to the backend object store.
func (w *Watch) HasAccess(ctx context.Context, key store.Key, verb string) error {
	// Guard against a nil backend — consistent with List, Get and Watch, which
	// all return an error here instead of panicking with a nil dereference.
	if w.backendObjectStore == nil {
		return errors.New("backend object store is nil")
	}
	return w.backendObjectStore.HasAccess(ctx, key, verb)
}
// List lists objects using a key.
//
// If the key's GVK is already watched in its namespace, results come from the
// in-memory cache (filtered by namespace and label selector). Otherwise it
// lists from the backend, primes the cache, registers an event handler to keep
// the cache current, and marks the GVK as watched. Access failures are logged
// and yield an empty (non-nil) slice rather than an error.
func (w *Watch) List(ctx context.Context, key store.Key) ([]*unstructured.Unstructured, error) {
	ctx, span := trace.StartSpan(ctx, "watchCacheList")
	defer span.End()
	if w.backendObjectStore == nil {
		return nil, errors.New("backend object store is nil")
	}
	// TODO: find out why this doesn't work with watch.
	logger := log.From(ctx)
	if err := w.backendObjectStore.HasAccess(ctx, key, "list"); err != nil {
		logger.Errorf("check access failed: %v", err)
		return []*unstructured.Unstructured{}, nil
	}
	gvk := key.GroupVersionKind()
	if w.isKeyCached(key) {
		// Fast path: serve from the local cache.
		var filteredObjects []*unstructured.Unstructured
		var selector = labels.Everything()
		if key.Selector != nil {
			selector = key.Selector.AsSelector()
		}
		cachedObjects := w.cachedObjects.list(key.Namespace, gvk)
		for _, object := range cachedObjects {
			if key.Namespace == object.GetNamespace() {
				objectLabels := labels.Set(object.GetLabels())
				if selector.Matches(objectLabels) {
					filteredObjects = append(filteredObjects, object)
				}
			}
		}
		return filteredObjects, nil
	}
	// Slow path: list from the backend, then keep the cache updated via the
	// event handler goroutine started here.
	updateCh := make(chan watchEvent)
	deleteCh := make(chan watchEvent)
	go w.handleUpdates(key, updateCh, deleteCh)
	objects, err := w.backendObjectStore.List(ctx, key)
	if err != nil {
		return nil, err
	}
	for _, object := range objects {
		w.cachedObjects.update(key.Namespace, gvk, object)
	}
	if err := w.createEventHandler(ctx, key, updateCh, deleteCh); err != nil {
		return nil, errors.Wrap(err, "create event handler")
	}
	w.flagGVKWatched(key, gvk)
	return objects, nil
}
// Get gets an object using a key.
//
// Mirrors List: cached GVKs are answered from local memory (matching on
// namespace and name); uncached ones hit the backend, prime the cache, and
// register an event handler. On an access failure an empty object (not an
// error) is returned.
func (w *Watch) Get(ctx context.Context, key store.Key) (*unstructured.Unstructured, error) {
	ctx, span := trace.StartSpan(ctx, "watchCacheGet")
	defer span.End()
	if w.backendObjectStore == nil {
		return nil, errors.New("backend cached is nil")
	}
	logger := log.From(ctx)
	if err := w.backendObjectStore.HasAccess(ctx, key, "get"); err != nil {
		logger.Errorf("check access failed: %v", err)
		u := unstructured.Unstructured{}
		return &u, nil
	}
	gvk := key.GroupVersionKind()
	if w.isKeyCached(key) {
		cachedObjects := w.cachedObjects.list(key.Namespace, gvk)
		for _, object := range cachedObjects {
			if key.Namespace == object.GetNamespace() &&
				key.Name == object.GetName() {
				return object, nil
			}
		}
		// TODO: handle not found case
		return nil, nil
	}
	updateCh := make(chan watchEvent)
	deleteCh := make(chan watchEvent)
	go w.handleUpdates(key, updateCh, deleteCh)
	object, err := w.backendObjectStore.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	w.cachedObjects.update(key.Namespace, gvk, object)
	if err := w.createEventHandler(ctx, key, updateCh, deleteCh); err != nil {
		return nil, errors.Wrap(err, "create event handler")
	}
	w.flagGVKWatched(key, gvk)
	return object, nil
}
// Watch watches the cluster given a key and a handler, delegating to the
// backend object store.
func (w *Watch) Watch(ctx context.Context, key store.Key, handler kcache.ResourceEventHandler) error {
	if w.backendObjectStore != nil {
		return w.backendObjectStore.Watch(ctx, key, handler)
	}
	return errors.New("backend object store is nil")
}
// isKeyCached reports whether the key's GVK is already watched (and therefore
// locally cached) within the key's namespace.
func (w *Watch) isKeyCached(key store.Key) bool {
	gvk := key.GroupVersionKind()
	return w.watchedGVKs.isWatched(key.Namespace, gvk)
}
// handleUpdates pumps watch events into the local object cache for the given
// key until the watch's stop channel fires, then closes both channels.
// NOTE(review): the event-handler callbacks created in createEventHandler send
// on these channels; if an informer delivers an event after stopCh fires, that
// send hits a closed channel and panics — confirm informers are fully stopped
// before stopCh closes.
func (w *Watch) handleUpdates(key store.Key, updateCh, deleteCh chan watchEvent) {
	defer close(updateCh)
	defer close(deleteCh)
	done := false
	for !done {
		select {
		case <-w.stopCh:
			done = true
		case event := <-updateCh:
			w.cachedObjects.update(key.Namespace, event.gvk, event.object)
		case event := <-deleteCh:
			w.cachedObjects.delete(key.Namespace, event.gvk, event.object)
		}
	}
}
// createEventHandler wires an informer for the key's resource so that update
// and delete events are forwarded onto updateCh/deleteCh.
//
// A factory scoped to the key's namespace is looked up first; when none
// exists the choice depends on a cluster-wide "watch" access check: without
// access a namespace-scoped factory is created, otherwise the default ("")
// factory is reused. NOTE(review): the err != nil branch taking the
// namespaced path looks intentional (no cluster-wide access) — confirm.
func (w *Watch) createEventHandler(ctx context.Context, key store.Key, updateCh, deleteCh chan watchEvent) error {
	handler := &watchEventHandler{
		gvk: key.GroupVersionKind(),
		updateFunc: func(event watchEvent) {
			// Drop events without an object payload.
			if event.object == nil {
				return
			}
			updateCh <- event
		},
		deleteFunc: func(event watchEvent) {
			if event.object == nil {
				return
			}
			deleteCh <- event
		},
	}
	if w.client == nil {
		return errors.New("cluster client is nil")
	}
	gvk := key.GroupVersionKind()
	gvr, err := w.client.Resource(gvk.GroupKind())
	if err != nil {
		return errors.Wrap(err, "client resource")
	}
	factory, ok := w.factories.get(key.Namespace)
	if !ok {
		if err := w.HasAccess(ctx, store.Key{Namespace: metav1.NamespaceAll}, "watch"); err != nil {
			factory, err = w.initFactoryFunc(ctx, w.client, key.Namespace)
			if err != nil {
				return err
			}
		} else {
			factory, ok = w.factories.get("")
			if !ok {
				return errors.New("no default DynamicInformerFactory found")
			}
		}
	}
	// Cache the chosen factory for this namespace, then attach the handler
	// to the informer for the resource.
	w.factories.set(key.Namespace, factory)
	informer, err := currentInformer(gvr, factory, w.stopCh)
	if err != nil {
		return errors.Wrapf(err, "find informer for key %s", key)
	}
	informer.Informer().AddEventHandler(handler)
	return nil
}
// flagGVKWatched records that gvk is now watched in the key's namespace, so
// subsequent reads can be served from the cache.
func (w *Watch) flagGVKWatched(key store.Key, gvk schema.GroupVersionKind) {
	w.watchedGVKs.setWatched(key.Namespace, gvk)
}
// watchEvent carries an object change observed by an informer along with its
// group/version/kind.
type watchEvent struct {
	object *unstructured.Unstructured
	gvk    schema.GroupVersionKind
}

// watchEventHandler adapts informer callbacks into the updateFunc/deleteFunc
// channel feeders used by the watch cache.
type watchEventHandler struct {
	gvk        schema.GroupVersionKind
	updateFunc func(event watchEvent)
	deleteFunc func(event watchEvent)
}

// Compile-time check that watchEventHandler satisfies ResourceEventHandler.
var _ kcache.ResourceEventHandler = (*watchEventHandler)(nil)
// OnAdd forwards added unstructured objects to updateFunc.
func (h *watchEventHandler) OnAdd(obj interface{}) {
	object, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return
	}
	h.updateFunc(watchEvent{object: object, gvk: h.gvk})
}

// OnUpdate forwards the new revision of an updated object to updateFunc.
func (h *watchEventHandler) OnUpdate(oldObj, newObj interface{}) {
	object, ok := newObj.(*unstructured.Unstructured)
	if !ok {
		return
	}
	h.updateFunc(watchEvent{object: object, gvk: h.gvk})
}

// OnDelete forwards deleted unstructured objects to deleteFunc.
func (h *watchEventHandler) OnDelete(obj interface{}) {
	object, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return
	}
	h.deleteFunc(watchEvent{object: object, gvk: h.gvk})
}
// nsGVK identifies the core/v1 Namespace kind.
var nsGVK = schema.GroupVersionKind{Version: "v1", Kind: "Namespace"}

// nsUpdateHandler keeps the per-namespace informer-factory table in sync
// with namespace creation and deletion events.
type nsUpdateHandler struct {
	watch  *Watch
	logger log.Logger
}

var _ kcache.ResourceEventHandler = (*nsUpdateHandler)(nil)
// OnAdd creates and registers an informer factory for a newly observed
// namespace. Objects that are not Namespaces are ignored.
func (h *nsUpdateHandler) OnAdd(obj interface{}) {
	// Without an init function there is nothing to build factories with.
	if h.watch.initFactoryFunc == nil {
		return
	}
	if object, ok := obj.(*unstructured.Unstructured); ok && object.GroupVersionKind().String() == nsGVK.String() {
		factory, err := h.watch.initFactoryFunc(context.Background(), h.watch.client, object.GetName())
		if err != nil {
			h.logger.WithErr(err).Errorf("create namespace factory")
			return
		}
		h.logger.With("namespace", object.GetName()).Debugf("adding factory for namespace")
		h.watch.factories.set(object.GetName(), factory)
	}
}
// OnUpdate is a no-op: namespace updates do not affect factory registration.
func (h *nsUpdateHandler) OnUpdate(oldObj, newObj interface{}) {
}
// OnDelete drops the informer factory registered for a deleted namespace.
func (h *nsUpdateHandler) OnDelete(obj interface{}) {
	if h.watch.initFactoryFunc == nil {
		return
	}
	object, ok := obj.(*unstructured.Unstructured)
	if !ok || object.GroupVersionKind().String() != nsGVK.String() {
		return
	}
	h.watch.factories.delete(object.GetName())
	h.logger.With("namespace", object.GetName()).Debugf("removed factory for namespace")
}
// UpdateClusterClient updates the cluster client.
//
// The current watch is cancelled, the new client installed, and the watch
// re-bootstrapped before registered update callbacks and the onClientUpdate
// channel are notified.
func (w *Watch) UpdateClusterClient(ctx context.Context, client cluster.ClientInterface) error {
	logger := log.From(ctx)
	logger.Debugf("watch is updating its cluster client")
	w.cancelFunc()
	w.client = client
	if err := w.bootstrap(ctx, true); err != nil {
		return err
	}
	for _, fn := range w.updateFns {
		fn(w)
	}
	// NOTE(review): this send blocks until a receiver drains onClientUpdate
	// — confirm one is always present.
	w.onClientUpdate <- w
	return nil
}
// RegisterOnUpdate registers fn to be invoked after the cluster client is
// replaced via UpdateClusterClient.
func (w *Watch) RegisterOnUpdate(fn store.UpdateFn) {
	w.updateFns = append(w.updateFns, fn)
}
// Update defers the update to the backend store.
// NOTE(review): the local cache is not touched here — presumably it is
// refreshed by the watch events the update triggers; confirm.
func (w *Watch) Update(ctx context.Context, key store.Key, updater func(*unstructured.Unstructured) error) error {
	return w.backendObjectStore.Update(ctx, key, updater)
}
|
package blockchain
import (
"../block"
"log"
"testing"
)
// TestGetBlock adds a test block and verifies it can be fetched back by its
// hash; it then probes a bogus hash, whose error is only logged.
func TestGetBlock(t *testing.T) {
	blA := block.GetTestBlock()
	AddBlock(blA)
	blB, err := GetBlock(blA.BlHash)
	if err != nil {
		// t.Fatal (not log.Fatal): log.Fatal calls os.Exit, which skips the
		// testing framework's reporting and any remaining tests.
		t.Fatal(err)
	}
	if blA.Timestamp != blB.Timestamp {
		t.Fatal("Timestamp blocks don't equals")
	}
	log.Printf("BlB: %x \n", blB)
	// Lookup of an unknown hash is expected to fail; the error is only
	// logged, not asserted.
	blC, err := GetBlock("00000")
	if err != nil {
		log.Print(err)
	}
	log.Print(blC)
}
|
package fod
import (
"android/soong/android"
"android/soong/cc"
"strings"
)
// deviceFlags builds the FOD position/size C defines from the vendor
// configuration for this board.
func deviceFlags(ctx android.BaseContext) []string {
	config := ctx.AConfig().VendorConfig("XIAOMI_SDM710_FOD")
	posX := strings.TrimSpace(config.String("POS_X"))
	posY := strings.TrimSpace(config.String("POS_Y"))
	size := strings.TrimSpace(config.String("SIZE"))
	return []string{
		"-DFOD_POS_X=" + posX,
		"-DFOD_POS_Y=" + posY,
		"-DFOD_SIZE=" + size,
	}
}
// fodHalBinary is a load hook that appends the device-specific FOD cflags
// to the module's Android target.
func fodHalBinary(ctx android.LoadHookContext) {
	// Mirrors the nested Cflags property layout expected by Soong.
	type props struct {
		Target struct {
			Android struct {
				Cflags []string
			}
		}
	}
	p := &props{}
	p.Target.Android.Cflags = deviceFlags(ctx)
	ctx.AppendProperties(p)
}
// fodHalBinaryFactory creates the cc binary module and attaches the load
// hook that injects the FOD cflags.
func fodHalBinaryFactory() android.Module {
	binary, _ := cc.NewBinary(android.HostAndDeviceSupported)
	mod := binary.Init()
	android.AddLoadHook(mod, fodHalBinary)
	return mod
}
// init registers the xiaomi_sdm710_fod_hal_binary module type with Soong.
func init() {
	android.RegisterModuleType("xiaomi_sdm710_fod_hal_binary", fodHalBinaryFactory)
}
|
package io
import (
	"strconv"
	"strings"
)
// PrintIntsLine returns integers string delimited by a space.
//
// Example: PrintIntsLine(1, 2, 3) == "1 2 3"; an empty argument list yields
// the empty string. Uses strconv + strings.Join instead of the original
// hand-rolled rune accumulation.
func PrintIntsLine(A ...int) string {
	parts := make([]string, len(A))
	for i, v := range A {
		parts[i] = strconv.Itoa(v)
	}
	return strings.Join(parts, " ")
}
|
package main
import (
"fmt"
"sync"
)
//func main() {
//}
func main() {
	// Use a mutex to sequence the goroutine: main holds the lock, and the
	// second Lock below blocks until the goroutine prints and unlocks.
	var mutex sync.Mutex
	i := 6
	mutex.Lock()
	go func() {
		fmt.Println(i)
		mutex.Unlock()
	}()
	mutex.Lock()
	// Use a channel for the same synchronization: the receive blocks until
	// the goroutine signals completion.
	done := make(chan bool, 1)
	go func() {
		fmt.Println("执行子线程")
		done <- true
	}()
	<-done
	fmt.Println("执行结束")
}
|
// A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
//
// a^2 + b^2 = c^2
// For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
//
// There exists exactly one Pythagorean triplet for which a + b + c = 1000.
// Find the product abc.
package main
import (
"fmt"
"math"
)
// eq evaluates a·(a+r) + a·b + b·(b+r) where r = √(a²+b²). When
// a + b + r = 1000 this expression equals 500000, which is how solveForABC
// detects the Pythagorean triplet.
func eq(a, b int) float64 {
	af := float64(a)
	bf := float64(b)
	hyp := math.Sqrt(af*af + bf*bf)
	return af*(af+hyp) + af*bf + bf*(bf+hyp)
}
// solveForABC finds the unique Pythagorean triplet (a, b, c) with
// a < b < c and a + b + c = 1000, returning (0, 0, 0) if none exists.
//
// The check is done entirely in integer arithmetic (a² + b² == c² with
// c = 1000-a-b), avoiding the fragile floating-point equality against
// 500000 used by eq, and only scanning b > a.
func solveForABC() (int, int, int) {
	for a := 1; a < 998; a++ {
		for b := a + 1; b < 998; b++ {
			c := 1000 - a - b
			if c > b && a*a+b*b == c*c {
				return a, b, c
			}
		}
	}
	return 0, 0, 0
}
// main prints the product a·b·c of the Pythagorean triplet summing to 1000.
func main() {
	a, b, c := solveForABC()
	fmt.Println(a * b * c)
}
|
package piscine
// ToUpper returns a copy of s with every ASCII lowercase letter replaced by
// its uppercase counterpart; all other runes are left untouched.
func ToUpper(s string) string {
	runes := []rune(s)
	for i, r := range runes {
		if r >= 'a' && r <= 'z' {
			runes[i] = r - ('a' - 'A')
		}
	}
	return string(runes)
}
|
package cmn
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
)
func SmartClose(c io.Closer) {
err := c.Close()
if err != nil {
fmt.Printf("closing: %v\n", err)
}
return
}
// Plural returns "s" when there is more than one number n is given
// and returns empty string otherwise.
// It is meant for use with format string like fmt.Printf("%d apple%s", n, Plural(n)).
// Another alternative is just using string concatenation "apple" + Plural(n)
func Plural(n int) string {
	switch n {
	case 1:
		return ""
	default:
		return "s"
	}
}
func GetBody(url string, client *http.Client) (body io.ReadCloser, err error) {
request, err := http.NewRequest("GET", url, bytes.NewReader([]byte{}))
if err != nil {
err = fmt.Errorf("Preparing request: %v", err)
return
}
response, err := client.Do(request)
if err != nil {
err = fmt.Errorf("Making request: %v", err)
return
}
if response.StatusCode != http.StatusOK {
err = fmt.Errorf("Response status: %s [ %s ]", response.Status, url)
return
}
body = response.Body
return
}
// GetBytes fetches url with client and returns the full response body.
func GetBytes(url string, client *http.Client) ([]byte, error) {
	body, err := GetBody(url, client)
	if err != nil {
		return nil, err
	}
	defer SmartClose(body)
	return ioutil.ReadAll(body)
}
// GenerateFilenames returns filenames "1.ext" … "maxIndex.ext", left-padding
// each index with zeros to the width of maxIndex (e.g. "01.png" … "10.png").
func GenerateFilenames(maxIndex int, ext string) (filenames []string) {
	width := len(fmt.Sprintf("%d", maxIndex))
	for i := 1; i <= maxIndex; i++ {
		num := fmt.Sprintf("%d", i)
		pad := strings.Repeat("0", width-len(num))
		filenames = append(filenames, fmt.Sprintf("%s%s.%s", pad, num, ext))
	}
	return
}
|
package acceptance
import (
"context"
"crypto/tls"
"net/http"
ms "github.com/cloudfoundry/metric-store-release/src/pkg/client"
"github.com/cloudfoundry/metric-store-release/src/pkg/rpc/metricstore_v1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"google.golang.org/grpc"
)
// The suite exercises Metric Store deployed on CF through both access
// paths: directly over gRPC and through the CF auth proxy over HTTP. Each
// BeforeEach polls PromQL for up to 60s so tests only run once the store is
// reachable.
var _ = Describe("Metric Store on a CF", func() {
	var (
		client *ms.Client
		cfg    *TestConfig
	)
	Context("using gRPC client", func() {
		BeforeEach(func() {
			cfg = Config()
			client = ms.NewClient(
				cfg.MetricStoreAddr,
				ms.WithViaGRPC(
					grpc.WithTransportCredentials(
						cfg.TLS.Credentials("metric-store"),
					),
				),
			)
			Eventually(func() error {
				_, err := client.PromQL(context.TODO(), "egress{source_id=\"doppler\"}")
				return err
			}, 60).ShouldNot(HaveOccurred())
		})
		It("returns results for /api/v1/query", func() {
			ctx := context.Background()
			result, err := client.PromQL(ctx, "egress{source_id=\"doppler\"}")
			Expect(err).ToNot(HaveOccurred())
			samples := result.GetVector().GetSamples()
			Expect(len(samples)).ToNot(BeZero())
			Expect(samples[0].Metric["__name__"]).To(Equal("egress"))
			Expect(samples[0].Metric["source_id"]).To(Equal("doppler"))
			Expect(samples[0].Point).ToNot(BeNil())
		})
	})
	Context("using HTTP client to traverse the auth proxy", func() {
		BeforeEach(func() {
			cfg = Config()
			oauthClient := newOauth2HTTPClient(cfg)
			client = ms.NewClient(
				cfg.MetricStoreCFAuthProxyURL,
				ms.WithHTTPClient(oauthClient),
			)
			Eventually(func() error {
				_, err := client.PromQL(context.TODO(), "egress{source_id=\"doppler\"}")
				return err
			}, 60).ShouldNot(HaveOccurred())
		})
		It("returns results for /api/v1/query", func() {
			ctx := context.Background()
			result, err := client.PromQL(ctx, "egress{source_id=\"doppler\"}")
			Expect(err).ToNot(HaveOccurred())
			samples := result.GetVector().GetSamples()
			Expect(len(samples)).ToNot(BeZero())
			Expect(samples[0].Metric["__name__"]).To(Equal("egress"))
			Expect(samples[0].Metric["source_id"]).To(Equal("doppler"))
			Expect(samples[0].Point).ToNot(BeNil())
		})
	})
})
// flattenVector returns the label names and values of the first sample in v
// as a flat [k1, v1, k2, v2, ...] slice. Map iteration makes the pair order
// nondeterministic.
func flattenVector(v *metricstore_v1.PromQL_Vector) []string {
	var flat []string
	for name, value := range v.GetSamples()[0].Metric {
		flat = append(flat, name, value)
	}
	return flat
}
// newOauth2HTTPClient builds an OAuth2-capable HTTP client for the UAA
// configured in cfg. TLS verification is skipped when cfg.SkipCertVerify is
// set (intended for test environments only).
func newOauth2HTTPClient(cfg *TestConfig) *ms.Oauth2HTTPClient {
	// Renamed from oauth_client: Go uses MixedCaps, not underscores.
	oauthClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: cfg.SkipCertVerify,
			},
		},
	}
	return ms.NewOauth2HTTPClient(
		cfg.UAAURL,
		cfg.ClientID,
		cfg.ClientSecret,
		ms.WithOauth2HTTPClient(oauthClient),
	)
}
|
package config
import (
"encoding/json"
"github.com/iancoleman/orderedmap"
flag "github.com/spf13/pflag"
"github.com/iotaledger/hive.go/app/configuration"
"github.com/iotaledger/hive.go/apputils/parameter"
)
// parameterMapJSON is an insertion-ordered map used to render parameter
// defaults as JSON with stable key order.
type parameterMapJSON struct {
	*orderedmap.OrderedMap
}
func newParameterMapJSON() *parameterMapJSON {
return ¶meterMapJSON{
OrderedMap: orderedmap.New(),
}
}
// AddEntry inserts a parameter or parameter group into the ordered map.
//
// Groups without an explicit default are expanded recursively into a nested
// map; groups with a default, and plain parameters, are stored as their
// default value. Any other entry type panics.
func (p *parameterMapJSON) AddEntry(entry interface{}) {
	switch v := entry.(type) {
	case *parameter.ParameterGroup:
		newParamMapJSONGroup := newParameterMapJSON()
		p.Set(v.Name, newParamMapJSONGroup)
		if v.Default == nil {
			for _, entry := range v.Entries {
				newParamMapJSONGroup.AddEntry(entry)
			}
		} else {
			// A group default overrides the nested map set above.
			p.Set(v.Name, v.Default)
		}
	case *parameter.Parameter:
		p.Set(v.Name, v.Default)
	default:
		panic(parameter.ErrUnknownEntryType)
	}
}
// PrettyPrint renders the map as indented JSON. It panics if marshalling
// fails, since the map only ever holds JSON-encodable values.
// (Parameter renamed from the typo "ident" to "indent"; parameter names are
// not part of the call syntax in Go, so callers are unaffected.)
func (p *parameterMapJSON) PrettyPrint(prefix string, indent string) string {
	data, err := json.MarshalIndent(p, prefix, indent)
	if err != nil {
		panic(err)
	}
	return string(data)
}
// prettyPrintParameterGroup renders a single parameter group as indented
// JSON.
func prettyPrintParameterGroup(g *parameter.ParameterGroup, prefix string, indent string) string {
	m := newParameterMapJSON()
	m.AddEntry(g)
	return m.PrettyPrint(prefix, indent)
}
// GetDefaultAppConfigJSON renders the default values of every parameter
// group parsed from config/flagset as pretty-printed JSON with a trailing
// newline. Flags listed in ignoreFlags are excluded.
func GetDefaultAppConfigJSON(config *configuration.Configuration, flagset *flag.FlagSet, ignoreFlags map[string]struct{}) string {
	paramMapJSON := newParameterMapJSON()
	for _, group := range parameter.ParseConfigParameterGroups(config, flagset, ignoreFlags) {
		paramMapJSON.AddEntry(group)
	}
	return paramMapJSON.PrettyPrint("", " ") + "\n"
}
|
package main
import (
"fmt"
"time"
)
// Flag is a bitmask value with a binary String representation.
type Flag int64

// String implements fmt.Stringer.
// NOTE(review): the format mixes a hex "0x" prefix with binary digits (%b);
// "0b%b" or "%#x" is probably what was intended — confirm before changing.
func (f Flag) String() string {
	return fmt.Sprintf("0x%b", f)
}
// main demonstrates the Flag Stringer and the %b verb.
func main() {
	fmt.Println("ok")
	fmt.Printf("0x%b\n", 0xb)
	fmt.Println(Flag(1))
	_ = time.Now() // result deliberately discarded; keeps the time import used
}
|
package wire_test
import (
"errors"
"testing"
"github.com/hoistup/hoist-go/wire"
"github.com/matryer/is"
)
// TestEncode covers wire.Encode across nil/valid/invalid details and params,
// asserting either the exact encoded string or the expected sentinel error.
// The wire format pinned here is
// "1,<detailsLen>,<paramsLen>:<detailsJSON><paramsJSON>".
func TestEncode(t *testing.T) {
	type MyDetails struct {
		ServiceName string `json:"svc"`
		FuncName    string `json:"fn"`
	}
	table := []struct {
		Name             string
		Details          interface{}
		Params           interface{}
		ExpectedEncoding string
		ExpectedError    error
	}{
		{
			Name:          "with nil details and nil params",
			Details:       nil,
			Params:        nil,
			ExpectedError: wire.ErrNilDetails,
		},
		{
			Name: "with present details and nil params",
			Details: MyDetails{
				ServiceName: "myService",
				FuncName:    "myFunc",
			},
			Params:           nil,
			ExpectedEncoding: `1,33,4:{"svc":"myService","fn":"myFunc"}null`,
		},
		{
			Name: "with present details and present string params",
			Details: MyDetails{
				ServiceName: "myService2",
				FuncName:    "myFunc2",
			},
			Params:           "a param",
			ExpectedEncoding: `1,35,9:{"svc":"myService2","fn":"myFunc2"}"a param"`,
		},
		{
			Name: "with present details and present map params",
			Details: MyDetails{
				ServiceName: "myService2",
				FuncName:    "myFunc2",
			},
			Params: map[string]interface{}{
				"Param1": "abc",
				"Param2": "xyz",
			},
			ExpectedEncoding: `1,35,31:{"svc":"myService2","fn":"myFunc2"}{"Param1":"abc","Param2":"xyz"}`,
		},
		{
			Name: "with present details and present struct params",
			Details: MyDetails{
				ServiceName: "myService",
				FuncName:    "myFunc2",
			},
			Params: struct {
				Message string `json:"msg"`
			}{
				Message: "hello",
			},
			ExpectedEncoding: `1,34,15:{"svc":"myService","fn":"myFunc2"}{"msg":"hello"}`,
		},
		{
			// Channels are not JSON-encodable; details encoding must fail.
			Name:          "with invalid details",
			Details:       make(chan struct{}),
			Params:        "a string",
			ExpectedError: wire.ErrUnableToEncodeDetails,
		},
		{
			Name: "with invalid params",
			Details: MyDetails{
				ServiceName: "myService",
				FuncName:    "myFunc2",
			},
			Params:        make(chan struct{}),
			ExpectedError: wire.ErrUnableToEncodeParams,
		},
	}
	for _, entry := range table {
		t.Run(entry.Name, func(t *testing.T) {
			is := is.New(t)
			encoding, err := wire.Encode(entry.Details, entry.Params)
			// errors.Is(err, nil) is true only when err is nil, so this also
			// covers the success cases.
			is.True(errors.Is(err, entry.ExpectedError))
			is.Equal(string(encoding), entry.ExpectedEncoding)
		})
	}
}
|
package database
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
)
// State used to control the state of the chain
type State struct {
	Balances        map[Account]uint // current balance per account
	txMempool       []Tx             // transactions applied but not yet persisted
	dbFile          *os.File         // append-only block database (one JSON block per line)
	latestBlockHash Hash             // hash of the most recently applied/persisted block
}
// NewStateFromDisk loads the genesis balances and replays every block in
// database/block.db, returning the resulting State with the db file held
// open for appending.
//
// Fixes over the original: the opened db file is closed on every error path
// (it was leaked before), and scanner.Err is checked once after the loop —
// the previous in-loop check could never observe a read error, because Scan
// returns false on error and the loop body never ran.
func NewStateFromDisk() (*State, error) {
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	gen, err := loadgen(filepath.Join(cwd, "database", "gen.json"))
	if err != nil {
		return nil, err
	}
	f, err := os.OpenFile(filepath.Join(cwd, "database", "block.db"), os.O_APPEND|os.O_RDWR, 0600)
	if err != nil {
		return nil, err
	}
	state := &State{
		Balances:        gen.Balances,
		txMempool:       []Tx{},
		latestBlockHash: Hash{},
		dbFile:          f,
	}
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		var blockFs BlockFS
		if err := json.Unmarshal(scanner.Bytes(), &blockFs); err != nil {
			f.Close()
			return nil, err
		}
		if err := state.applyBlock(blockFs.Value); err != nil {
			f.Close()
			return nil, err
		}
		state.latestBlockHash = blockFs.Key
	}
	if err := scanner.Err(); err != nil {
		f.Close()
		return nil, err
	}
	return state, nil
}
// apply executes a single transaction against the balance table. Reward
// transactions mint value for the recipient; ordinary transfers require the
// sender to hold sufficient balance.
func (s *State) apply(tx Tx) error {
	if tx.IsReward() {
		s.Balances[tx.To] += tx.Value
		return nil
	}
	available := s.Balances[tx.From]
	if available < tx.Value {
		return fmt.Errorf("insufficient balance")
	}
	s.Balances[tx.From] = available - tx.Value
	s.Balances[tx.To] += tx.Value
	return nil
}
// LatestBlockHash returns the hash of the most recently applied or
// persisted block.
func (s *State) LatestBlockHash() Hash {
	return s.latestBlockHash
}
// Close releases the underlying block database file.
// NOTE(review): the close error is discarded — confirm that is acceptable
// for an append-mode file (buffered writes could fail late).
func (s *State) Close() {
	s.dbFile.Close()
}
// applyBlock applies every transaction in b to the state, stopping at the
// first failure.
func (s *State) applyBlock(b Block) error {
	for _, transaction := range b.TXs {
		if err := s.apply(transaction); err != nil {
			return err
		}
	}
	return nil
}
// AddBlock queues every transaction of b in the mempool, applying each one
// as it goes; it stops at the first invalid transaction.
func (s *State) AddBlock(b Block) error {
	for _, transaction := range b.TXs {
		if err := s.AddTx(transaction); err != nil {
			return err
		}
	}
	return nil
}
// AddTx validates tx against the current balances and, if valid, queues it
// in the mempool for the next Persist.
func (s *State) AddTx(tx Tx) error {
	if err := s.apply(tx); err != nil {
		return err
	}
	s.txMempool = append(s.txMempool, tx)
	return nil
}
// Persist writes the current mempool as a new block appended to the db file
// and returns the new latest block hash. On success the mempool is cleared.
func (s *State) Persist() (Hash, error) {
	block := NewBlock(s.latestBlockHash, uint64(time.Now().Unix()), s.txMempool)
	blockHash, err := block.Hash()
	if err != nil {
		return Hash{}, err
	}
	blockFs := BlockFS{blockHash, block}
	blockFsJson, err := json.Marshal(blockFs)
	if err != nil {
		return Hash{}, err
	}
	fmt.Printf("Persisting new Block to disk:\n")
	fmt.Printf("\t%s\n", blockFsJson)
	// One JSON object per line, so NewStateFromDisk can replay with a
	// line scanner.
	if _, err := s.dbFile.Write(append(blockFsJson, '\n')); err != nil {
		return Hash{}, err
	}
	s.latestBlockHash = blockHash
	s.txMempool = []Tx{}
	return s.latestBlockHash, nil
}
|
package configuration
import (
"io/ioutil"
"log"
"gopkg.in/yaml.v2"
)
// DbConfiguration holds the PostgreSQL connection settings.
type DbConfiguration struct {
	Host     string
	Port     int64
	User     string
	Password string
	Schema   string
}
// HTTPConfiguration holds the HTTP server settings.
type HTTPConfiguration struct {
	Port    int64
	Context string
}
// Configuration aggregates the application's configuration parameters as
// read from application.yaml.
type Configuration struct {
	Pg          DbConfiguration
	HTTP        HTTPConfiguration
	Version     string
	Prettyprint bool
	JWT         string
}
var (
	// configuration is the process-wide loaded configuration.
	configuration Configuration
	// configFile is the YAML file read by LoadConfiguration.
	configFile = "application.yaml"
)
// init loads the configuration at package import time.
// NOTE(review): the comment on LoadConfiguration says loading should NOT
// happen in init because tests cannot locate the config file — this init
// appears to contradict that; confirm which behavior is intended.
func init() {
	LoadConfiguration()
}
// LoadConfiguration reads application.yaml and unmarshals it into the
// package-level configuration; it panics if the file is missing or invalid.
// (Original French note: do not call this from init, because tests cannot
// find the configuration file.)
func LoadConfiguration() {
	log.Println("Chargement de la configuration ...")
	source, err := ioutil.ReadFile(configFile)
	if err != nil {
		panic(err)
	}
	err = yaml.Unmarshal(source, &configuration)
	if err != nil {
		panic(err)
	}
	log.Println("Configuration chargée !")
}
// GetConfiguration returns the application configuration loaded from
// application.yaml.
func GetConfiguration() Configuration {
	return configuration
}
// ReloadConfiguration re-reads the configuration file on demand.
func ReloadConfiguration() {
	log.Println("Rechargement de la configuration demandé")
	LoadConfiguration()
}
|
package mocks
import (
"github.com/spothero/optimizely-sdk-go/api"
"github.com/stretchr/testify/mock"
)
// Client mocks out the OptimizelyAPI interface for use in testing
type Client struct {
	mock.Mock
}
// GetDatafile returns the mocked datafile bytes for the given environment
// and project.
//
// All methods below follow the same testify pattern: record the call via
// c.Called and return the canned values. The type assertions on call.Get(0)
// panic when a test configures a nil or mismatched return — presumably
// intentional, as is common for testify mocks.
func (c *Client) GetDatafile(environmentName string, projectID int) ([]byte, error) {
	call := c.Called(environmentName, projectID)
	return call.Get(0).([]byte), call.Error(1)
}

// GetEnvironmentByProjectID returns the mocked environment for a project ID.
func (c *Client) GetEnvironmentByProjectID(name string, projectID int) (api.Environment, error) {
	call := c.Called(name, projectID)
	return call.Get(0).(api.Environment), call.Error(1)
}

// GetEnvironmentByProjectName returns the mocked environment for a project
// name.
func (c *Client) GetEnvironmentByProjectName(name, projectName string) (api.Environment, error) {
	call := c.Called(name, projectName)
	return call.Get(0).(api.Environment), call.Error(1)
}

// GetEnvironmentsByProjectID returns the mocked environments for a project
// ID.
func (c *Client) GetEnvironmentsByProjectID(projectID int) ([]api.Environment, error) {
	call := c.Called(projectID)
	return call.Get(0).([]api.Environment), call.Error(1)
}

// GetEnvironmentsByProjectName returns the mocked environments for a project
// name.
func (c *Client) GetEnvironmentsByProjectName(projectName string) ([]api.Environment, error) {
	call := c.Called(projectName)
	return call.Get(0).([]api.Environment), call.Error(1)
}

// GetProjects returns the mocked project list.
func (c *Client) GetProjects() ([]api.Project, error) {
	call := c.Called()
	return call.Get(0).([]api.Project), call.Error(1)
}

// ReportEvents returns the mocked error for an event payload.
func (c *Client) ReportEvents(events []byte) error {
	return c.Called(events).Error(0)
}
|
package server
import (
"database/sql"
"testing"
"github.com/dhaifley/dlib"
)
// MockDBSession is a no-op database session used to exercise Server wiring
// in tests; every method returns its zero/success value.
type MockDBSession struct{}

// Close implements the session interface; it always succeeds.
func (m *MockDBSession) Close() error {
	return nil
}

// Exec implements the session interface; it returns nothing.
func (m *MockDBSession) Exec(query string, args ...interface{}) (sql.Result, error) {
	return nil, nil
}

// Query implements the session interface; it returns no rows.
func (m *MockDBSession) Query(query string, args ...interface{}) (dlib.SQLRows, error) {
	return nil, nil
}

// Ping implements the session interface; it always succeeds.
func (m *MockDBSession) Ping() error {
	return nil
}

// Stats reports a single open connection so callers see a live pool.
func (m *MockDBSession) Stats() sql.DBStats {
	return sql.DBStats{OpenConnections: 1}
}
// TestServerConnectSQL verifies that a Server accepts a mock session
// without error.
func TestServerConnectSQL(t *testing.T) {
	s := Server{}
	err := s.ConnectSQL(&MockDBSession{})
	if err != nil {
		t.Error(err)
	}
	defer s.Close()
}
// TestServerClose verifies that closing a zero-value Server does not panic.
func TestServerClose(t *testing.T) {
	s := Server{}
	defer s.Close()
}
|
package tsm
/*
#cgo pkg-config: libtsm
*/
import "C"
|
package utils
import (
"fmt"
"math/rand"
"time"
)
// init seeds math/rand so the Rand* helpers below differ between runs.
// NOTE(review): rand.Seed is deprecated as of Go 1.20; fine on the older
// toolchains this code appears to target.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// numberRunes is the alphabet used by RandNumberString (decimal digits).
var numberRunes = []rune("0123456789")

// charRunes is the alphabet used by RandCharString (ASCII alphanumerics).
var charRunes = []rune("abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")
// RandNumberString returns a random string of decimal digits of the given
// length.
func RandNumberString(length int) string {
	out := make([]rune, length)
	for i := 0; i < length; i++ {
		out[i] = numberRunes[rand.Intn(len(numberRunes))]
	}
	return string(out)
}
// RandCharString returns a random alphanumeric string of the given length.
func RandCharString(length int) string {
	out := make([]rune, length)
	for i := 0; i < length; i++ {
		out[i] = charRunes[rand.Intn(len(charRunes))]
	}
	return string(out)
}
// RandNumberStringSlice returns count random digit codes, each of the given
// length (e.g. ticket codes).
func RandNumberStringSlice(count uint, length uint) []string {
	codes := make([]string, count)
	for i := range codes {
		codes[i] = RandNumberString(int(length))
	}
	return codes
}
// RandPhoneNumber returns a random Chinese-style mobile number: the prefix
// "13" followed by nine random digits.
func RandPhoneNumber() string {
	return fmt.Sprintf("13%s", RandNumberString(9))
}
|
package middleware
import (
"fmt"
"net/http"
"github.com/dgrijalva/jwt-go"
)
// mySigningKey is the HMAC secret used to verify JWTs.
// NOTE(review): a hard-coded signing secret should be loaded from
// configuration or the environment instead of living in source control.
var mySigningKey = []byte("top-secret-signin-value-key")
// Auth validates all requests for a valid token.
//
// The Authorization header is expected to carry a JWT signed with
// mySigningKey using an HMAC method; valid tokens pass through to next,
// anything else gets a 401.
// NOTE(review): a parseable-but-invalid token falls through to the
// "Token not found" message below, which is misleading — confirm intended.
func Auth(next http.HandlerFunc) http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header["Authorization"] != nil {
			token, err := jwt.Parse(r.Header["Authorization"][0], func(token *jwt.Token) (interface{}, error) {
				// Reject any token whose signing method is not HMAC (guards
				// against algorithm-substitution attacks).
				if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
					return nil, fmt.Errorf("There was an error")
				}
				return mySigningKey, nil
			})
			if err != nil {
				w.WriteHeader(http.StatusUnauthorized)
				fmt.Fprintf(w, "Error: Invalid Token")
				return
			}
			if token.Valid {
				next.ServeHTTP(w, r)
				return
			}
		}
		w.WriteHeader(http.StatusUnauthorized)
		fmt.Fprintf(w, "Error: Token not found")
	})
}
|
package replicatectl
import (
"context"
"errors"
"path"
"github.com/operator-framework/operator-lib/status"
regv1 "github.com/tmax-cloud/registry-operator/api/v1"
"github.com/tmax-cloud/registry-operator/internal/schemes"
"github.com/tmax-cloud/registry-operator/internal/utils"
"github.com/tmax-cloud/registry-operator/pkg/registry"
corev1 "k8s.io/api/core/v1"
k8serr "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// NewImageSignRequest returns a handler that creates ImageSignRequests once
// dependentJob has completed.
func NewImageSignRequest(dependentJob *RegistryJob) *ImageSignRequest {
	handler := ImageSignRequest{dependentJob: dependentJob}
	return &handler
}
// ImageSignRequest manages the ImageSignRequest resource created for an
// image replication.
type ImageSignRequest struct {
	// dependentJob must complete successfully before an ISR is created.
	dependentJob *RegistryJob
	// isr is the managed request object; nil until get/create populates it.
	isr    *regv1.ImageSignRequest
	logger *utils.RegistryLogger
}
// Handle is to create image sign request.
//
// It requires the dependent registry job to have finished successfully,
// then creates the ISR if it does not exist yet.
func (r *ImageSignRequest) Handle(c client.Client, repl *regv1.ImageReplicate, patchExreg *regv1.ImageReplicate, scheme *runtime.Scheme) error {
	if !r.dependentJob.IsSuccessfullyCompleted(c, repl) {
		// Typo fixed in the message: "succesfully" -> "successfully".
		return errors.New("ImageSignRequest: registry job is not completed successfully")
	}
	if err := r.get(c, repl); err != nil {
		if k8serr.IsNotFound(err) {
			if err := r.create(c, repl, patchExreg, scheme); err != nil {
				r.logger.Error(err, "create image replicate image sign request error")
				return err
			}
		} else {
			r.logger.Error(err, "image replicate image sign request error")
			return err
		}
	}
	return nil
}
// Ready is to check if image sign request is ready.
//
// While the replicate is pending/processing it mirrors the sign response
// result into the replicate's conditions: Signing becomes true once any
// result is present, and SigningSuccess becomes true/false on
// success/failure. Condition writes happen in defers so they are patched
// even on early error returns.
func (r *ImageSignRequest) Ready(c client.Client, repl *regv1.ImageReplicate, patchRepl *regv1.ImageReplicate, useGet bool) error {
	var existErr error = nil
	existCondition := &status.Condition{
		Status: corev1.ConditionFalse,
		Type:   regv1.ConditionTypeImageReplicateImageSignRequestExist,
	}
	condition1 := &status.Condition{}
	condition2 := &status.Condition{}
	if useGet {
		if existErr = r.get(c, repl); existErr != nil {
			r.logger.Error(existErr, "get image sign request error")
			return existErr
		}
	}
	// Patch the "exists" condition on exit; existErr may still be set below.
	defer utils.SetCondition(existErr, patchRepl, existCondition)
	if r.isr == nil {
		existErr = errors.New("image sign request is not found")
		return existErr
	}
	existCondition.Status = corev1.ConditionTrue
	switch repl.Status.State {
	case regv1.ImageReplicatePending, regv1.ImageReplicateProcessing:
		if r.isr.Status.ImageSignResponse == nil {
			err := errors.New("ImageSignResponse is nil")
			r.logger.Error(err, "")
			return err
		}
		// Signing has started (or finished) once any result is present.
		if r.isr.Status.ImageSignResponse.Result == regv1.ResponseResultSigning ||
			r.isr.Status.ImageSignResponse.Result == regv1.ResponseResultSuccess ||
			r.isr.Status.ImageSignResponse.Result == regv1.ResponseResultFail {
			condition1.Status = corev1.ConditionTrue
			condition1.Type = regv1.ConditionTypeImageReplicateImageSigning
			defer utils.SetCondition(nil, patchRepl, condition1)
		}
		condition2.Status = corev1.ConditionUnknown
		condition2.Type = regv1.ConditionTypeImageReplicateImageSigningSuccess
		defer utils.SetCondition(nil, patchRepl, condition2)
		if r.isr.Status.ImageSignResponse.Result == regv1.ResponseResultSuccess {
			condition2.Status = corev1.ConditionTrue
			break
		}
		if r.isr.Status.ImageSignResponse.Result == regv1.ResponseResultFail {
			condition2.Status = corev1.ConditionFalse
			break
		}
	}
	return nil
}
// create builds the ImageSignRequest (unless a previous get already
// populated r.isr), sets repl as its controller owner, submits it, and
// records the created name on patchRepl.
func (r *ImageSignRequest) create(c client.Client, repl *regv1.ImageReplicate, patchRepl *regv1.ImageReplicate, scheme *runtime.Scheme) error {
	if r.isr == nil {
		image, err := r.getImageFullName(c, repl)
		if err != nil {
			r.logger.Error(err, "failed to get image full name")
			return err
		}
		reg := types.NamespacedName{Namespace: repl.Spec.ToImage.RegistryNamespace, Name: repl.Spec.ToImage.RegistryName}
		imagePullSecret, err := registry.GetLoginSecret(c, reg, repl.Spec.ToImage.RegistryType)
		if err != nil {
			r.logger.Error(err, "failed to get login secret")
			return err
		}
		certificate, err := registry.GetCertSecret(c, reg, repl.Spec.ToImage.RegistryType)
		if err != nil {
			r.logger.Error(err, "failed to get certificate")
			return err
		}
		r.isr = schemes.ImageReplicateImageSignRequest(repl, image, imagePullSecret, certificate)
	}
	if err := controllerutil.SetControllerReference(repl, r.isr, scheme); err != nil {
		r.logger.Error(err, "SetOwnerReference Failed")
		return err
	}
	r.logger.Info("Create image replicate image sign request")
	if err := c.Create(context.TODO(), r.isr); err != nil {
		r.logger.Error(err, "Creating image replicate image sign request is failed.")
		return err
	}
	patchRepl.Status.ImageSignRequestName = r.isr.Name
	return nil
}
// get initializes the logger, reconstructs the expected ImageSignRequest
// for repl (to learn its name/namespace), then fetches the live object into
// r.isr. On fetch failure (including not-found) r.isr is reset to nil.
func (r *ImageSignRequest) get(c client.Client, repl *regv1.ImageReplicate) error {
	r.logger = utils.NewRegistryLogger(*r, repl.Namespace, schemes.SubresourceName(repl, schemes.SubTypeImageReplicateImageSignRequest))
	image, err := r.getImageFullName(c, repl)
	if err != nil {
		r.logger.Error(err, "failed to get image full name")
		return err
	}
	reg := types.NamespacedName{Namespace: repl.Spec.ToImage.RegistryNamespace, Name: repl.Spec.ToImage.RegistryName}
	imagePullSecret, err := registry.GetLoginSecret(c, reg, repl.Spec.ToImage.RegistryType)
	if err != nil {
		r.logger.Error(err, "failed to get login secret")
		return err
	}
	r.logger.Info("get", "imagePullSecret", imagePullSecret, "namespace", reg.Namespace)
	certificate, err := registry.GetCertSecret(c, reg, repl.Spec.ToImage.RegistryType)
	if err != nil {
		r.logger.Error(err, "failed to get certificate")
		return err
	}
	r.logger.Info("get", "certificate", certificate, "namespace", reg.Namespace)
	// Build the expected object so its name/namespace can be used as the key.
	r.isr = schemes.ImageReplicateImageSignRequest(repl, image, imagePullSecret, certificate)
	req := types.NamespacedName{Name: r.isr.Name, Namespace: r.isr.Namespace}
	if err := c.Get(context.TODO(), req, r.isr); err != nil {
		r.logger.Error(err, "Get image replicate image sign request is failed")
		r.isr = nil
		return err
	}
	return nil
}
// getImageFullName returns "<registry-host>/<image>" for the replication
// target, with any http/https scheme stripped from the registry URL.
func (r *ImageSignRequest) getImageFullName(c client.Client, repl *regv1.ImageReplicate) (string, error) {
	target := types.NamespacedName{Name: repl.Spec.ToImage.RegistryName, Namespace: repl.Spec.ToImage.RegistryNamespace}
	url, err := registry.GetURL(c, target, repl.Spec.ToImage.RegistryType)
	if err != nil {
		r.logger.Error(err, "failed to get url", "registryType", repl.Spec.ToImage.RegistryType, "registryName", target.Name, "registryNamespace", target.Namespace)
		return "", err
	}
	return path.Join(utils.TrimHTTPScheme(url), repl.Spec.ToImage.Image), nil
}
|
package middleware
import (
"context"
"strings"
"go.opencensus.io/plugin/ocgrpc"
"go.opencensus.io/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/stats"
"github.com/caos/zitadel/internal/api/http"
"github.com/caos/zitadel/internal/tracing"
)
// GRPCMethod names a gRPC method (or method suffix) excluded from tracing.
type GRPCMethod string
// TracingStatsServer returns a grpc.ServerOption installing an OpenCensus
// stats handler that skips tracing for the given methods.
func TracingStatsServer(ignoredMethods ...GRPCMethod) grpc.ServerOption {
	handler := &tracingServerHandler{
		ignoredMethods,
		ocgrpc.ServerHandler{
			StartOptions: trace.StartOptions{
				Sampler:  tracing.Sampler(),
				SpanKind: trace.SpanKindServer,
			},
		},
	}
	return grpc.StatsHandler(handler)
}
// DefaultTracingStatsServer returns the tracing stats handler with the
// health, readiness and validation endpoints excluded.
func DefaultTracingStatsServer() grpc.ServerOption {
	return TracingStatsServer(http.Healthz, http.Readiness, http.Validation)
}
// tracingServerHandler is an ocgrpc.ServerHandler that skips trace tagging
// for a configured set of methods.
type tracingServerHandler struct {
	IgnoredMethods []GRPCMethod
	ocgrpc.ServerHandler
}
// TagRPC returns ctx untouched for ignored methods and delegates to the
// embedded ocgrpc handler otherwise.
func (s *tracingServerHandler) TagRPC(ctx context.Context, tagInfo *stats.RPCTagInfo) context.Context {
	name := tagInfo.FullMethodName
	for _, ignored := range s.IgnoredMethods {
		if strings.HasSuffix(name, string(ignored)) {
			return ctx
		}
	}
	return s.ServerHandler.TagRPC(ctx, tagInfo)
}
|
package mercedes
import (
"time"
"github.com/evcc-io/evcc/provider"
)
// Provider implements the vehicle api
type Provider struct {
	// chargerG and rangeG are cached accessors returning the raw EVResponse.
	chargerG func() (EVResponse, error)
	rangeG   func() (EVResponse, error)
}
// NewProvider creates a vehicle api provider
func NewProvider(api *API, vin string, cache time.Duration) *Provider {
	socFn := func() (EVResponse, error) { return api.Soc(vin) }
	rangeFn := func() (EVResponse, error) { return api.Range(vin) }
	return &Provider{
		chargerG: provider.Cached(socFn, cache),
		rangeG:   provider.Cached(rangeFn, cache),
	}
}
// Soc implements the api.Vehicle interface
func (v *Provider) Soc() (float64, error) {
	res, err := v.chargerG()
	if err != nil {
		return 0, err
	}
	return float64(res.Soc.Value), nil
}
// Range implements the api.VehicleRange interface
func (v *Provider) Range() (int64, error) {
	res, err := v.rangeG()
	if err != nil {
		return 0, err
	}
	return int64(res.RangeElectric.Value), nil
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package telemetryextension
import (
"context"
"chromiumos/tast/local/bundles/cros/telemetryextension/dep"
"chromiumos/tast/local/bundles/cros/telemetryextension/vendorutils"
"chromiumos/tast/testing"
)
// init registers the HasOEMName test with per-vendor parameterizations:
// each Param carries the expected OEM name and restricts the run to that
// vendor's models via hardware deps.
func init() {
	testing.AddTest(&testing.Test{
		Func: HasOEMName,
		Desc: "Verifies that DUT has correct OEM name",
		Contacts: []string{
			"chromeos-oem-services@google.com", // Use team email for tickets.
			"bkersting@google.com",
			"lamzin@google.com",
		},
		Attr: []string{"group:telemetry_extension_hw"},
		Params: []testing.Param{
			{
				Name:              "asus",
				Val:               "ASUS",
				ExtraHardwareDeps: dep.AsusModels(),
			},
			{
				Name:              "hp",
				Val:               "HP",
				ExtraHardwareDeps: dep.HPModels(),
			},
		},
	})
}
// HasOEMName tests that DUT has correct OEM name which comes from
// - /sys/devices/virtual/dmi/id/sys_vendor (old approach) or
// - /sys/firmware/vpd/ro/oem_name (new approach for unreleased models) or
// - CrOSConfig (new approach).
func HasOEMName(ctx context.Context, s *testing.State) {
	// The expected vendor name is supplied by the test parameterization.
	oemName, ok := s.Param().(string)
	if !ok {
		s.Fatal("Failed to convert params value into string: ", s.Param())
	}
	if vendor, err := vendorutils.FetchVendor(ctx); err != nil {
		s.Error("Failed to read vendor name: ", err)
	} else if got, want := vendor, oemName; got != want {
		s.Errorf("Unexpected vendor name: got %q, want %q", got, want)
	}
}
|
package http
import (
"github.com/gin-gonic/gin"
"github.com/saxon134/go-utils/saData"
"github.com/saxon134/go-utils/saData/saHit"
"github.com/saxon134/go-utils/saHttp"
"github.com/saxon134/workflow/api"
"github.com/saxon134/workflow/conf"
"strings"
)
// Handle is the application-level handler signature: it receives the wrapped request context and returns a response or an error.
type Handle func(c *api.Context) (res *api.Response, err error)
// Get registers a GET route on the group; every request is funneled
// through the shared get() pipeline (auth check, JWT parsing, paging and
// sorting extraction, handler dispatch).
func Get(group *gin.RouterGroup, path string, handler Handle, auth ...AuthType) {
	wrapped := func(c *gin.Context) {
		get(c, handler, auth...)
	}
	group.GET(path, wrapped)
}
// Gett is an alias of Get; the extra letter exists only so that route
// registrations line up visually at call sites.
func Gett(group *gin.RouterGroup, path string, handler Handle, auth ...AuthType) {
	Get(group, path, handler, auth...)
}
// Post registers a POST route on the group; every request is funneled
// through the shared post() pipeline (auth check, JWT parsing, handler
// dispatch).
func Post(group *gin.RouterGroup, path string, handler Handle, auth ...AuthType) {
	wrapped := func(c *gin.Context) {
		post(c, handler, auth...)
	}
	group.POST(path, wrapped)
}
// GetAndPost registers the same handler for both POST and GET on path.
func GetAndPost(group *gin.RouterGroup, path string, handler Handle, auth ...AuthType) {
	// POST first, then GET, mirroring the original registration order.
	group.POST(path, func(c *gin.Context) {
		post(c, handler, auth...)
	})
	group.GET(path, func(c *gin.Context) {
		get(c, handler, auth...)
	})
}
// get runs the shared GET pipeline: permission check, optional JWT
// identity parsing, paging and sorting extraction from the query string,
// and finally the business handler.
func get(gc *gin.Context, handle Handle, auth ...AuthType) {
	var err error
	var c = &api.Context{
		Context: gc,
	}
	// Permission check.
	if err = authCheck(c, auth...); err != nil {
		ResErr(c, err)
		return
	}
	// If the client sent identity information, parse it.
	token := gc.GetHeader("Authorization")
	if len(token) > 0 {
		if c.Account.Id == 0 {
			// Best effort: an invalid token simply leaves the account unset.
			_ = saHttp.JwtParse(token, conf.Conf.Http.JwtSecret, &c.Account)
		}
	}
	// Paging. Defaults: 20 items, offset 0, not explicitly requested.
	c.Paging.Limit = 20
	c.Paging.Offset = 0
	c.Paging.Valid = false
	var limit = 0
	{
		// Page size may arrive under several aliases; first positive hit
		// wins, checked in order: pageSize, size, limit.
		if s, ok := gc.GetQuery("pageSize"); ok {
			if v, _ := saData.ToInt(s); v > 0 {
				limit = v
			}
		}
		if limit == 0 {
			if s, ok := gc.GetQuery("size"); ok {
				if v, _ := saData.ToInt(s); v > 0 {
					limit = v
				}
			}
		}
		if limit == 0 {
			if s, ok := gc.GetQuery("limit"); ok {
				if v, _ := saData.ToInt(s); v > 0 {
					limit = v
				}
			}
		}
		if limit > 0 {
			c.Paging.Valid = true
			c.Paging.Limit = limit
		} else {
			c.Paging.Valid = false
			c.Paging.Limit = 20
		}
	}
	var offset, page = 0, 0
	if c.Paging.Valid {
		// The page number also has aliases, checked in order: pageNumber,
		// page, current; a raw "offset" parameter is the last resort.
		s, _ := gc.GetQuery("pageNumber")
		if page, _ = saData.ToInt(s); page > 0 {
			offset = c.Paging.Limit * (page - 1)
		}
		if offset <= 0 {
			s, _ = gc.GetQuery("page")
			if page, _ = saData.ToInt(s); page > 0 {
				offset = c.Paging.Limit * (page - 1)
			}
		}
		if offset <= 0 {
			s, _ = gc.GetQuery("current")
			if page, _ = saData.ToInt(s); page > 0 {
				offset = c.Paging.Limit * (page - 1)
			}
		}
		if offset <= 0 {
			s, _ = gc.GetQuery("offset")
			offset, _ = saData.ToInt(s)
		}
		c.Paging.Offset = saHit.Int(offset >= 0, offset, 0)
		// Total count: usually computed by the backend on the first request
		// and echoed back by the frontend on subsequent pages.
		s, _ = gc.GetQuery("total")
		c.Paging.Total, _ = saData.ToInt64(s)
	}
	// Sorting: "field" or "field__desc" (double-underscore separator);
	// the key is normalized to snake_case.
	if s, _ := gc.GetQuery("sort"); len(s) > 0 {
		ary := strings.Split(s, "__")
		c.Sort.Key = saData.SnakeStr(ary[0])
		c.Sort.Desc = len(ary) == 2 && ary[1] == "desc"
	}
	// Run the business handler.
	var res *api.Response
	res, err = handle(c)
	if err != nil {
		ResErr(c, err)
		return
	}
	Res(c, res)
}
// post runs the shared POST pipeline: permission check, optional JWT
// identity parsing, then the business handler.
func post(gc *gin.Context, handle Handle, auth ...AuthType) {
	c := &api.Context{Context: gc}
	// Permission check.
	if err := authCheck(c, auth...); err != nil {
		ResErr(c, err)
		return
	}
	// Parse identity from the Authorization header unless it is already set.
	if c.Account.Id == 0 {
		token := gc.GetHeader("Authorization")
		// Best effort: an invalid token simply leaves the account unset.
		_ = saHttp.JwtParse(token, conf.Conf.Http.JwtSecret, &c.Account)
	}
	// Run the business handler.
	res, err := handle(c)
	if err != nil {
		ResErr(c, err)
		return
	}
	Res(c, res)
}
|
package timeparser
import (
"testing"
"time"
)
// TestParseAtTime_Empty checks that an empty spec resolves to the current time.
func TestParseAtTime_Empty(t *testing.T) {
	if got, err := ParseAtTime("", nil); err != nil {
		t.Fatalf("err: %s", err)
	} else {
		TestTimeNearlyEqual(t, got, time.Now())
	}
}
// TestParseAtTime_UnixTime checks that a bare integer is parsed as a Unix timestamp.
func TestParseAtTime_UnixTime(t *testing.T) {
	want := time.Unix(100, 0)
	got, err := ParseAtTime("100", nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if got != want {
		t.Fatalf("\nExpected: %+v\nActual: %+v", want, got)
	}
}
// TestParseAtTime_CurrentTime checks that the literal "now" resolves to the current time.
func TestParseAtTime_CurrentTime(t *testing.T) {
	if got, err := ParseAtTime("now", nil); err != nil {
		t.Fatalf("err: %s", err)
	} else {
		TestTimeNearlyEqual(t, got, time.Now())
	}
}
// TestParseAtTime_RelativePlus checks a positive day offset relative to "now".
func TestParseAtTime_RelativePlus(t *testing.T) {
	if got, err := ParseAtTime("now+3d", nil); err != nil {
		t.Fatalf("err: %s", err)
	} else {
		TestTimeNearlyEqual(t, got, time.Now().AddDate(0, 0, 3))
	}
}
// TestParseAtTime_RelativeMinus checks a negative day offset relative to "now".
func TestParseAtTime_RelativeMinus(t *testing.T) {
	if got, err := ParseAtTime("now-30d", nil); err != nil {
		t.Fatalf("err: %s", err)
	} else {
		TestTimeNearlyEqual(t, got, time.Now().AddDate(0, 0, -30))
	}
}
// TestParseAtTime_Absolute checks the HH:MM_YYYYMMDD absolute format in UTC.
func TestParseAtTime_Absolute(t *testing.T) {
	expected := time.Date(2016, 10, 10, 19, 22, 0, 0, time.UTC)
	got, err := ParseAtTime("19:22_20161010", time.UTC)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if expected != got {
		t.Fatalf("\nExpected: %+v\nActual: %+v", expected, got)
	}
}
// TestParseAtTime_Timezone verifies that absolute timestamps are parsed in
// the supplied location.
func TestParseAtTime_Timezone(t *testing.T) {
	loc, err := time.LoadLocation("Asia/Tokyo")
	if err != nil {
		// Previously this error was discarded; on a host without tzdata the
		// nil *Location would panic inside time.Date. Fail explicitly instead.
		t.Fatalf("failed to load location: %s", err)
	}
	got, err := ParseAtTime("19:22_20161010", loc)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	expected := time.Date(2016, 10, 10, 19, 22, 0, 0, loc)
	if expected != got {
		t.Fatalf("\nExpected: %+v\nActual: %+v", expected, got)
	}
}
// TestParseTimeOffset exercises ParseTimeOffset across units, signs, and
// degenerate inputs ("", "-", "+").
func TestParseTimeOffset(t *testing.T) {
	const day = 24 * time.Hour
	cases := []struct {
		offset   string
		duration time.Duration
	}{
		{offset: "", duration: 0},
		{offset: "-", duration: 0},
		{offset: "+", duration: 0},
		{offset: "10days", duration: 10 * day},
		{offset: "0days", duration: 0},
		{offset: "-10days", duration: -10 * day},
		{offset: "5seconds", duration: 5 * time.Second},
		{offset: "5minutes", duration: 5 * time.Minute},
		{offset: "5hours", duration: 5 * time.Hour},
		{offset: "5weeks", duration: 5 * 7 * day},
		{offset: "1month", duration: 30 * day},
		{offset: "2months", duration: 60 * day},
		{offset: "12months", duration: 360 * day},
		{offset: "1year", duration: 365 * day},
		{offset: "2years", duration: 730 * day},
	}
	for i, tc := range cases {
		got, err := ParseTimeOffset(tc.offset)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		if tc.duration != got {
			t.Fatalf("\nExpected: %+v\nActual: %+v (#%d)", tc.duration, got, i)
		}
	}
}
|
// 189. godoc: offline/local documentation browser.
// Make sure the tool is installed: go get -v golang.org/x/tools/cmd/godoc
// CMD >godoc -http=:8080
// Browser > http://localhost:8080/pkg/
// Examples:
// godoc fmt Printf
// godoc -src fmt Printf
// go doc fmt.Printf
package mymath
// Sum returns the arithmetic sum of its integer arguments; with no
// arguments it returns 0.
func Sum(xi ...int) int {
	total := 0
	for i := range xi {
		total += xi[i]
	}
	return total
}
|
package main
/*
On an N x N board, the numbers from 1 to N*N are written boustrophedonically starting from the bottom left of the board, and alternating direction each row. For example, for a 6 x 6 board, the numbers are written as follows:
36 35 34 33 32 31
25 26 27 28 29 30
24 23 22 21 20 19
13 14 15 16 17 18
12 11 10 09 08 07
01 02 03 04 05 06
You start on square 1 of the board (which is always in the last row and first column). Each move, starting from square x, consists of the following:
You choose a destination square S with number x+1, x+2, x+3, x+4, x+5, or x+6, provided this number is <= N*N.
If S has a snake or ladder, you move to the destination of that snake or ladder. Otherwise, you move to S.
A board square on row r and column c has a "snake or ladder" if board[r][c] != -1. The destination of that snake or ladder is board[r][c].
Note that you only take a snake or ladder at most once per move: if the destination to a snake or ladder is the start of another snake or ladder, you do not continue moving.
Return the least number of moves required to reach square N*N. If it is not possible, return -1.
Example 1:
Input: [
[-1,-1,-1,-1,-1,-1],
[-1,-1,-1,-1,-1,-1],
[-1,-1,-1,-1,-1,-1],
[-1,35,-1,-1,13,-1],
[-1,-1,-1,-1,-1,-1],
[-1,15,-1,-1,-1,-1]]
Output: 4
Explanation:
At the beginning, you start at square 1 [at row 5, column 0].
You decide to move to square 2, and must take the ladder to square 15.
You then decide to move to square 17 (row 3, column 5), and must take the snake to square 13.
You then decide to move to square 14, and must take the ladder to square 35.
You then decide to move to square 36, ending the game.
It can be shown that you need at least 4 moves to reach the N*N-th square, so the answer is 4.
Note:
2 <= board.length = board[0].length <= 20
board[i][j] is between 1 and N*N or is equal to -1.
The board square with number 1 has no snake or ladder.
The board square with number N*N has no snake or ladder.
*/
import (
"fmt"
"math"
)
/*
36 35 34 33 32 31
25 26 27 28 29 30
24 23 22 21 20 19
13 14 15 16 17 18
12 11 10 09 08 07
01 02 03 04 05 06
*/
// main runs snakesAndLadders against a handful of fixed boards and prints
// the minimum move counts (expected values noted in the trailing comments).
func main() {
	// 6x6 variant of LeetCode example 1 (extra ladder at square 13).
	fmt.Println(snakesAndLadders([][]int{
		{-1, -1, -1, -1, -1, -1},
		{-1, -1, -1, -1, -1, -1},
		{1, -1, -1, -1, -1, -1},
		{-1, 35, -1, -1, 13, -1},
		{-1, -1, -1, -1, -1, -1},
		{-1, 15, -1, -1, -1, -1}}))
	fmt.Println(snakesAndLadders([][]int{
		{1, 1, -1},
		{1, 1, 1},
		{-1, 1, 1}}))
	fmt.Println(snakesAndLadders([][]int{
		{-1, -1, -1},
		{-1, 9, 8},
		{-1, 8, 9}}))
	fmt.Println(snakesAndLadders([][]int{
		{-1, 1, 2, -1},
		{2, 13, 15, -1},
		{-1, 10, -1, -1},
		{-1, 6, 2, 8}})) // 2 //*/
	// Square numbering of the 5x5 board below, for reference:
	/*
	   21 22 23 24 25
	   20 19 18 17 16
	   11 12 13 14 15
	   10  9  8  7  6
	    1  2  3  4  5
	*/
	fmt.Println(snakesAndLadders([][]int{
		{-1, -1, 19, 10, -1},
		{2, -1, -1, 6, -1},
		{-1, 17, -1, 19, -1},
		{25, -1, 20, -1, -1},
		{-1, -1, -1, -1, 15}})) //2
}
// snakesAndLadders returns the minimum number of dice moves needed to go
// from square 1 to square n*n on a boustrophedon board, or -1 if the last
// square is unreachable. It is a breadth-first search over squares: the
// first time a square is dequeued its recorded distance is minimal.
func snakesAndLadders(board [][]int) int {
	n := len(board)
	target := n * n
	// dist doubles as the visited set: square -> minimal moves from square 1.
	dist := map[int]int{1: 0}
	queue := []int{1}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if cur >= target {
			return dist[cur]
		}
		for die := 1; die <= 6; die++ {
			next := cur + die
			if next > target {
				break
			}
			// A snake or ladder on the landing square forces a jump
			// (taken at most once per move).
			dest := next
			if r, c := trans(next, n); board[r][c] != -1 {
				dest = board[r][c]
			}
			if _, seen := dist[dest]; !seen {
				dist[dest] = dist[cur] + 1
				queue = append(queue, dest)
			}
		}
	}
	return -1
}
// trans converts a 1-based boustrophedon square number on an n x n board
// into 0-based (row, col) matrix coordinates, where row 0 is the top of
// the board and square 1 sits at the bottom-left.
func trans(start int, n int) (int, int) {
	rowFromBottom := (start - 1) / n
	i := n - 1 - rowFromBottom
	j := (start - 1) % n
	// Every other row (counting 1-based from the bottom) runs right-to-left.
	if (n-i)%2 == 0 {
		j = n - 1 - j
	}
	return i, j
}
//////////////////////////////////////////////////
// snakesAndLadders1 is an alternative memoized-DFS solution; it returns
// -1 when the last square cannot be reached.
func snakesAndLadders1(board [][]int) int {
	n := len(board)
	cache := make([]int, n*n+1)
	best := solve(1, board, n, &cache)
	if best < 0 {
		return -1
	}
	return best
}
// solve returns the minimum number of moves from square start to square
// n*n found on this search path, or -1 if none. cache memoizes results:
//
//	cache[x] == 0  -> unexplored
//	cache[x] == -1 -> in progress (cycle guard) or believed unreachable
//	cache[x] > 0   -> minimal moves recorded for x
//
// NOTE(review): the -1 in-progress marker is never reset when a path is
// abandoned, so a square first seen inside a cycle can stay marked
// unreachable even if another path could reach the goal through it —
// confirm against the BFS implementation above, which is the reference.
func solve(start int, board [][]int, n int, cache *[]int) int {
	min := math.MaxInt64
	for index := start + 1; index <= start+6; index++ {
		// Rolling onto (or past) the last square wins in one move.
		if index >= n*n {
			(*cache)[start] = 1
			return 1
		}
		i, j := trans(index, n)
		nstart := index
		if board[i][j] != -1 {
			// Forced snake/ladder jump (taken at most once per move).
			nstart = board[i][j]
		}
		if nstart >= n*n {
			(*cache)[start] = 1
			return 1
		}
		if (*cache)[nstart] == 0 {
			// Mark in-progress before recursing so cycles terminate.
			(*cache)[nstart] = -1
			r := solve(nstart, board, n, cache)
			if r > 0 && min > r {
				min = r
			}
		} else {
			r := (*cache)[nstart]
			if r > 0 && min > r {
				min = r
			}
		}
	}
	if min == math.MaxInt64 {
		(*cache)[start] = -1
		return -1
	}
	(*cache)[start] = min + 1
	return min + 1
}
|
package array
// BinarySearch looks for target among the first n elements of the sorted
// slice arr using a closed interval [left, right]; it returns the index
// of a match or -1 when absent.
func BinarySearch(arr []int, n int, target int) int {
	if n == 0 {
		return -1
	}
	lo, hi := 0, n-1
	// Invariant: if target exists, its index lies in [lo, hi].
	for lo <= hi {
		mid := lo + (hi-lo)>>1
		switch {
		case arr[mid] == target:
			return mid
		case arr[mid] < target:
			// target is larger: search the right half.
			lo = mid + 1
		default:
			hi = mid - 1
		}
	}
	return -1
}
// BinarySearch1 looks for target among the first n elements of the sorted
// slice arr using a half-open interval [left, right); it returns the
// index of a match or -1 when absent.
func BinarySearch1(arr []int, n int, target int) int {
	if n == 0 {
		return -1
	}
	lo, hi := 0, n
	// Invariant: if target exists, its index lies in [lo, hi).
	for lo < hi {
		// Midpoint biased one slot left of the usual (hi-lo)/2; still valid
		// because lo <= mid < hi holds whenever lo < hi.
		mid := lo + (hi-lo-1)>>1
		switch {
		case arr[mid] == target:
			return mid
		case arr[mid] < target:
			// target is larger: search the right half.
			lo = mid + 1
		default:
			hi = mid
		}
	}
	return -1
}
|
// Copyright (c) KwanJunWen
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package estemplate
import "fmt"
// TokenizerUAXURLEmail Word Orientated Tokenizer which is like the standard tokenizer
// except that it recognises URLs and email addresses as single tokens.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/analysis-uaxurlemail-tokenizer.html
// for details.
type TokenizerUAXURLEmail struct {
	Tokenizer
	name string
	// fields specific to uax url email tokenizer
	maxTokenLength *int // pointer so "unset" (nil) is distinguishable from an explicit 0; server default is 255
}
// NewTokenizerUAXURLEmail initializes a new TokenizerUAXURLEmail with the
// given field name; all options keep their server-side defaults.
func NewTokenizerUAXURLEmail(name string) *TokenizerUAXURLEmail {
	tokenizer := new(TokenizerUAXURLEmail)
	tokenizer.name = name
	return tokenizer
}
// Name returns field key for the Tokenizer.
func (u *TokenizerUAXURLEmail) Name() string {
	return u.name
}
// MaxTokenLength sets the maximum token length and if a token is seen that exceeds this
// length then it is split at `max_token_length` intervals.
// Defaults to 255.
// It returns the receiver to allow call chaining.
func (u *TokenizerUAXURLEmail) MaxTokenLength(maxTokenLength int) *TokenizerUAXURLEmail {
	u.maxTokenLength = &maxTokenLength
	return u
}
// Validate validates TokenizerUAXURLEmail, returning an error naming any
// missing required fields. Name is only required when includeName is set.
func (u *TokenizerUAXURLEmail) Validate(includeName bool) error {
	var missing []string
	if includeName && u.name == "" {
		missing = append(missing, "Name")
	}
	if len(missing) == 0 {
		return nil
	}
	return fmt.Errorf("missing required fields: %v", missing)
}
// Source returns the serializable JSON for the source builder.
//
// Target shape:
//
//	{
//	  "test": {
//	    "type": "uax_url_email",
//	    "max_token_length": 255
//	  }
//	}
func (u *TokenizerUAXURLEmail) Source(includeName bool) (interface{}, error) {
	options := make(map[string]interface{})
	options["type"] = "uax_url_email"
	if u.maxTokenLength != nil {
		// Store the int value itself rather than the *int pointer. The JSON
		// encoding is identical, but callers inspecting the map now get a
		// plain number instead of a pointer.
		options["max_token_length"] = *u.maxTokenLength
	}
	if !includeName {
		return options, nil
	}
	// Wrap the options under the tokenizer's field name.
	source := make(map[string]interface{})
	source[u.name] = options
	return source, nil
}
|
package fmc
import "github.com/unixpickle/gocube"
// IsF2LMinus1Solved returns true if any F2L-1 is solved. It returns the face of
// the F2L-1 cross and the corner index of the pair which is not solved.
// An F2L-1 is a first-two-layers block in which at most one corner/edge
// pair may remain unsolved.
func IsF2LMinus1Solved(state gocube.CubieCube) (solved bool, face, corner int) {
	// crossEdges[(face-1)*4 : face*4] lists the four cross-edge indices for
	// each of the six faces (faces numbered 1..6).
	crossEdges := []int{
		0, 4, 5, 6,
		2, 8, 10, 11,
		0, 1, 2, 3,
		6, 7, 8, 9,
		1, 5, 7, 11,
		3, 4, 9, 10,
	}
	// pairEdges and pairCorners are parallel tables: per face, the edge and
	// corner belonging to each of the four F2L pairs.
	pairEdges := []int{
		1, 3, 7, 9,
		1, 3, 7, 9,
		4, 5, 10, 11,
		4, 5, 10, 11,
		0, 2, 6, 8,
		0, 2, 6, 8,
	}
	pairCorners := []int{
		7, 6, 3, 2,
		5, 4, 1, 0,
		6, 7, 4, 5,
		2, 3, 0, 1,
		7, 5, 3, 1,
		6, 4, 2, 0,
	}
	// An edge is solved when it sits in its home slot and is not flipped.
	var edgesSolved [12]bool
	for i := 0; i < 12; i++ {
		if state.Edges[i].Piece == i && !state.Edges[i].Flip {
			edgesSolved[i] = true
		}
	}
	// A corner is solved when in its home slot with Orientation == 1
	// (presumably gocube's "correctly oriented" value — confirm against
	// the gocube package documentation).
	var cornersSolved [8]bool
	for i := 0; i < 8; i++ {
		if state.Corners[i].Piece == i && state.Corners[i].Orientation == 1 {
			cornersSolved[i] = true
		}
	}
	solved = true
CrossLoop:
	for face = 1; face <= 6; face++ {
		faceStartIndex := (face - 1) * 4
		// All four cross edges of this face must be solved.
		for i := 0; i < 4; i++ {
			if !edgesSolved[crossEdges[faceStartIndex+i]] {
				continue CrossLoop
			}
		}
		// At most one of the four pairs may be unsolved; remember its corner.
		corner = -1
		for i := 0; i < 4; i++ {
			if !cornersSolved[pairCorners[faceStartIndex+i]] ||
				!edgesSolved[pairEdges[faceStartIndex+i]] {
				if corner == -1 {
					corner = pairCorners[faceStartIndex+i]
				} else {
					// Second unsolved pair: this face does not qualify.
					continue CrossLoop
				}
			}
		}
		return
	}
	return false, -1, -1
}
// ThreeStepF2LMinus1 finds solutions to the F2L-1 by solving 2x2x3 in two steps
// and then expanding them to F2L-1.
// One F2L-1 solution is streamed on the returned channel for every 2x2x3
// solution produced by TwoStep2x2x3.
//
// NOTE(review): the channel is never closed; a caller ranging over it will
// block once the last solution has been sent — confirm this is intended.
func ThreeStepF2LMinus1(cube gocube.CubieCube) <-chan []gocube.Move {
	channel := make(chan []gocube.Move, 1)
	go func() {
		for blockSolution := range TwoStep2x2x3(cube) {
			// Apply the block solution to a copy so the F2L-1 search starts
			// from the post-2x2x3 state (CubieCube is copied by value).
			start := cube
			for _, move := range blockSolution {
				start.Move(move)
			}
			// Remember the last face turned so the next search does not
			// immediately re-turn it.
			lastFace := -1
			if len(blockSolution) > 0 {
				lastFace = blockSolution[len(blockSolution)-1].Face()
			}
			moves := iterativef2lminus1(start, lastFace)
			channel <- append(blockSolution, moves...)
		}
	}()
	return channel
}
// iterativef2lminus1 runs iterative-deepening DFS: it retries
// solvef2lminus1 with increasing depth limits until a solution is found,
// so it never returns nil.
func iterativef2lminus1(start gocube.CubieCube, lastFace int) []gocube.Move {
	// The previous version used `for depth := 0; true; depth++` followed by
	// an unreachable `return nil`; the condition-free loop makes the
	// non-termination-by-fallthrough explicit and drops the dead return.
	for depth := 0; ; depth++ {
		if solution := solvef2lminus1(start, depth, lastFace); solution != nil {
			return solution
		}
	}
}
// solvef2lminus1 performs a depth-limited DFS for an F2L-1 solution of at
// most depth moves, never turning the same face twice in a row. It
// returns nil when no solution exists within the limit; a solved input at
// depth 0 yields an empty (non-nil) move slice.
func solvef2lminus1(start gocube.CubieCube, depth, lastFace int) []gocube.Move {
	if depth == 0 {
		if solved, _, _ := IsF2LMinus1Solved(start); solved {
			return []gocube.Move{}
		}
		return nil
	}
	for m := 0; m < 18; m++ {
		move := gocube.Move(m)
		face := move.Face()
		if face == lastFace {
			// Skip consecutive turns of the same face.
			continue
		}
		next := start
		next.Move(move)
		rest := solvef2lminus1(next, depth-1, face)
		if rest == nil {
			continue
		}
		return append([]gocube.Move{move}, rest...)
	}
	return nil
}
|
/*
* traPCollection API
*
* traPCollectionのAPI
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
import (
echo "github.com/labstack/echo/v4"
)
// SetupRouting binds every API endpoint to its handler and middleware
// chain. Middlewares are applied in the order listed (e.g. member auth
// runs before the maintainer/owner/admin checks that depend on it).
func SetupRouting(e *echo.Echo, api *Api) {
	// Game endpoints.
	e.DELETE("/api/games/:gameID", DeleteGamesHandler(api.GameApi), api.TrapMemberAuthMiddleware, api.GameMaintainerAuthMiddleware)
	e.GET("/api/games/:gameID/info", GetGameHandler(api.GameApi), api.BothAuthMiddleware)
	e.GET("/api/games/asset/:gameID/file", GetGameFileHandler(api.GameApi), api.BothAuthMiddleware)
	e.GET("/api/games/asset/:gameID/url", GetGameURLHandler(api.GameApi), api.BothAuthMiddleware)
	e.GET("/api/games/:gameID/version", GetGameVersionHandler(api.GameApi), api.TrapMemberAuthMiddleware)
	e.GET("/api/games", GetGamesHandler(api.GameApi), api.TrapMemberAuthMiddleware)
	e.GET("/api/games/:gameID/image", GetImageHandler(api.GameApi), api.BothAuthMiddleware)
	e.GET("/api/games/:gameID/maintainers", GetMaintainerHandler(api.GameApi), api.TrapMemberAuthMiddleware)
	e.GET("/api/games/:gameID/video", GetVideoHandler(api.GameApi), api.BothAuthMiddleware)
	e.POST("/api/games/asset/:gameID/file", PostFileHandler(api.GameApi), api.TrapMemberAuthMiddleware, api.GameMaintainerAuthMiddleware)
	e.POST("/api/games", PostGameHandler(api.GameApi), api.TrapMemberAuthMiddleware)
	e.POST("/api/games/:gameID/version", PostGameVersionHandler(api.GameApi), api.TrapMemberAuthMiddleware, api.GameMaintainerAuthMiddleware)
	e.POST("/api/games/:gameID/image", PostImageHandler(api.GameApi), api.TrapMemberAuthMiddleware, api.GameMaintainerAuthMiddleware)
	e.POST("/api/games/:gameID/maintainers", PostMaintainerHandler(api.GameApi), api.TrapMemberAuthMiddleware, api.GameOwnerAuthMiddleware)
	e.POST("/api/games/asset/:gameID/url", PostURLHandler(api.GameApi), api.TrapMemberAuthMiddleware, api.GameMaintainerAuthMiddleware)
	e.POST("/api/games/:gameID/video", PostVideoHandler(api.GameApi), api.TrapMemberAuthMiddleware, api.GameMaintainerAuthMiddleware)
	e.PUT("/api/games/:gameID/info", PutGameHandler(api.GameApi), api.TrapMemberAuthMiddleware, api.GameMaintainerAuthMiddleware)
	// Launcher authentication / product key endpoints.
	e.DELETE("/api/launcher/key/:productKeyID", DeleteProductKeyHandler(api.LauncherAuthApi), api.TrapMemberAuthMiddleware, api.AdminAuthMiddleware)
	e.GET("/api/launcher/me", GetLauncherMeHandler(api.LauncherAuthApi), api.LauncherAuthMiddleware)
	e.GET("/api/versions/:launcherVersionID/keys", GetProductKeysHandler(api.LauncherAuthApi), api.TrapMemberAuthMiddleware, api.AdminAuthMiddleware)
	e.POST("/api/launcher/key/generate", PostKeyGenerateHandler(api.LauncherAuthApi), api.TrapMemberAuthMiddleware, api.AdminAuthMiddleware)
	e.POST("/api/launcher/login", PostLauncherLoginHandler(api.LauncherAuthApi))
	// OAuth2 endpoints.
	e.GET("/api/oauth2/callback", CallbackHandler(api.Oauth2Api))
	e.GET("/api/oauth2/generate/code", GetGeneratedCodeHandler(api.Oauth2Api))
	e.POST("/api/oauth2/logout", PostLogoutHandler(api.Oauth2Api), api.TrapMemberAuthMiddleware)
	// Seat endpoints.
	e.DELETE("/api/seats", DeleteSeatHandler(api.SeatApi), api.LauncherAuthMiddleware)
	e.GET("/api/seats/versions/:seatVersionID", GetSeatsHandler(api.SeatApi), api.TrapMemberAuthMiddleware)
	e.POST("/api/seats", PostSeatHandler(api.SeatApi), api.LauncherAuthMiddleware)
	// Seat version endpoints.
	e.DELETE("/api/seats/versions/:seatVersionID", DeleteSeatVersionHandler(api.SeatVersionApi), api.TrapMemberAuthMiddleware, api.AdminAuthMiddleware)
	e.POST("/api/seats/versions", PostSeatVersionHandler(api.SeatVersionApi), api.TrapMemberAuthMiddleware, api.AdminAuthMiddleware)
	// User endpoints.
	e.GET("/api/users/me", GetMeHandler(api.UserApi), api.TrapMemberAuthMiddleware)
	e.GET("/api/users", GetUsersHandler(api.UserApi), api.TrapMemberAuthMiddleware)
	// Launcher version endpoints.
	e.GET("/api/versions/check", GetCheckListHandler(api.VersionApi), api.LauncherAuthMiddleware)
	e.GET("/api/versions/:launcherVersionID", GetVersionHandler(api.VersionApi), api.BothAuthMiddleware)
	e.GET("/api/versions", GetVersionsHandler(api.VersionApi), api.TrapMemberAuthMiddleware)
	e.POST("/api/versions/:launcherVersionID/game", PostGameToVersionHandler(api.VersionApi), api.TrapMemberAuthMiddleware, api.AdminAuthMiddleware)
	e.POST("/api/versions", PostVersionHandler(api.VersionApi), api.TrapMemberAuthMiddleware, api.AdminAuthMiddleware)
}
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"fmt"
"math"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)
// limitNode represents a node that limits the number of rows
// returned or only return them past a given number (offset).
// It is a planning-time placeholder: its exec methods panic (see below),
// so evaluation happens elsewhere.
type limitNode struct {
	plan       planNode       // input plan whose rows are limited
	countExpr  tree.TypedExpr // LIMIT expression; nil when absent
	offsetExpr tree.TypedExpr // OFFSET expression; nil when absent
}
// startExec is intentionally unimplemented: limitNode is never executed
// in local mode, so reaching this is a programming error.
func (n *limitNode) startExec(params runParams) error {
	panic("limitNode cannot be run in local mode")
}
// Next is intentionally unimplemented; see startExec.
func (n *limitNode) Next(params runParams) (bool, error) {
	panic("limitNode cannot be run in local mode")
}
// Values is intentionally unimplemented; see startExec.
func (n *limitNode) Values() tree.Datums {
	panic("limitNode cannot be run in local mode")
}
// Close releases resources by closing the wrapped input plan.
func (n *limitNode) Close(ctx context.Context) {
	n.plan.Close(ctx)
}
// evalLimit evaluates the Count and Offset fields. If Count is missing, the
// value is MaxInt64. If Offset is missing, the value is 0. A NULL
// expression keeps the default, and negative values are rejected.
func evalLimit(
	evalCtx *tree.EvalContext, countExpr, offsetExpr tree.TypedExpr,
) (count, offset int64, err error) {
	count = math.MaxInt64
	offset = 0
	// eval evaluates one clause and stores the result into dst, leaving the
	// default in place for nil expressions and NULL results.
	eval := func(name string, expr tree.TypedExpr, dst *int64) error {
		if expr == nil {
			return nil
		}
		d, err := expr.Eval(evalCtx)
		if err != nil {
			return err
		}
		if d == tree.DNull {
			// NULL keeps the default value.
			return nil
		}
		v := int64(tree.MustBeDInt(d))
		if v < 0 {
			return fmt.Errorf("negative value for %s", name)
		}
		*dst = v
		return nil
	}
	if err = eval("LIMIT", countExpr, &count); err != nil {
		return count, offset, err
	}
	if err = eval("OFFSET", offsetExpr, &offset); err != nil {
		return count, offset, err
	}
	return count, offset, nil
}
|
package connection_manager
import (
pb "github.com/1851616111/xchain/pkg/protos"
)
// Connection abstracts a bidirectional protobuf message channel between
// peers.
type Connection interface {
	// Send transmits a single message to the peer.
	Send(*pb.Message) error
	// Recv returns the next message from the peer.
	Recv() (*pb.Message, error)
}
|
package account
import (
"encoding/json"
domain2 "github.com/CMedrado/DesafioStone/pkg/domain"
http2 "github.com/CMedrado/DesafioStone/pkg/gateways/http"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"net/http"
)
// GetBalance handles GET balance requests: it looks up the account's
// balance by the "id" path variable and writes a JSON response; errors
// are mapped to HTTP statuses by errorBalance.
func (s *Handler) GetBalance(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	balance, err := s.account.GetBalance(id)
	// The content type applies to both success and error payloads.
	w.Header().Set("content-type", "application/json")
	l := s.logger.WithFields(log.Fields{
		"module": "https",
		"method": "handleBalance",
	})
	if err != nil {
		errorStruct{l: l, w: w, id: id}.errorBalance(err)
		return
	}
	l.WithFields(log.Fields{
		"type":       http.StatusOK,
		"request_id": id,
	}).Info("balance handled successfully!")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(BalanceResponse{Balance: balance})
}
// errorBalance maps domain errors to HTTP status codes and writes the
// JSON error payload (unknown errors get a bare 400 with no body).
func (e errorStruct) errorBalance(err error) {
	payload := http2.ErrorsResponse{Errors: err.Error()}
	switch err.Error() {
	case domain2.ErrInvalidID.Error():
		e.l.WithFields(log.Fields{
			"type":       http.StatusNotFound,
			"request_id": e.id,
		}).Error(err)
		e.w.WriteHeader(http.StatusNotFound)
		json.NewEncoder(e.w).Encode(payload)
	case domain2.ErrSelect.Error():
		e.l.WithFields(log.Fields{
			"type": http.StatusInternalServerError,
		}).Error(err)
		e.w.WriteHeader(http.StatusInternalServerError)
		json.NewEncoder(e.w).Encode(payload)
	case domain2.ErrParse.Error():
		e.l.WithFields(log.Fields{
			"type": http.StatusBadRequest,
		}).Error(err)
		e.w.WriteHeader(http.StatusBadRequest)
		json.NewEncoder(e.w).Encode(payload)
	default:
		e.w.WriteHeader(http.StatusBadRequest)
	}
}
|
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package example
import (
"context"
"math"
"time"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the Touch demo test, with an ash-chrome default variant
// and a lacros variant selected via the browser.Type parameter.
func init() {
	testing.AddTest(&testing.Test{
		Func:         Touch,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Demonstrates injecting touch events",
		Contacts:     []string{"ricardoq@chromium.org", "tast-owners@chromium.org"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		HardwareDeps: hwdep.D(hwdep.TouchScreen()),
		Params: []testing.Param{{
			Fixture: "chromeLoggedIn",
			Val:     browser.TypeAsh,
		}, {
			Name:              "lacros",
			Fixture:           "lacros",
			ExtraSoftwareDeps: []string{"lacros"},
			Val:               browser.TypeLacros,
		}},
	})
}
// Touch demonstrates injecting touch events: it opens a drawing web app
// and exercises single-touch dots, a continuous circle, swipes, and a
// two-finger pinch (zoom out/in) through the touchscreen device.
func Touch(ctx context.Context, s *testing.State) {
	// sleep fails the test if the context deadline is hit while pacing events.
	sleep := func(t time.Duration) {
		if err := testing.Sleep(ctx, t); err != nil {
			s.Fatal("Timeout reached: ", err)
		}
	}
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to open connection: ", err)
	}
	info, err := display.GetInternalInfo(ctx, tconn)
	if err != nil {
		s.Fatal("No display: ", err)
	}
	// Setup a browser before opening a tab.
	br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
	if err != nil {
		s.Fatal("Failed to open the browser: ", err)
	}
	defer closeBrowser(ctx)
	// TODO(ricardoq): This page might change/break in the future. If so, a built-in
	// HTML page that accepts drawing should be used. Additionally, Kleki seems to ignore
	// the 2nd & last events when drawing splines. But for the purpose of showing how
	// to use the API is good enough.
	conn, err := br.NewConn(ctx, "http://kleki.com")
	if err != nil {
		s.Fatal("Failed to open connection: ", err)
	}
	defer conn.Close()
	if err := conn.WaitForExpr(ctx, "document.getElementsByTagName('canvas').length > 0"); err != nil {
		s.Fatal("Timed out waiting for page load: ", err)
	}
	s.Log("Finding and opening touchscreen device")
	// It is possible to send raw events to the Touchscreen type. But it is recommended to
	// use the Touchscreen.TouchEventWriter struct since it already has functions to manipulate
	// Touch events.
	tsw, err := input.Touchscreen(ctx)
	if err != nil {
		s.Fatal("Failed to open touchscreen device: ", err)
	}
	defer tsw.Close()
	// Touchscreen bounds: The size of the touchscreen might not be the same
	// as the display size. In fact, might be even up to 4x bigger.
	touchWidth := tsw.Width()
	touchHeight := tsw.Height()
	// Display bounds
	displayWidth := float64(info.Bounds.Width)
	displayHeight := float64(info.Bounds.Height)
	// Scale factors from display pixels to touchscreen coordinates.
	pixelToTouchFactorX := float64(touchWidth) / displayWidth
	pixelToTouchFactorY := float64(touchHeight) / displayHeight
	centerX := displayWidth * pixelToTouchFactorX / 2
	centerY := displayHeight * pixelToTouchFactorY / 2
	stw, err := tsw.NewSingleTouchWriter()
	if err != nil {
		s.Fatal("Could not get a new TouchEventWriter: ", err)
	}
	defer stw.Close()
	// Draw a dotted line:
	// SingleTouchEventWriter is being reused for the 15 dots. The event is "ended" after each touch.
	// "End" is equivalent as lifting the finger from the touchscreen.
	// Thus generating a "dotted" line, instead of continuos one.
	// NOTE(review): Move/End errors are deliberately ignored in this loop,
	// unlike the circle loop below — confirm whether that is intentional.
	for i := 0; i < 15; i++ {
		// Values must be in "touchscreen coordinates", not pixel coordinates.
		stw.Move(input.TouchCoord(centerX+float64(i)*50.0), input.TouchCoord(centerY+float64(i)*50.0))
		stw.End()
		sleep(100 * time.Millisecond)
	}
	// Draw a circle:
	// Draws a circle with 120 touch events. The touch event is moved to
	// 120 different locations generating a continuous circle.
	stw, err = tsw.NewSingleTouchWriter()
	if err != nil {
		s.Fatal("Could not create TouchEventWriter: ", err)
	}
	defer stw.Close()
	const (
		radius   = 200 // circle radius in pixels
		segments = 120 // segments used for the circle
	)
	for i := 0; i < segments; i++ {
		rads := 2.0*math.Pi*(float64(i)/segments) + math.Pi
		x := radius * pixelToTouchFactorX * math.Cos(rads)
		y := radius * pixelToTouchFactorY * math.Sin(rads)
		if err := stw.Move(input.TouchCoord(centerX+x), input.TouchCoord(centerY+y)); err != nil {
			s.Fatal("Failed to move the touch event: ", err)
		}
		sleep(15 * time.Millisecond)
	}
	// And finally "end" (lift the finger) the line.
	if err := stw.End(); err != nil {
		s.Fatal("Failed to finish the touch event: ", err)
	}
	// Swipe test:
	// Draw a box around the circle using 4 swipes.
	const boxSize = radius * 2 // box size in pixels
	stw, err = tsw.NewSingleTouchWriter()
	if err != nil {
		s.Fatal("Could not create TouchEventWriter: ", err)
	}
	defer stw.Close()
	// Each entry holds normalized start/end corners (-1..1) around the center.
	for _, d := range []struct {
		x0, y0, x1, y1 float64
	}{
		{-1, 1, -1, -1}, // swipe up form bottom-left
		{-1, -1, 1, -1}, // swipe right from top-left
		{1, -1, 1, 1},   // swipe down from top-right
		{1, 1, -1, 1},   // swipe left from bottom-right
	} {
		x0 := input.TouchCoord(centerX + boxSize/2*d.x0*pixelToTouchFactorX)
		y0 := input.TouchCoord(centerY + boxSize/2*d.y0*pixelToTouchFactorY)
		x1 := input.TouchCoord(centerX + boxSize/2*d.x1*pixelToTouchFactorX)
		y1 := input.TouchCoord(centerY + boxSize/2*d.y1*pixelToTouchFactorY)
		if err := stw.Swipe(ctx, x0, y0, x1, y1, 500*time.Millisecond); err != nil {
			s.Error("Failed to run Swipe: ", err)
		}
	}
	if err := stw.End(); err != nil {
		s.Error("Failed to finish the swipe gesture: ", err)
	}
	// Multitouch test: Zoom out + zoom in
	// Get a multitouch writer for two touches.
	mtw, err := tsw.NewMultiTouchWriter(2)
	if err != nil {
		s.Fatal("Could not get a new TouchEventWriter: ", err)
	}
	defer mtw.Close()
	// Get the individual TouchState
	ts0 := mtw.TouchState(0)
	ts1 := mtw.TouchState(1)
	// Zoom out: move the two fingers apart symmetrically around the center.
	for i := 5; i < 100; i++ {
		deltaX := float64(i) * pixelToTouchFactorX
		deltaY := float64(i) * pixelToTouchFactorY
		ts0.SetPos(input.TouchCoord(centerX-deltaX), input.TouchCoord(centerY-deltaY))
		ts1.SetPos(input.TouchCoord(centerX+deltaX), input.TouchCoord(centerY+deltaY))
		mtw.Send()
		sleep(15 * time.Millisecond)
	}
	// Zoom in: bring the fingers back together.
	for i := 100; i > 15; i-- {
		deltaX := float64(i) * pixelToTouchFactorX
		deltaY := float64(i) * pixelToTouchFactorY
		ts0.SetPos(input.TouchCoord(centerX-deltaX), input.TouchCoord(centerY-deltaY))
		ts1.SetPos(input.TouchCoord(centerX+deltaX), input.TouchCoord(centerY+deltaY))
		mtw.Send()
		sleep(15 * time.Millisecond)
	}
	mtw.End()
}
|
package main
import (
"fmt"
"log"
"net/http"
"os"
)
// indexHandler serves the HTML form page, or the literal "NoPage!" when
// the file cannot be read.
//
// Fixes over the previous version:
//   - the file is read in full (os.ReadFile) instead of a single Read into
//     a fixed 16 KiB buffer, which truncated larger files and emitted the
//     buffer's trailing NUL padding for smaller ones;
//   - the content is written with w.Write instead of being passed to
//     fmt.Fprintf as a format string, which misrendered any '%' verbs in
//     the HTML (a format-string bug).
func indexHandler(w http.ResponseWriter, req *http.Request) {
	const page = "/home/liminghao/Dev/Golang/src/github.com/castermode/golang-test/web/html/form.html"
	data, err := os.ReadFile(page)
	if err != nil {
		fmt.Fprint(w, "NoPage!")
		return
	}
	w.Write(data)
}
// main registers the index handler and serves HTTP on port 10001.
// log.Fatal only fires if ListenAndServe returns, i.e. on listen failure.
func main() {
	http.HandleFunc("/", indexHandler)
	// NOTE(review): the printed address is informational only; the server
	// actually binds all interfaces on port 10001.
	fmt.Println("listening 47.89.43.129:10001")
	log.Fatal(http.ListenAndServe(":10001", nil))
}
|
package organisations
import (
"encoding/json"
"fmt"
"os"
"testing"
"github.com/Financial-Times/annotations-rw-neo4j/annotations"
"github.com/Financial-Times/neo-utils-go/neoutils"
"github.com/jmcvetta/neoism"
"github.com/stretchr/testify/assert"
)
// Fixture UUIDs and external identifiers shared by the concordance tests.
const (
	org1UUID    = "0d99ab07-3b0a-4313-939e-caa02db23aa1"
	org2UUID    = "b40d53d3-3b0d-4069-90d9-0ccf9d7e1d0c"
	org3UUID    = "ba956ba9-e552-4abf-9850-1346da690bb8"
	org9UUID    = "bbb7173b-2e90-4cc5-b439-252427e46cd0"
	org8UUID    = "5c510ad1-2b73-4375-90e1-6ccbc50bd21f"
	contentUUID = "c3bce4dc-c857-4fe6-8277-61c0294d9187"

	fsOrg1Identifier      = "org1 factset id"
	fsOrg8Identifier      = "org8 factset id"
	leiCodeOrg8Identifier = "leiCodeIdentifier org8"
	leiCodeOrgxIdentifier = "leiCodeIdentifier"
	tmeOrg2Identifier     = "tmeIdentifier org2"
	tmeOrg3Identifier     = "tmeIdentifier org3"
	tmeOrg8Identifier     = "tmeIdentifier org8"
	tmeOrg9Identifier     = "tmeIdentifier org9"
)

// concordedUUIDs lists every fixture organisation, used for DB setup/cleanup.
var concordedUUIDs = []string{org1UUID, org2UUID, org3UUID, org9UUID, org8UUID}

// org1 has Factset and LEI identifiers but no TME identifiers.
var org1 = organisation{
	UUID: org1UUID,
	Type: Organisation,
	AlternativeIdentifiers: alternativeIdentifiers{
		FactsetIdentifier: fsOrg1Identifier,
		UUIDS:             []string{org1UUID},
		LeiCode:           leiCodeOrgxIdentifier,
		TME:               []string{},
	},
	ProperName: "Proper Name 1",
}

// org2 is TME-identified and is a child of org8.
var org2 = organisation{
	UUID: org2UUID,
	Type: Organisation,
	AlternativeIdentifiers: alternativeIdentifiers{
		UUIDS: []string{org2UUID},
		TME:   []string{tmeOrg2Identifier},
	},
	ProperName:         "Proper Name 2",
	ParentOrganisation: org8UUID,
}

// org3 is TME-identified and is a child of org2.
var org3 = organisation{
	UUID: org3UUID,
	Type: Organisation,
	AlternativeIdentifiers: alternativeIdentifiers{
		UUIDS: []string{org3UUID},
		TME:   []string{tmeOrg3Identifier},
	},
	ProperName:         "Proper Name 3",
	ParentOrganisation: org2UUID,
}

// org8 carries every identifier kind (Factset, TME, and LEI).
var org8 = organisation{
	UUID: org8UUID,
	Type: Organisation,
	AlternativeIdentifiers: alternativeIdentifiers{
		FactsetIdentifier: fsOrg8Identifier,
		UUIDS:             []string{org8UUID},
		TME:               []string{tmeOrg8Identifier},
		LeiCode:           leiCodeOrg8Identifier,
	},
	ProperName: "Proper Name 8",
}

// org9 is TME-identified with no parent.
var org9 = organisation{
	UUID: org9UUID,
	Type: Organisation,
	AlternativeIdentifiers: alternativeIdentifiers{
		UUIDS: []string{org9UUID},
		TME:   []string{tmeOrg9Identifier},
	},
	ProperName: "Proper Name 9",
}
// TestConcordeThreeOrganisations writes org1, org2 and org9 as separate
// nodes, then writes an updated org1 whose alternative identifiers include
// the UUIDs and TME identifiers of org2 and org9, and verifies that the
// update concords them: org2 and org9 are deleted and the stored org1
// matches the updated document exactly.
func TestConcordeThreeOrganisations(t *testing.T) {
	assert := assert.New(t)
	db := getDatabaseConnectionAndCheckClean(t, assert, concordedUUIDs)
	cypherDriver := getCypherDriver(db)
	defer cleanDB(db, t, assert, concordedUUIDs)
	// org1Updated concords org1 with org2 and org9 by listing their UUIDs
	// and TME identifiers among its own alternative identifiers.
	org1Updated := organisation{
		UUID: org1UUID,
		Type: Organisation,
		AlternativeIdentifiers: alternativeIdentifiers{
			FactsetIdentifier: fsOrg1Identifier,
			UUIDS:             []string{org1UUID, org2UUID, org9UUID},
			LeiCode:           leiCodeOrgxIdentifier,
			TME:               []string{tmeOrg2Identifier, tmeOrg9Identifier},
		},
		ProperName: "Updated Name",
	}
	assert.NoError(cypherDriver.Write(org1, "TEST_TRANS_ID"))
	assert.NoError(cypherDriver.Write(org2, "TEST_TRANS_ID"))
	assert.NoError(cypherDriver.Write(org9, "TEST_TRANS_ID"))
	// All three organisations should be readable before concordance.
	_, found, _ := cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org1UUID)
	_, found, _ = cypherDriver.Read(org2UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org2UUID)
	_, found, _ = cypherDriver.Read(org9UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org9UUID)
	// Writing the concorded document should remove the alternative nodes.
	assert.NoError(cypherDriver.Write(org1Updated, "TEST_TRANS_ID"))
	_, found, _ = cypherDriver.Read(org2UUID, "TEST_TRANS_ID")
	assert.False(found, "Organisation for uuid %s should have been deleted", org2UUID)
	_, found, _ = cypherDriver.Read(org9UUID, "TEST_TRANS_ID")
	assert.False(found, "Organisation for uuid %s should have been deleted", org9UUID)
	org1Stored, found, _ := cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org1UUID)
	assert.Equal(org1Updated, org1Stored)
}
// TestConcordeOrganisationsWithRelationships concords a node that has
// multiple major mentions (mentions with platformVersion v1): it writes four
// organisations, attaches v1/v2 annotations to them, then concords org1 with
// org2 and org9 and verifies the surviving nodes keep/receive the expected
// relationships.
func TestConcordeOrganisationsWithRelationships(t *testing.T) {
	assert := assert.New(t)
	db := getDatabaseConnectionAndCheckClean(t, assert, concordedUUIDs)
	cypherDriver := getCypherDriver(db)
	defer cleanDB(db, t, assert, concordedUUIDs)
	// STEP 1: write nodes
	assert.NoError(cypherDriver.Write(org1, "TEST_TRANS_ID"))
	assert.NoError(cypherDriver.Write(org2, "TEST_TRANS_ID"))
	assert.NoError(cypherDriver.Write(org9, "TEST_TRANS_ID"))
	assert.NoError(cypherDriver.Write(org8, "TEST_TRANS_ID"))
	_, found, _ := cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org1UUID)
	_, found, _ = cypherDriver.Read(org2UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org2UUID)
	_, found, _ = cypherDriver.Read(org9UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org9UUID)
	_, found, _ = cypherDriver.Read(org8UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org8UUID)
	//STEP 2: write relationships
	//write V2 mentions, and about annotation for org2UUID, write V2 mentions annotation for org8UUID
	v2AnnotationsRW := annotations.NewCypherAnnotationsService(cypherDriver.conn)
	writeJSONToService(v2AnnotationsRW, "./test-resources/annotationBodyForOrg2AndOrg8.json", contentUUID, assert)
	//write V1 mentions annotation for org1UUID and org9UUID - considered as major mentions
	v1AnnotationsRW := annotations.NewCypherAnnotationsService(cypherDriver.conn)
	writeJSONToService(v1AnnotationsRW, "./test-resources/annotationBodyForOrg1AndOrg9.json", contentUUID, assert)
	//STEP3: concorde org1, with org2 and org9
	updatedOrg1 := organisation{
		UUID: org1UUID,
		Type: Organisation,
		AlternativeIdentifiers: alternativeIdentifiers{
			FactsetIdentifier: fsOrg1Identifier,
			UUIDS:             []string{org1UUID, org2UUID, org9UUID},
			LeiCode:           leiCodeOrgxIdentifier,
			TME:               []string{tmeOrg2Identifier, tmeOrg9Identifier},
		},
		// should come out from the transformer like this, otherwise won't be merged
		ProperName:         "Updated Name",
		ParentOrganisation: org8UUID, // should come out from the transformer - otherwise won't be transferred
	}
	assert.NoError(cypherDriver.Write(updatedOrg1, "TEST_TRANS_ID"))
	//RESULTS concording should result in:
	// - the presence of node 1 and 8, absence of node 2, 9
	_, found, _ = cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org1UUID)
	_, found, _ = cypherDriver.Read(org8UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org8UUID)
	// Fixed assertion messages: these two nodes are expected to be gone after
	// concordance; the old text ("Didn't find...") described the opposite.
	_, found, _ = cypherDriver.Read(org2UUID, "TEST_TRANS_ID")
	assert.False(found, "Organisation for uuid %s should have been deleted", org2UUID)
	_, found, _ = cypherDriver.Read(org9UUID, "TEST_TRANS_ID")
	assert.False(found, "Organisation for uuid %s should have been deleted", org9UUID)
	//- for org 8:
	// - one v2 mentions from content - which existed
	// - one SUB_ORGANISATION_OF to org8 from org2
	// - 4 IDENTIFIES relationships from identifiers to nodes
	transferredPropertyLR, transferredPropertyRL, err := readRelationshipDetails(cypherDriver.conn, "Thing", org8UUID)
	assert.Nil(err)
	assert.Equal(0, len(transferredPropertyRL))
	assert.Equal(1, len(transferredPropertyLR))
	assert.Contains(transferredPropertyLR, property{Type: "SUB_ORGANISATION_OF", PlatformVersion: ""})
	transferredPropertyLR, transferredPropertyRL, err = readRelationshipDetails(cypherDriver.conn, "Identifier", org8UUID)
	assert.Nil(err)
	assert.Equal(0, len(transferredPropertyRL))
	assert.Equal(4, len(transferredPropertyLR))
	for _, rel := range transferredPropertyLR {
		assert.Equal("IDENTIFIES", rel.Type)
		assert.Equal("", rel.PlatformVersion)
	}
	// - for org 1:
	// - one v2 mentions from content
	// - one v1 mentions from content (two merged in one, with properties from the randomly selected relationship)
	// - one v2 about from content
	// - one SUB_ORGANISATION_OF to org8
	// - 7 IDENTIFIES relationships from identifiers to node
	transferredPropertyLR, transferredPropertyRL, err = readRelationshipDetails(cypherDriver.conn, "Thing", org1UUID)
	assert.Nil(err)
	assert.Equal(1, len(transferredPropertyLR))
	assert.Contains(transferredPropertyLR, property{Type: "MENTIONS"})
	assert.Equal(1, len(transferredPropertyRL))
	assert.Contains(transferredPropertyRL, property{Type: "SUB_ORGANISATION_OF", PlatformVersion: ""})
	transferredPropertyLR, transferredPropertyRL, err = readRelationshipDetails(cypherDriver.conn, "Identifier", org1UUID)
	assert.Nil(err)
	assert.Equal(0, len(transferredPropertyRL))
	assert.Equal(7, len(transferredPropertyLR))
	assert.Contains(transferredPropertyLR, property{Type: "IDENTIFIES", PlatformVersion: ""})
	for _, rel := range transferredPropertyLR {
		assert.Equal("IDENTIFIES", rel.Type)
		assert.Equal("", rel.PlatformVersion)
	}
}
// TestTransferIncomingHasSubOrganisationOfRelationships concordes nodes with
// incoming and outgoing sub-organisation-of relationships.
//
// Setup: 4 nodes with
//   [org3]-[sub-organisation-of]->[org2]
//   [org2]-[sub-organisation-of]->[org8]
// INPUT: org1 is concorded with org2 ([org1]-[sub-organisation-of]->[org8]
// should come from the transformer).
// RESULT: [org3]-[sub-organisation-of]->[org1] should be transferred.
func TestTransferIncomingHasSubOrganisationOfRelationships(t *testing.T) {
	assert := assert.New(t)
	db := getDatabaseConnectionAndCheckClean(t, assert, concordedUUIDs)
	cypherDriver := getCypherDriver(db)
	defer cleanDB(db, t, assert, concordedUUIDs)
	//Step1: write nodes
	assert.NoError(cypherDriver.Write(org1, "TEST_TRANS_ID"))
	assert.NoError(cypherDriver.Write(org2, "TEST_TRANS_ID"))
	assert.NoError(cypherDriver.Write(org3, "TEST_TRANS_ID"))
	assert.NoError(cypherDriver.Write(org8, "TEST_TRANS_ID"))
	_, found, _ := cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org1UUID)
	_, found, _ = cypherDriver.Read(org2UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org2UUID)
	_, found, _ = cypherDriver.Read(org3UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org3UUID)
	_, found, _ = cypherDriver.Read(org8UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org8UUID)
	//Step 2: concorde org1 with org2
	updatedOrg1 := organisation{
		UUID: org1UUID,
		Type: Organisation,
		AlternativeIdentifiers: alternativeIdentifiers{
			FactsetIdentifier: fsOrg1Identifier,
			UUIDS:             []string{org1UUID, org2UUID},
			TME:               []string{tmeOrg2Identifier},
		}, // should come out from the transformer like this, otherwise won't be merged
		ProperName:         "Updated Name",
		ParentOrganisation: org8UUID, // should come out from the transformer - otherwise won't be transferred
	}
	assert.NoError(cypherDriver.Write(updatedOrg1, "TEST_TRANS_ID"))
	//Step3: check results
	// -> no org2
	_, found, _ = cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org1UUID)
	_, found, _ = cypherDriver.Read(org2UUID, "TEST_TRANS_ID")
	assert.False(found, "Organisation for uuid %s should have been concorded", org2UUID)
	_, found, _ = cypherDriver.Read(org3UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org3UUID)
	_, found, _ = cypherDriver.Read(org8UUID, "TEST_TRANS_ID")
	assert.True(found, "Didn't find organisation for uuid %s", org8UUID)
	// -> org1 present with incoming and outgoing sub-organisation-of relationships
	storedOrg1, _, _ := cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.Equal(updatedOrg1, storedOrg1, "orgs should be equal ")
	transferredPropertyLR, transferredPropertyRL, err := readRelationshipDetails(cypherDriver.conn, "Thing", org1UUID)
	assert.Nil(err)
	assert.Equal(1, len(transferredPropertyLR))
	assert.Contains(transferredPropertyLR, property{Type: "SUB_ORGANISATION_OF", PlatformVersion: ""})
	assert.Equal(1, len(transferredPropertyRL))
	assert.Contains(transferredPropertyRL, property{Type: "SUB_ORGANISATION_OF", PlatformVersion: ""})
}
// TestConcordeOrgsAndDeleteAlternativeNodes checks that alternative nodes are
// deleted at concordance, but their identifiers are kept on the surviving
// node (org2's UUID is retained among org1's alternative identifiers).
func TestConcordeOrgsAndDeleteAlternativeNodes(t *testing.T) {
	assert := assert.New(t)
	db := getDatabaseConnectionAndCheckClean(t, assert, concordedUUIDs)
	cypherDriver := getCypherDriver(db)
	defer cleanDB(db, t, assert, concordedUUIDs)
	// updatedOrg1 concords org1 with org2 by including org2's UUID.
	updatedOrg1 := organisation{
		UUID: org1UUID,
		Type: Organisation,
		AlternativeIdentifiers: alternativeIdentifiers{
			FactsetIdentifier: fsOrg1Identifier,
			UUIDS:             []string{org1UUID, org2UUID},
			TME:               []string{},
		},
		ProperName: "Updated Name",
	}
	assert.NoError(cypherDriver.Write(org1, "TEST_TRANS_ID"))
	assert.NoError(cypherDriver.Write(org2, "TEST_TRANS_ID"))
	storedOrg1, _, _ := cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.Equal(org1, storedOrg1, "orgs should be equal ")
	assert.NoError(cypherDriver.Write(updatedOrg1, "TEST_TRANS_ID"))
	// After the concording write, org2 must be gone and org1 updated.
	storedOrg2, _, _ := cypherDriver.Read(org2UUID, "TEST_TRANS_ID")
	storedUpdatedOrg1, _, _ := cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.Equal(organisation{}, storedOrg2, "org should have been deleted")
	assert.Equal(updatedOrg1, storedUpdatedOrg1, "org should have been updated")
}
// TestConcordeOrgsWithRelationshipPlatformVersionTransfer concordes
// relationships with the same platformVersion - if any: org2's annotation
// relationships should be transferred to org1 when the two are concorded.
func TestConcordeOrgsWithRelationshipPlatformVersionTransfer(t *testing.T) {
	assert := assert.New(t)
	db := getDatabaseConnectionAndCheckClean(t, assert, concordedUUIDs)
	cypherDriver := getCypherDriver(db)
	annotationsRW := annotations.NewCypherAnnotationsService(cypherDriver.conn)
	defer cleanDB(db, t, assert, concordedUUIDs)
	defer deleteAllViaService(db, assert, annotationsRW)
	assert.NoError(cypherDriver.Write(org2, "TEST_TRANS_ID"))
	// Snapshot org2's relationships before concordance; org1 should have none.
	relOrg2L, relOrg2R, err := getNodeRelationshipNames(cypherDriver.conn, org2UUID)
	assert.Nil(err)
	relOrg1L, relOrg1R, err := getNodeRelationshipNames(cypherDriver.conn, org1UUID)
	assert.Empty(relOrg1L)
	assert.Empty(relOrg1R)
	assert.Nil(err)
	// updatedOrg1 concords org1 with org2.
	updatedOrg1 := organisation{
		UUID: org1UUID,
		Type: Organisation,
		AlternativeIdentifiers: alternativeIdentifiers{
			FactsetIdentifier: fsOrg8Identifier,
			UUIDS:             []string{org1UUID, org2UUID},
			TME:               []string{},
		},
		ProperName:         "Updated Name",
		ParentOrganisation: org8UUID,
	}
	writeJSONToService(annotationsRW, "./test-resources/annotationBodyForOrg2.json", contentUUID, assert)
	assert.NoError(cypherDriver.Write(updatedOrg1, "TEST_TRANS_ID"))
	relUpdatedOrg1L, relUpdatedOrg1R, err := getNodeRelationshipNames(cypherDriver.conn, org1UUID)
	assert.Nil(err)
	for _, rel := range relOrg2L {
		// NOTE(review): the return value of contains is discarded, so this
		// loop asserts nothing - it likely needs assert.True around the
		// call. Left unchanged because contains is defined elsewhere;
		// confirm its signature before fixing.
		contains(relUpdatedOrg1L, rel.RelationshipType)
	}
	for _, rel := range relOrg2R {
		// NOTE(review): same as above - result discarded.
		contains(relUpdatedOrg1R, rel.RelationshipType)
	}
	storedOrg2, _, _ := cypherDriver.Read(org2UUID, "TEST_TRANS_ID")
	storedOrg1, _, _ := cypherDriver.Read(org1UUID, "TEST_TRANS_ID")
	assert.Equal(organisation{}, storedOrg2, "org should have been deleted")
	assert.Equal(updatedOrg1, storedOrg1, "org should have been updated")
	// org1 should have gained org2's MENTIONS and ABOUT annotations plus the
	// SUB_ORGANISATION_OF relationship to its parent.
	transferredPropertyLR, transferredPropertyRL, err := readRelationshipDetails(cypherDriver.conn, "Thing", org1UUID)
	assert.Nil(err)
	assert.Equal(2, len(transferredPropertyLR))
	assert.Contains(transferredPropertyLR, property{Type: "MENTIONS"})
	assert.Contains(transferredPropertyLR, property{Type: "ABOUT"})
	assert.Equal(1, len(transferredPropertyRL))
	assert.Contains(transferredPropertyRL, property{Type: "SUB_ORGANISATION_OF", PlatformVersion: ""})
}
// writeJSONToService decodes the annotations JSON fixture at pathToJSONFile
// and writes it to the given annotations service for contentUUID, failing
// the calling test via assert on any error.
func writeJSONToService(service annotations.Service, pathToJSONFile string, contentUUID string, assert *assert.Assertions) {
	f, err := os.Open(pathToJSONFile)
	assert.NoError(err)
	// Close the fixture file; the original implementation leaked the handle.
	defer f.Close()
	dec := json.NewDecoder(f)
	annotation, err := service.DecodeJSON(dec)
	assert.NoError(err)
	assert.NoError(service.Write(contentUUID, "", "", "TEST_TRANS_ID", annotation))
}
// deleteAllViaService removes the test content's annotations through the
// annotations service, then deletes the content node itself (first with any
// remaining relationships, then on its own) directly via Cypher.
func deleteAllViaService(db neoutils.CypherRunner, assert *assert.Assertions, annotationsRW annotations.Service) {
	_, err := annotationsRW.Delete(contentUUID, "TEST_TRANS_ID")
	assert.Nil(err)
	cleanupQueries := []*neoism.CypherQuery{
		{Statement: fmt.Sprintf("MATCH (c:Thing {uuid: '%v'})-[rel]-(o) DELETE c, rel ", contentUUID)},
		{Statement: fmt.Sprintf("MATCH (c:Thing {uuid: '%v'}) DELETE c ", contentUUID)},
	}
	assert.NoError(db.CypherBatch(cleanupQueries))
}
// property captures one relationship row returned by the Cypher queries in
// readRelationshipDetails: the relationship type and its platformVersion
// property (empty string when the relationship carries none).
type property struct {
	Type            string `json:"name"`
	PlatformVersion string `json:"r.platformVersion"`
}
// readRelationshipDetails returns relationship details (type and
// platformVersion) between nodes labelled contentType and the Thing with the
// given orgUUID, in both directions: first relationships pointing from the
// contentType node to the org (LR), then those pointing from the org to the
// contentType node (RL). Both queries are run in a single Cypher batch.
func readRelationshipDetails(cypherRunner neoutils.CypherRunner, contentType string, orgUUID string) ([]property, []property, error) {
	// buildQuery constructs one directional query; pattern carries a %s
	// placeholder for the node label. Extracted to remove the duplicated
	// query-construction code of the original implementation.
	buildQuery := func(pattern string, result *[]property) *neoism.CypherQuery {
		return &neoism.CypherQuery{
			Statement: fmt.Sprintf(pattern, contentType),
			Parameters: map[string]interface{}{
				"uuid": orgUUID,
			},
			Result: result,
		}
	}
	transferredLRProperty := []property{}
	transferredRLProperty := []property{}
	err := cypherRunner.CypherBatch([]*neoism.CypherQuery{
		buildQuery(`match (co:%s)-[r]->(c:Thing{uuid:{uuid}})
				return r.platformVersion, type(r) as name`, &transferredLRProperty),
		buildQuery(`match (co:%s)<-[r]-(c:Thing{uuid:{uuid}})
				return r.platformVersion, type(r) as name`, &transferredRLProperty),
	})
	return transferredLRProperty, transferredRLProperty, err
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.