package v3
import "github.com/cockroachdb/cockroach/pkg/util/treeprinter"
func init() {
registerOperator(projectOp, "project", projectClass{})
}
func newProjectExpr(input *expr) *expr {
return &expr{
op: projectOp,
children: []*expr{input, nil /* projection */},
}
}
type projectClass struct{}
var _ operatorClass = projectClass{}
func (projectClass) kind() operatorKind {
// Project is both a logical and a physical operator.
return logicalKind | physicalKind | relationalKind
}
func (projectClass) layout() exprLayout {
return exprLayout{
projections: 1,
}
}
func (projectClass) format(e *expr, tp treeprinter.Node) {
n := formatRelational(e, tp)
formatExprs(n, "projections", e.projections())
formatExprs(n, "inputs", e.inputs())
}
func (projectClass) initKeys(e *expr, state *queryState) {
}
func (projectClass) updateProps(e *expr) {
excluded := e.props.outputCols.Union(e.providedInputCols())
e.props.outerCols = e.requiredInputCols().Difference(excluded)
for _, input := range e.inputs() {
e.props.outerCols.UnionWith(input.props.outerCols)
}
e.props.applyInputs(e.inputs())
}
func (projectClass) requiredProps(required *physicalProps, child int) *physicalProps {
if child == 0 {
return required // pass through
}
return nil
}
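// Construction sketch (names other than newProjectExpr are illustrative): the
// project node keeps its input at children[0] and its projections list at
// children[1], which the caller fills in after construction.
//
//	p := newProjectExpr(inputExpr)
//	p.children[1] = projectionsExpr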
|
// Copyright 2014 Matthias Zenger. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package buffers
import "sync"
import . "github.com/objecthub/containerkit"
func SynchronizedQueue(class QueueClass) QueueClass {
return &synchronizedQueueClass{class}
}
type synchronizedQueueClass struct {
class QueueClass
}
func (this *synchronizedQueueClass) Embed(obj Queue) Queue {
res := new(synchronizedQueue)
if obj == nil {
obj = res
}
res.obj = obj
res.class = this
res.unsync = this.class.Embed(obj)
res.unsynchronizedQueue = res.unsync
return res
}
func (this *synchronizedQueueClass) New(elements ...interface{}) Queue {
res := this.Embed(nil)
for i := 0; i < len(elements); i++ {
res.Enqueue(elements[i])
}
return res
}
func (this *synchronizedQueueClass) From(coll Container) Queue {
res := this.Embed(nil)
res.EnqueueFrom(coll)
return res
}
type unsynchronizedQueue interface {
Elements() Iterator
Take(n int) DependentContainer
TakeWhile(pred Predicate) DependentContainer
Drop(n int) DependentContainer
DropWhile(pred Predicate) DependentContainer
Filter(pred Predicate) DependentContainer
Map(f Mapping) DependentContainer
FlatMap(g Generator) DependentContainer
Flatten() DependentContainer
Concat(other Container) DependentContainer
Combine(f Binop, other Container) DependentContainer
Zip(other Container) DependentContainer
Class() QueueClass
}
type synchronizedQueue struct {
obj Queue
class QueueClass
mutex sync.RWMutex
unsync Queue
unsynchronizedQueue
}
func (this *synchronizedQueue) Size() int {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.Size()
}
func (this *synchronizedQueue) Enqueue(elem interface{}) {
this.mutex.Lock()
defer this.mutex.Unlock()
this.unsync.Enqueue(elem)
}
func (this *synchronizedQueue) Dequeue() interface{} {
this.mutex.Lock()
defer this.mutex.Unlock()
return this.unsync.Dequeue()
}
func (this *synchronizedQueue) Peek() interface{} {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.Peek()
}
func (this *synchronizedQueue) EnqueueFrom(coll Container) {
this.mutex.Lock()
defer this.mutex.Unlock()
this.unsync.EnqueueFrom(coll)
}
func (this *synchronizedQueue) Clear() {
this.mutex.Lock()
defer this.mutex.Unlock()
this.unsync.Clear()
}
func (this *synchronizedQueue) Class() QueueClass {
return this.class
}
func (this *synchronizedQueue) Copy() Queue {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.Copy()
}
func (this *synchronizedQueue) IsEmpty() bool {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.IsEmpty()
}
func (this *synchronizedQueue) Exists(pred Predicate) bool {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.Exists(pred)
}
func (this *synchronizedQueue) ForAll(pred Predicate) bool {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.ForAll(pred)
}
func (this *synchronizedQueue) ForEach(proc Procedure) {
this.mutex.RLock()
defer this.mutex.RUnlock()
this.unsync.ForEach(proc)
}
func (this *synchronizedQueue) FoldLeft(f Binop, z interface{}) interface{} {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.FoldLeft(f, z)
}
func (this *synchronizedQueue) FoldRight(f Binop, z interface{}) interface{} {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.FoldRight(f, z)
}
func (this *synchronizedQueue) Force() FiniteContainer {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.Force()
}
func (this *synchronizedQueue) Freeze() FiniteContainer {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.Freeze()
}
func (this *synchronizedQueue) String() string {
this.mutex.RLock()
defer this.mutex.RUnlock()
return this.unsync.String()
}
|
package main
import (
"lib"
"log"
"net/http"
"strconv"
)
var config lib.Config
func init() {
config = lib.InitConfig()
}
func main() {
h := lib.Handler{
Conf: config,
}
if err := http.ListenAndServe(config.Host+":"+strconv.Itoa(config.Port), h); err != nil {
log.Fatal(err)
}
}
|
package command
import (
"fmt"
"os"
"path"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var rootCmd = &cobra.Command{
Use: "torchprint",
Short: "torchprint is a printjob manager for campus printing at NYU",
Run: func(cmd *cobra.Command, args []string) {
cmd.Help()
},
}
var cfgFile string
// Execute runs root command
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func init() {
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file")
rootCmd.PersistentFlags().StringP("username", "u", "", "Login username")
rootCmd.PersistentFlags().StringP("password", "p", "", "Login password")
rootCmd.PersistentFlags().String("userid", "",
"Pharos user ID (if you don't know yours, run the login command)")
rootCmd.PersistentFlags().String("token", "", "Pharos user token")
viper.BindPFlag("username", rootCmd.PersistentFlags().Lookup("username"))
viper.BindPFlag("password", rootCmd.PersistentFlags().Lookup("password"))
viper.BindPFlag("token", rootCmd.PersistentFlags().Lookup("token"))
viper.BindPFlag("userid", rootCmd.PersistentFlags().Lookup("userid"))
rootCmd.AddCommand(versionCmd)
rootCmd.AddCommand(loginCmd)
rootCmd.AddCommand(addCmd)
rootCmd.AddCommand(listCmd)
rootCmd.AddCommand(deleteCmd)
rootCmd.AddCommand(historyCmd)
rootCmd.AddCommand(balanceCmd)
}
func initConfig() {
if cfgFile != "" {
viper.SetConfigFile(cfgFile)
} else {
home, err := homedir.Dir()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// look for config in $HOME and $HOME/.config/torchprint
viper.AddConfigPath(path.Join(home, ".config", "torchprint"))
viper.AddConfigPath(home)
viper.SetConfigType("json")
viper.SetConfigName(".torchprint")
}
if err := viper.ReadInConfig(); err != nil {
// config file not found, but don't panic yet
// since token and userid can be supplied in args
}
}
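// A minimal config file that initConfig would pick up, assuming the default
// search paths above (the keys mirror the flags bound in init; the values are
// placeholders):
//
//	$HOME/.config/torchprint/.torchprint.json
//	{
//	  "userid": "your-pharos-user-id",
//	  "token": "your-pharos-token"
//	}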
|
package main
import (
"fmt"
"net"
)
func main() {
listener, err := net.Listen("tcp", ":8080")
if err != nil {
fmt.Println(err)
return
}
defer listener.Close()
fmt.Println("Server is listening...")
for {
conn, err := listener.Accept()
if err != nil {
fmt.Println(err)
// conn may be nil when Accept fails, so there is nothing to close here.
continue
}
go handleConnection(conn)
}
}
func handleConnection(conn net.Conn) {
defer conn.Close()
for {
input := make([]byte, (1024 * 4))
n, err := conn.Read(input)
if n == 0 || err != nil {
fmt.Println("Read error:", err)
break
}
// Only the n bytes actually read are valid; the rest of the buffer is zeroed.
result := string(input[:n])
fmt.Println(result)
}
}
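// For a quick manual test, run the server and send it some text, e.g. with
// netcat: `nc localhost 8080`, then type a line; it is printed on the
// server's stdout, not echoed back to the client.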
|
package main
import (
"fmt"
"os"
"time"
proto "example/rpc/example"
"example/internal/config"
"example/internal/routers"
"example/internal/service/hello"
"github.com/gin-gonic/gin"
"github.com/gmsec/goplugins/plugin"
"github.com/gmsec/micro"
"github.com/xxjwxc/public/mydoc/myswagger"
"github.com/xxjwxc/public/server"
)
// CallBack is the service callback
func CallBack() {
// swagger
myswagger.SetHost("https://localhost:8080")
myswagger.SetBasePath("example")
myswagger.SetSchemes(true, false)
// -----end --
// reg := registry.NewDNSNamingRegistry()
// gRPC: initialize the service
service := micro.NewService(
micro.WithName("lp.srv.eg1"),
// micro.WithRegisterTTL(time.Second*30), // service registration TTL
micro.WithRegisterInterval(time.Second*15), // re-register the service within this interval
// micro.WithRegistryNaming(reg),
)
h := new(hello.Hello)
proto.RegisterHelloServer(service.Server(), h) // register the service
// ----------- end
// gin RESTful setup
router := gin.Default()
router.Use(routers.Cors())
v1 := router.Group("/example/api/v1")
routers.OnInitRouter(v1, h) // custom initialization
// ------ end
plg, b := plugin.Run(plugin.WithMicro(service),
plugin.WithGin(router),
plugin.WithAddr(":82"))
if b == nil {
plg.Wait()
}
fmt.Println("done")
}
func main() {
if config.GetIsDev() || len(os.Args) == 0 {
CallBack()
} else {
server.On(config.GetServiceConfig()).Start(CallBack)
}
}
|
package _019_remove_nth_node_from_end_of_list
type ListNode struct {
Val int
Next *ListNode
}
func removeNthFromEnd(head *ListNode, n int) *ListNode {
// Use a dummy head node so removing the first element needs no special case.
node := &ListNode{0, head}
prev, pn := node, node
// Advance pn n+1 steps so that prev ends up just before the node to remove.
for i := 1; i <= n+1; i++ {
pn = pn.Next
}
// Move both pointers until pn runs off the end of the list.
for pn != nil {
prev = prev.Next
pn = pn.Next
}
// prev now points at the node before the one to remove.
prev.Next = prev.Next.Next
return node.Next
}
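// Worked example: for the list 1->2->3->4->5 and n = 2, pn starts n+1 = 3
// steps ahead of prev; when pn runs off the end, prev sits on 3, so node 4
// (the 2nd from the end) is unlinked and the result is 1->2->3->5.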
|
package domain
import (
"github.com/plancks-cloud/plancks-cloud/model"
)
const ServiceCollectionName = model.ServiceCollectionName
type Service model.Service
const RouteCollectionName = model.RouteCollectionName
type Route model.Route
|
package main
import "fmt"
func main() {
//var x float64 = 42 // Change this to see the result
var result string
if x := 42; x < 0 {
result = "Less than zero"
} else if x == 0 {
result = "Equal to zero"
} else {
result = "Greater than zero"
}
fmt.Println("Result:", result)
}
|
package model
import "gopkg.in/mgo.v2/bson"
const (
// collection name for storing articles
CollectionArticle = "article"
)
type Article struct {
Id bson.ObjectId `json:"_id,omitempty" bson:"_id,omitempty"` // mongodb id
Title string `json:"title" form:"title" binding:"required" bson:"title"` // title
Body string `json:"body" form:"body" binding:"required" bson:"body"` // body text
CreatedOn int64 `json:"created_on" bson:"created_on"` // creation time
UpdatedOn int64 `json:"updated_on" bson:"updated_on"` // last modified time
}
|
package config
import (
"github.com/spf13/viper"
"time"
)
/*
Creation Time: 2019 - Oct - 06
Created by: (ehsan)
Maintainers:
1. Ehsan N. Moosa (E2)
Auditor: Ehsan N. Moosa (E2)
Copyright Ronak Software Group 2018
*/
func Set(key string, value interface{}) {
viper.Set(key, value)
}
func GetString(key string) string {
return viper.GetString(key)
}
func GetBool(key string) bool {
return viper.GetBool(key)
}
func GetInt64(key string) int64 {
return viper.GetInt64(key)
}
func GetInt32(key string) int32 {
return viper.GetInt32(key)
}
func GetInt(key string) int {
return viper.GetInt(key)
}
func GetDuration(key string) time.Duration {
return viper.GetDuration(key)
}
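// Usage sketch: these helpers are thin wrappers over viper's global instance,
// so values set anywhere in the process are visible here. The key name below
// is a placeholder:
//
//	config.Set("server.timeout", "15s")
//	timeout := config.GetDuration("server.timeout") // 15 * time.Second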
|
package management
//AppConfig is a type alias of the generated isApplication_AppConfig config
//to make it public
type AppConfig = isApplication_AppConfig
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import "encoding/json"
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// TestScript is documented here http://hl7.org/fhir/StructureDefinition/TestScript
type TestScript struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Meta *Meta `bson:"meta,omitempty" json:"meta,omitempty"`
ImplicitRules *string `bson:"implicitRules,omitempty" json:"implicitRules,omitempty"`
Language *string `bson:"language,omitempty" json:"language,omitempty"`
Text *Narrative `bson:"text,omitempty" json:"text,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Url string `bson:"url" json:"url"`
Identifier *Identifier `bson:"identifier,omitempty" json:"identifier,omitempty"`
Version *string `bson:"version,omitempty" json:"version,omitempty"`
Name string `bson:"name" json:"name"`
Title *string `bson:"title,omitempty" json:"title,omitempty"`
Status PublicationStatus `bson:"status" json:"status"`
Experimental *bool `bson:"experimental,omitempty" json:"experimental,omitempty"`
Date *string `bson:"date,omitempty" json:"date,omitempty"`
Publisher *string `bson:"publisher,omitempty" json:"publisher,omitempty"`
Contact []ContactDetail `bson:"contact,omitempty" json:"contact,omitempty"`
Description *string `bson:"description,omitempty" json:"description,omitempty"`
UseContext []UsageContext `bson:"useContext,omitempty" json:"useContext,omitempty"`
Jurisdiction []CodeableConcept `bson:"jurisdiction,omitempty" json:"jurisdiction,omitempty"`
Purpose *string `bson:"purpose,omitempty" json:"purpose,omitempty"`
Copyright *string `bson:"copyright,omitempty" json:"copyright,omitempty"`
Origin []TestScriptOrigin `bson:"origin,omitempty" json:"origin,omitempty"`
Destination []TestScriptDestination `bson:"destination,omitempty" json:"destination,omitempty"`
Metadata *TestScriptMetadata `bson:"metadata,omitempty" json:"metadata,omitempty"`
Fixture []TestScriptFixture `bson:"fixture,omitempty" json:"fixture,omitempty"`
Profile []Reference `bson:"profile,omitempty" json:"profile,omitempty"`
Variable []TestScriptVariable `bson:"variable,omitempty" json:"variable,omitempty"`
Setup *TestScriptSetup `bson:"setup,omitempty" json:"setup,omitempty"`
Test []TestScriptTest `bson:"test,omitempty" json:"test,omitempty"`
Teardown *TestScriptTeardown `bson:"teardown,omitempty" json:"teardown,omitempty"`
}
type TestScriptOrigin struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Index int `bson:"index" json:"index"`
Profile Coding `bson:"profile" json:"profile"`
}
type TestScriptDestination struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Index int `bson:"index" json:"index"`
Profile Coding `bson:"profile" json:"profile"`
}
type TestScriptMetadata struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Link []TestScriptMetadataLink `bson:"link,omitempty" json:"link,omitempty"`
Capability []TestScriptMetadataCapability `bson:"capability" json:"capability"`
}
type TestScriptMetadataLink struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Url string `bson:"url" json:"url"`
Description *string `bson:"description,omitempty" json:"description,omitempty"`
}
type TestScriptMetadataCapability struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Required bool `bson:"required" json:"required"`
Validated bool `bson:"validated" json:"validated"`
Description *string `bson:"description,omitempty" json:"description,omitempty"`
Origin []int `bson:"origin,omitempty" json:"origin,omitempty"`
Destination *int `bson:"destination,omitempty" json:"destination,omitempty"`
Link []string `bson:"link,omitempty" json:"link,omitempty"`
Capabilities string `bson:"capabilities" json:"capabilities"`
}
type TestScriptFixture struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Autocreate bool `bson:"autocreate" json:"autocreate"`
Autodelete bool `bson:"autodelete" json:"autodelete"`
Resource *Reference `bson:"resource,omitempty" json:"resource,omitempty"`
}
type TestScriptVariable struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Name string `bson:"name" json:"name"`
DefaultValue *string `bson:"defaultValue,omitempty" json:"defaultValue,omitempty"`
Description *string `bson:"description,omitempty" json:"description,omitempty"`
Expression *string `bson:"expression,omitempty" json:"expression,omitempty"`
HeaderField *string `bson:"headerField,omitempty" json:"headerField,omitempty"`
Hint *string `bson:"hint,omitempty" json:"hint,omitempty"`
Path *string `bson:"path,omitempty" json:"path,omitempty"`
SourceId *string `bson:"sourceId,omitempty" json:"sourceId,omitempty"`
}
type TestScriptSetup struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Action []TestScriptSetupAction `bson:"action" json:"action"`
}
type TestScriptSetupAction struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Operation *TestScriptSetupActionOperation `bson:"operation,omitempty" json:"operation,omitempty"`
Assert *TestScriptSetupActionAssert `bson:"assert,omitempty" json:"assert,omitempty"`
}
type TestScriptSetupActionOperation struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Type *Coding `bson:"type,omitempty" json:"type,omitempty"`
Resource *string `bson:"resource,omitempty" json:"resource,omitempty"`
Label *string `bson:"label,omitempty" json:"label,omitempty"`
Description *string `bson:"description,omitempty" json:"description,omitempty"`
Accept *string `bson:"accept,omitempty" json:"accept,omitempty"`
ContentType *string `bson:"contentType,omitempty" json:"contentType,omitempty"`
Destination *int `bson:"destination,omitempty" json:"destination,omitempty"`
EncodeRequestUrl bool `bson:"encodeRequestUrl" json:"encodeRequestUrl"`
Method *TestScriptRequestMethodCode `bson:"method,omitempty" json:"method,omitempty"`
Origin *int `bson:"origin,omitempty" json:"origin,omitempty"`
Params *string `bson:"params,omitempty" json:"params,omitempty"`
RequestHeader []TestScriptSetupActionOperationRequestHeader `bson:"requestHeader,omitempty" json:"requestHeader,omitempty"`
RequestId *string `bson:"requestId,omitempty" json:"requestId,omitempty"`
ResponseId *string `bson:"responseId,omitempty" json:"responseId,omitempty"`
SourceId *string `bson:"sourceId,omitempty" json:"sourceId,omitempty"`
TargetId *string `bson:"targetId,omitempty" json:"targetId,omitempty"`
Url *string `bson:"url,omitempty" json:"url,omitempty"`
}
type TestScriptSetupActionOperationRequestHeader struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Field string `bson:"field" json:"field"`
Value string `bson:"value" json:"value"`
}
type TestScriptSetupActionAssert struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Label *string `bson:"label,omitempty" json:"label,omitempty"`
Description *string `bson:"description,omitempty" json:"description,omitempty"`
Direction *AssertionDirectionType `bson:"direction,omitempty" json:"direction,omitempty"`
CompareToSourceId *string `bson:"compareToSourceId,omitempty" json:"compareToSourceId,omitempty"`
CompareToSourceExpression *string `bson:"compareToSourceExpression,omitempty" json:"compareToSourceExpression,omitempty"`
CompareToSourcePath *string `bson:"compareToSourcePath,omitempty" json:"compareToSourcePath,omitempty"`
ContentType *string `bson:"contentType,omitempty" json:"contentType,omitempty"`
Expression *string `bson:"expression,omitempty" json:"expression,omitempty"`
HeaderField *string `bson:"headerField,omitempty" json:"headerField,omitempty"`
MinimumId *string `bson:"minimumId,omitempty" json:"minimumId,omitempty"`
NavigationLinks *bool `bson:"navigationLinks,omitempty" json:"navigationLinks,omitempty"`
Operator *AssertionOperatorType `bson:"operator,omitempty" json:"operator,omitempty"`
Path *string `bson:"path,omitempty" json:"path,omitempty"`
RequestMethod *TestScriptRequestMethodCode `bson:"requestMethod,omitempty" json:"requestMethod,omitempty"`
RequestURL *string `bson:"requestURL,omitempty" json:"requestURL,omitempty"`
Resource *string `bson:"resource,omitempty" json:"resource,omitempty"`
Response *AssertionResponseTypes `bson:"response,omitempty" json:"response,omitempty"`
ResponseCode *string `bson:"responseCode,omitempty" json:"responseCode,omitempty"`
SourceId *string `bson:"sourceId,omitempty" json:"sourceId,omitempty"`
ValidateProfileId *string `bson:"validateProfileId,omitempty" json:"validateProfileId,omitempty"`
Value *string `bson:"value,omitempty" json:"value,omitempty"`
WarningOnly bool `bson:"warningOnly" json:"warningOnly"`
}
type TestScriptTest struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Name *string `bson:"name,omitempty" json:"name,omitempty"`
Description *string `bson:"description,omitempty" json:"description,omitempty"`
Action []TestScriptTestAction `bson:"action" json:"action"`
}
type TestScriptTestAction struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Operation *TestScriptSetupActionOperation `bson:"operation,omitempty" json:"operation,omitempty"`
Assert *TestScriptSetupActionAssert `bson:"assert,omitempty" json:"assert,omitempty"`
}
type TestScriptTeardown struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Action []TestScriptTeardownAction `bson:"action" json:"action"`
}
type TestScriptTeardownAction struct {
Id *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
Operation TestScriptSetupActionOperation `bson:"operation,omitempty" json:"operation,omitempty"`
}
type OtherTestScript TestScript
// MarshalJSON marshals the given TestScript as JSON into a byte slice
func (r TestScript) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
OtherTestScript
ResourceType string `json:"resourceType"`
}{
OtherTestScript: OtherTestScript(r),
ResourceType: "TestScript",
})
}
// UnmarshalTestScript unmarshals a TestScript.
func UnmarshalTestScript(b []byte) (TestScript, error) {
var testScript TestScript
if err := json.Unmarshal(b, &testScript); err != nil {
return testScript, err
}
return testScript, nil
}
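// Round-trip sketch (field values are placeholders): MarshalJSON injects the
// fixed "resourceType":"TestScript" key, and UnmarshalTestScript parses the
// bytes back into a TestScript value.
//
//	ts := TestScript{Url: "http://example.org/ts", Name: "demo"}
//	b, _ := ts.MarshalJSON()
//	ts2, err := UnmarshalTestScript(b)
//	_, _ = ts2, err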
|
package validators
import (
"math"
"sync"
"testing"
)
func TestGetFuncMap(t *testing.T) {
fm := GetFuncMap()
if fm != CustomFuncMap {
t.Fail()
}
}
func Test_customFuncMap_Set_Get(t *testing.T) {
type args struct {
tag string
validator Validator
}
tests := []struct {
name string
c *customFuncMap
args args
}{
{
name: "",
c: &customFuncMap{
rw: sync.RWMutex{},
validators: make(map[string]Validator),
},
args: args{
tag: "hello",
validator: func(args string, value, structure interface{}) error {
return nil
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.c.Set(tt.args.tag, tt.args.validator)
_, ok := tt.c.Get(tt.args.tag)
if !ok {
t.Fatalf("expected to find function %s in FuncMap", tt.args.tag)
}
})
}
}
func TestRequired(t *testing.T) {
var p *string
var f func()
var c chan int
var s []int
var m map[int]int
type args struct {
in0 string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{name: "should not fail", args: args{"", ""}},
{name: "should fail pointer", args: args{"", p}, wantErr: true},
{name: "should fail func", args: args{"", f}, wantErr: true},
{name: "should fail channel", args: args{"", c}, wantErr: true},
{name: "should fail slice", args: args{"", s}, wantErr: true},
{name: "should fail map", args: args{"", m}, wantErr: true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := Required(tt.args.in0, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("Required() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestIn(t *testing.T) {
type args struct {
args string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "should not fail (int)",
args: args{
args: "0|5|10",
value: 5,
},
wantErr: false,
},
{
name: "should fail (int)",
args: args{
args: "0|10",
value: 11,
},
wantErr: true,
},
{
name: "should not fail (string)",
args: args{
args: "hello|world",
value: "hello",
},
wantErr: false,
},
{
name: "should fail (string)",
args: args{
args: "hello",
value: "hello world",
},
wantErr: true,
},
{
name: "should not fail (string slice)",
args: args{
args: "hello|world",
value: []string{"hello", "world"},
},
wantErr: false,
},
{
name: "should fail (string slice)",
args: args{
args: "hello|world",
value: []string{"hello world"},
},
wantErr: true,
},
{
name: "should fail (slice of int)",
args: args{
args: "1|2",
value: []int{1, 3, 4},
},
wantErr: true,
},
{
name: "invalid parameters for numeric in",
args: args{
args: "hello|world",
value: 1,
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := In(tt.args.args, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("In() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestMaxchar(t *testing.T) {
type args struct {
args string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "invalid parameter",
args: args{
args: "hello",
value: "hello",
},
wantErr: true,
},
{
name: "invalid type",
args: args{
args: "10",
value: 123,
},
wantErr: true,
},
{
name: "should not fail",
args: args{
args: "10",
value: "hello",
},
},
{
name: "should fail",
args: args{
args: "10",
value: "hello world",
},
wantErr: true,
},
{
name: "should not fail (slice of string)",
args: args{
args: "10",
value: []string{"hello", "world"},
},
},
{
name: "should fail (slice of string)",
args: args{
args: "10",
value: []string{"hello world"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := Maxchar(tt.args.args, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("Maxchar() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBytesBetween(t *testing.T) {
type args struct {
args string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "broken range",
args: args{
args: "0..",
value: "hello",
},
wantErr: true,
},
{
name: "invalid parameter",
args: args{
args: "0..10",
value: []int{1, 2, 3},
},
wantErr: true,
},
{
name: "should not fail",
args: args{
args: "0..10",
value: "hello",
},
wantErr: false,
},
{
name: "should fail (too long)",
args: args{
args: "0..10",
value: "hello world",
},
wantErr: true,
},
{
name: "should not fail (multi-byte characters)",
args: args{
args: "0..3",
value: "愛",
},
wantErr: false,
},
{
name: "should not fail (multi-byte characters, closed range)",
args: args{
args: "3..3",
value: "愛",
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := BytesBetween(tt.args.args, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("BytesBetween() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBetween(t *testing.T) {
type args struct {
args string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "broken range",
args: args{
args: "0..",
value: 123,
},
wantErr: true,
},
{
name: "invalid parameter",
args: args{
args: "0..10",
value: []int{1, 2, 3},
},
wantErr: true,
},
{
name: "should not fail",
args: args{
args: "0..10",
value: "hello",
},
wantErr: false,
},
{
name: "should fail (too long)",
args: args{
args: "0..10",
value: "hello world",
},
wantErr: true,
},
{
name: "should not fail (int)",
args: args{
args: "0..10",
value: 5,
},
wantErr: false,
},
{
name: "should fail (int too big)",
args: args{
args: "0..10",
value: 11,
},
wantErr: true,
},
{
name: "should not fail (multi-byte characters)",
args: args{
args: "0..1",
value: "愛",
},
wantErr: false,
},
{
name: "should not fail (multi-byte characters, closed range)",
args: args{
args: "1..1",
value: "愛",
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := Between(tt.args.args, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("Between() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestEmptyString(t *testing.T) {
type args struct {
in0 string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "should not fail",
args: args{
value: "",
},
wantErr: false,
},
{
name: "should fail",
args: args{
value: "has text",
},
wantErr: true,
},
{
name: "invalid type",
args: args{
in0: "",
value: 123,
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := EmptyString(tt.args.in0, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("EmptyString() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestIsInt64(t *testing.T) {
type args struct {
in0 string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "should not fail (can convert to int64)",
args: args{
in0: "",
value: "123",
},
wantErr: false,
},
{
name: "should fail (cannot convert to int64)",
args: args{
in0: "",
value: "hello",
},
wantErr: true,
},
{
name: "invalid type",
args: args{
in0: "",
value: []string{"123"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := IsInt64(tt.args.in0, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("IsInt64() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestIsFloat64(t *testing.T) {
type args struct {
in0 string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "should not fail (can convert to float64)",
args: args{
in0: "",
value: "123",
},
wantErr: false,
},
{
name: "should fail (cannot convert to float64)",
args: args{
in0: "",
value: "hello",
},
wantErr: true,
},
{
name: "invalid type",
args: args{
in0: "",
value: []string{"123"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := IsFloat64(tt.args.in0, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("IsFloat64() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestMatches(t *testing.T) {
type args struct {
args string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "should fail (no matchers)",
args: args{
args: "gibberish",
value: "",
},
wantErr: true,
},
{
name: "should fail (no matchers []byte)",
args: args{
args: "gibberish",
value: []byte(``),
},
wantErr: true,
},
{
name: "should fail (no matchers []string)",
args: args{
args: "gibberish",
value: []string{},
},
wantErr: true,
},
{
name: "should not fail (match email)",
args: args{
args: "email",
value: "someone@gmail.com",
},
wantErr: false,
},
{
name: "should not fail (match email []byte)",
args: args{
args: "email",
value: []byte(`someone@gmail.com`),
},
wantErr: false,
},
{
name: "should fail (not email)",
args: args{
args: "email",
value: "",
},
wantErr: true,
},
{
name: "should fail (not email []byte)",
args: args{
args: "email",
value: []byte(``),
},
wantErr: true,
},
{
name: "should not fail (url)",
args: args{
args: "url",
value: "https://google.com",
},
wantErr: false,
},
{
name: "should not fail ([]string email)",
args: args{
args: "email",
value: []string{"someone@gmail.com"},
},
wantErr: false,
},
{
name: "should fail ([]string not email)",
args: args{
args: "email",
value: []string{""},
},
wantErr: true,
},
{
name: "unexpected type",
args: args{
args: "email",
value: map[string]string{},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := Matches(tt.args.args, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("Matches() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_bounds(t *testing.T) {
type args struct {
s string
}
tests := []struct {
name string
args args
wantMin float64
wantMax float64
wantErr bool
}{
{
name: "valid range",
args: args{
s: "0..10",
},
wantMin: 0.0,
wantMax: 10.0,
wantErr: false,
},
{
name: "valid negative range",
args: args{
s: "-10..-1",
},
wantMin: -10.0,
wantMax: -1.0,
wantErr: false,
},
{
name: "right range broken",
args: args{
s: "0..",
},
wantMin: 0,
wantMax: 0,
wantErr: true,
},
{
name: "left range broken",
args: args{
s: "..10",
},
wantMin: 0,
wantMax: 0,
wantErr: true,
},
{
name: "invalid range types",
args: args{
s: "hello..world",
},
wantMin: 0,
wantMax: 0,
wantErr: true,
},
{
name: "left invalid",
args: args{
s: "hello..10",
},
wantMin: 0,
wantMax: 0,
wantErr: true,
},
{
name: "right invalid",
args: args{
s: "0..hello",
},
wantMin: 0,
wantMax: 0,
wantErr: true,
},
{
name: "left wildcard",
args: args{
s: "*..10",
},
wantMin: -math.MaxFloat64,
wantMax: 10,
wantErr: false,
},
{
name: "right wilcard",
args: args{
s: "0..*",
},
wantMin: 0,
wantMax: math.MaxFloat64,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotMin, gotMax, err := bounds(tt.args.s)
if (err != nil) != tt.wantErr {
t.Errorf("bounds() error = %v, wantErr %v", err, tt.wantErr)
return
}
if gotMin != tt.wantMin {
t.Errorf("bounds() gotMin = %v, want %v", gotMin, tt.wantMin)
}
if gotMax != tt.wantMax {
t.Errorf("bounds() gotMax = %v, want %v", gotMax, tt.wantMax)
}
})
}
}
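// The cases above document the range syntax accepted by bounds(): "min..max"
// with numeric endpoints, where "*" on either side stands for an unbounded
// limit (±math.MaxFloat64) and any other malformed endpoint is rejected.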
func Test_checkLength(t *testing.T) {
type args struct {
args string
value interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "broken range",
args: args{
args: "0..",
value: "hello",
},
wantErr: true,
},
{
name: "should not fail",
args: args{
args: "0..10",
value: "hello",
},
wantErr: false,
},
{
name: "should fail (too long)",
args: args{
args: "0..10",
value: "hello world",
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := checkLength(tt.args.args, tt.args.value); (err != nil) != tt.wantErr {
t.Errorf("checkLength() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_strIn(t *testing.T) {
type args struct {
str string
values []string
}
tests := []struct {
name string
args args
want bool
}{
{
name: "",
args: args{
str: "",
values: nil,
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := strIn(tt.args.str, tt.args.values); got != tt.want {
t.Errorf("strIn() = %v, want %v", got, tt.want)
}
})
}
}
func Test_f64(t *testing.T) {
type args struct {
f float64
}
tests := []struct {
name string
args args
want string
}{
{
name: "float with precision",
args: args{
f: 64.12,
},
want: "64.12",
},
{
name: "float without precision",
args: args{
f: 64.0,
},
want: "64",
},
{
name: "max float",
args: args{
f: math.MaxFloat64,
},
want: "max float64",
},
{
name: "min float",
args: args{
f: -math.MaxFloat64,
},
want: "min float64",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := f64(tt.args.f); got != tt.want {
t.Errorf("f64() = %v, want %v", got, tt.want)
}
})
}
}
|
package cfs
import (
"bazil.org/fuse"
"bufio"
"bytes"
"errors"
"fmt"
"github.com/tigcode/containerfs/logger"
dp "github.com/tigcode/containerfs/proto/dp"
mp "github.com/tigcode/containerfs/proto/mp"
//"github.com/tigcode/containerfs/utils"
"golang.org/x/net/context"
"google.golang.org/grpc"
"io"
//"io/ioutil"
"math/rand"
"os"
"sort"
"strconv"
"sync"
"sync/atomic"
"time"
)
// MetaNodePeers ...
var MetaNodePeers []string
//MetaNodeAddr ...
var MetaNodeAddr string
// chunksize for write
const (
chunkSize = 64 * 1024 * 1024
oneExpandSize = 30 * 1024 * 1024 * 1024
BlockGroupSize = 5 * 1024 * 1024 * 1024
)
const (
FileNormal = 0
FileRetry = 1
FileError = 2
)
// BufferSize ...
var BufferSize int
// CFS ...
type CFS struct {
VolID string
Leader string
Conn *grpc.ClientConn
DataConnLocker sync.RWMutex
//DataConn map[string]*grpc.ClientConn
//Status int // 0 ok , 1 readonly, 2 invalid
}
/*
func (cfs *CFS) GetDataConn(addr string) (*grpc.ClientConn, error) {
cfs.DataConnLocker.RLock()
if v, ok := cfs.DataConn[addr]; ok {
cfs.DataConnLocker.RUnlock()
return v, nil
}
cfs.DataConnLocker.RUnlock()
return nil, errors.New("Key not exists")
}
func (cfs *CFS) SetDataConn(addr string, conn *grpc.ClientConn) {
cfs.DataConnLocker.Lock()
cfs.DataConn[addr] = conn
cfs.DataConnLocker.Unlock()
}
func (cfs *CFS) DelDataConn(addr string) {
cfs.DataConnLocker.Lock()
delete(cfs.DataConn, addr)
cfs.DataConnLocker.Unlock()
}
*/
func GetAllDatanode() (int32, []*mp.Datanode) {
conn, err := DialMeta("Cluster")
if err != nil {
logger.Error("GetAllDatanode failed,Dial to metanode fail :%v", err)
return -1, nil
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
pGetAllDatanodeReq := &mp.GetAllDatanodeReq{}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pGetAllDatanodeAck, err := mc.GetAllDatanode(ctx, pGetAllDatanodeReq)
if err != nil {
logger.Error("GetAllDatanode failed,grpc func err :%v", err)
return -1, nil
}
if pGetAllDatanodeAck.Ret != 0 {
logger.Error("GetAllDatanode failed,grpc func ret :%v", pGetAllDatanodeAck.Ret)
return -1, nil
}
return 0, pGetAllDatanodeAck.Datanodes
}
func DelDatanode(host string) int {
conn, err := DialMeta("Cluster")
if err != nil {
logger.Error("GetAllDatanode failed,Dial to metanode fail :%v", err)
return -1
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
pDelDatanodeReq := &mp.DelDatanodeReq{
Host: host,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
ack, err := mc.DelDatanode(ctx, pDelDatanodeReq)
if err != nil {
logger.Error("DelDatanode failed,grpc func err :%v", err)
return -1
}
if ack.Ret != 0 {
logger.Error("DelDatanode failed,grpc func ret :%v", ack.Ret)
return -1
}
return 0
}
// CreateVolbyMeta creates a volume via the Cluster leader metanode
func CreateVolbyMeta(name string, capacity string, tier string) int32 {
conn, err := DialMeta("Cluster")
if err != nil {
logger.Error("CreateVol failed,Dial to Cluster leader metanode fail :%v", err)
return -1
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
spaceQuota, _ := strconv.Atoi(capacity)
pCreateVolReq := &mp.CreateVolReq{
VolName: name,
SpaceQuota: int32(spaceQuota),
Tier: tier,
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
ack, err := mc.CreateVol(ctx, pCreateVolReq)
if err != nil {
logger.Error("CreateVol failed, Cluster leader metanode return failed, err:%v", err)
return -1
}
if ack.Ret != 0 {
logger.Error("CreateVol failed, Cluster leader metanode return failed, ret:%v", ack.Ret)
if ack.UUID != "" {
DeleteVol(ack.UUID)
}
return -1
}
fmt.Println(ack.UUID)
return 0
}
// ExpandVolRS expands the volume resources once, for the fuse client
func ExpandVolRS(UUID string, MtPath string) int32 {
path := MtPath + "/expanding"
fd, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
return -2
}
defer fd.Close()
conn, err := DialMeta("Cluster")
if err != nil {
logger.Error("ExpandVolRS failed,Dial to Cluster leader metanode fail :%v", err)
return -1
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
pExpandVolRSReq := &mp.ExpandVolRSReq{
VolID: UUID,
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
pExpandVolRSAck, err := mc.ExpandVolRS(ctx, pExpandVolRSReq)
if err != nil {
logger.Error("ExpandVol once volume:%v failed, Cluster leader metanode return error:%v", UUID, err)
os.Remove(path)
return -1
}
if pExpandVolRSAck.Ret == -1 {
logger.Error("ExpandVol once volume:%v failed, Cluster leader metanode return -1:%v", UUID)
os.Remove(path)
return -1
} else if pExpandVolRSAck.Ret == 0 {
logger.Error("ExpandVol volume:%v once failed, Cluster leader metanode return 0 because volume totalsize not enough expand", UUID)
os.Remove(path)
return 0
}
out := UpdateMetaForExpandVol(UUID, pExpandVolRSAck)
if out != 0 {
logger.Error("ExpandVol volume:%v once cluster leader metanode success but update volume leader metanode fail, so rollback cluster leader metanode this expand resource", UUID)
pDelReq := &mp.DelVolRSForExpandReq{
UUID: UUID,
BGPS: pExpandVolRSAck.BGPS,
}
pDelAck, err := mc.DelVolRSForExpand(ctx, pDelReq)
if err != nil || pDelAck.Ret != 0 {
logger.Error("ExpandVol once volume:%v success but update meta failed, then rollback cluster leader metanode error", UUID)
}
os.Remove(path)
return -1
}
os.Remove(path)
return 1
}
func UpdateMetaForExpandVol(UUID string, ack *mp.ExpandVolRSAck) int {
var mpBlockGroups []*mp.BlockGroup
for _, v := range ack.BGPS {
mpBlockGroup := &mp.BlockGroup{
BlockGroupID: v.Blocks[0].BGID,
FreeSize: BlockGroupSize,
}
mpBlockGroups = append(mpBlockGroups, mpBlockGroup)
}
logger.Debug("ExpandVolRS volume:%v to leader metanode BlockGroups Info:%v", UUID, mpBlockGroups)
// Meta handle
conn2, err := DialMeta(UUID)
if err != nil {
logger.Error("ExpandVol volume:%v once volmgr success but Dial to metanode fail :%v", UUID, err)
return -1
}
defer conn2.Close()
mc := mp.NewMetaNodeClient(conn2)
pmExpandNameSpaceReq := &mp.ExpandNameSpaceReq{
VolID: UUID,
BlockGroups: mpBlockGroups,
}
ctx2, _ := context.WithTimeout(context.Background(), 10*time.Second)
pmExpandNameSpaceAck, err := mc.ExpandNameSpace(ctx2, pmExpandNameSpaceReq)
if err != nil {
logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return error:%v", UUID, err)
return -1
}
if pmExpandNameSpaceAck.Ret != 0 {
logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return not equal 0:%v", UUID)
return -1
}
return 0
}
// ExpandVolTS expands the volume total size (for the CLI)
func ExpandVolTS(UUID string, expandQuota string) int32 {
conn, err := DialMeta("Cluster")
if err != nil {
logger.Error("ExpandVolTS failed,Dial to Cluster leader metanode fail :%v", err)
return -1
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
tmpExpandQuota, _ := strconv.Atoi(expandQuota)
pExpandVolTSReq := &mp.ExpandVolTSReq{
VolID: UUID,
ExpandQuota: int32(tmpExpandQuota),
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
pExpandVolTSAck, err := mc.ExpandVolTS(ctx, pExpandVolTSReq)
if err != nil {
logger.Error("Expand Vol:%v TotalSize:%v but VolMgr return error:%v", UUID, expandQuota, err)
return -1
}
if pExpandVolTSAck.Ret != 0 {
logger.Error("Expand Vol:%v TotalSize:%v but VolMgr return -1", UUID, expandQuota)
return -1
}
return 0
}
// Migrate bad DataNode blocks data to some Good DataNodes
func Migrate(host string) int32 {
conn, err := DialMeta("Cluster")
if err != nil {
logger.Error("Migrate failed,Dial to metanode fail :%v", err)
return -1
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
pMigrateReq := &mp.MigrateReq{
DataNodeHost: host,
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
_, err = mc.Migrate(ctx, pMigrateReq)
if err != nil {
logger.Error("Migrate bad DataNode(%v) all Blocks not finished err : %v", host, err)
return -1
}
return 0
}
// GetVolInfo volume info
func GetVolInfo(name string) (int32, *mp.GetVolInfoAck) {
conn, err := DialMeta("Cluster")
if err != nil {
logger.Error("GetVolInfo failed,Dial to metanode fail :%v", err)
return -1, &mp.GetVolInfoAck{}
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
pGetVolInfoReq := &mp.GetVolInfoReq{
UUID: name,
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
ack, err := mc.GetVolInfo(ctx, pGetVolInfoReq)
if err != nil || ack.Ret != 0 {
return -1, &mp.GetVolInfoAck{}
}
return 0, ack
}
// SnapShotVol ...
func SnapShotVol(uuid string) int32 {
// send to metadata to delete a map
conn, err := DialMeta(uuid)
if err != nil {
logger.Error("SnapShotVol failed,Dial to metanode fail :%v", err)
return -1
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
pmSnapShootNameSpaceReq := &mp.SnapShootNameSpaceReq{
VolID: uuid,
Type: 0,
}
ctx, _ := context.WithTimeout(context.Background(), 100*time.Second)
pmSnapShootNameSpaceAck, err := mc.SnapShotNameSpace(ctx, pmSnapShootNameSpaceReq)
if err != nil {
logger.Error("SnapShotVol failed,grpc func err :%v", err)
return -1
}
if pmSnapShootNameSpaceAck.Ret != 0 {
logger.Error("SnapShotVol failed,rpc func ret:%v", pmSnapShootNameSpaceAck.Ret)
return -1
}
return 0
}
func GetVolumeLeader(uuid string) string {
leader, err := GetLeader(uuid)
if err != nil {
return "no leader"
}
return leader
}
// DeleteVol function
func DeleteVol(uuid string) int32 {
// send to metadata to delete a map
conn2, err := DialMeta(uuid)
if err != nil {
logger.Error("DeleteVol failed,Dial to volume leader metanode fail :%v\n", err)
return -1
}
defer conn2.Close()
mc := mp.NewMetaNodeClient(conn2)
pmDeleteNameSpaceReq := &mp.DeleteNameSpaceReq{
VolID: uuid,
Type: 0,
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
pmDeleteNameSpaceAck, err := mc.DeleteNameSpace(ctx, pmDeleteNameSpaceReq)
if err != nil {
return -1
}
if pmDeleteNameSpaceAck.Ret != 0 {
logger.Error("DeleteNameSpace failed :%v", pmDeleteNameSpaceAck.Ret)
return -1
}
conn2, err = DialMeta("Cluster")
if err != nil {
logger.Error("DeleteVol failed,Dial to Cluster leader metanode fail :%v\n", err)
return -1
}
defer conn2.Close()
mc = mp.NewMetaNodeClient(conn2)
pDeleteVolReq := &mp.DeleteVolReq{
UUID: uuid,
}
ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
pDeleteVolAck, err := mc.DeleteVol(ctx, pDeleteVolReq)
if err != nil {
logger.Error("DeleteVol volume from Cluster leader failed,grpc func err :%v", err)
return -1
}
if pDeleteVolAck.Ret != 0 {
logger.Error("DeleteVol from Cluster leader failed,grpc func ret :%v", pDeleteVolAck.Ret)
return -1
}
return 0
}
// GetFSInfo ...
func GetFSInfo(name string) (int32, *mp.GetFSInfoAck) {
conn, err := DialMeta(name)
if err != nil {
logger.Error("GetFSInfo failed,Dial to metanode fail :%v\n", err)
return -1, nil
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
pGetFSInfoReq := &mp.GetFSInfoReq{
VolID: name,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pGetFSInfoAck, err := mc.GetFSInfo(ctx, pGetFSInfoReq)
if err != nil {
logger.Error("GetFSInfo failed,grpc func err :%v", err)
return 1, nil
}
if pGetFSInfoAck.Ret != 0 {
logger.Error("GetFSInfo failed,grpc func ret :%v", pGetFSInfoAck.Ret)
return 1, nil
}
return 0, pGetFSInfoAck
}
// OpenFileSystem ...
func OpenFileSystem(UUID string) *CFS {
leader, err := GetLeader(UUID)
if err != nil {
return nil
}
conn, err := DialMeta(UUID)
if conn == nil || err != nil {
return nil
}
cfs := CFS{VolID: UUID, Conn: conn, Leader: leader}
ticker := time.NewTicker(time.Millisecond * 500)
go func() {
for range ticker.C {
leader, err := GetLeader(UUID)
if err != nil {
cfs.Leader = ""
if cfs.Conn != nil {
cfs.Conn.Close()
}
cfs.Conn = nil
logger.Error("Leader Timer : Get leader failed ,volumeID : %s", UUID)
continue
}
if leader != cfs.Leader {
conn, err := DialMeta(UUID)
if conn == nil || err != nil {
logger.Error("Leader Timer : DialMeta failed ,volumeID : %s", UUID)
continue
}
cfs.Leader = leader
if cfs.Conn != nil {
cfs.Conn.Close()
}
cfs.Conn = conn
}
}
}()
return &cfs
}
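// Typical call sequence (sketch; volUUID and rootInode are placeholders):
// open the volume once, then issue metadata operations through the returned
// handle. The ticker above keeps cfs.Conn pointed at the current leader.
//
//	cfs := OpenFileSystem(volUUID)
//	if cfs != nil {
//		ret, ino := cfs.CreateDirDirect(rootInode, "data")
//		_, _ = ret, ino
//	}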
// CreateDirDirect ...
func (cfs *CFS) CreateDirDirect(pinode uint64, name string) (int32, uint64) {
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1, 0
}
mc := mp.NewMetaNodeClient(cfs.Conn)
pCreateDirDirectReq := &mp.CreateDirDirectReq{
PInode: pinode,
Name: name,
VolID: cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pCreateDirDirectAck, err := mc.CreateDirDirect(ctx, pCreateDirDirectReq)
if err != nil {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1, 0
}
mc = mp.NewMetaNodeClient(cfs.Conn)
ctx, _ = context.WithTimeout(context.Background(), 5*time.Second)
pCreateDirDirectAck, err = mc.CreateDirDirect(ctx, pCreateDirDirectReq)
if err != nil {
return -1, 0
}
}
return pCreateDirDirectAck.Ret, pCreateDirDirectAck.Inode
}
// GetInodeInfoDirect ...
func (cfs *CFS) GetInodeInfoDirect(pinode uint64, name string) (int32, uint64, *mp.InodeInfo) {
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1, 0, nil
}
mc := mp.NewMetaNodeClient(cfs.Conn)
pGetInodeInfoDirectReq := &mp.GetInodeInfoDirectReq{
PInode: pinode,
Name: name,
VolID: cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pGetInodeInfoDirectAck, err := mc.GetInodeInfoDirect(ctx, pGetInodeInfoDirectReq)
if err != nil {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1, 0, nil
}
mc = mp.NewMetaNodeClient(cfs.Conn)
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pGetInodeInfoDirectAck, err = mc.GetInodeInfoDirect(ctx, pGetInodeInfoDirectReq)
if err != nil {
return -1, 0, nil
}
}
return pGetInodeInfoDirectAck.Ret, pGetInodeInfoDirectAck.Inode, pGetInodeInfoDirectAck.InodeInfo
}
// StatDirect ...
func (cfs *CFS) StatDirect(pinode uint64, name string) (int32, bool, uint64) {
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1, false, 0
}
mc := mp.NewMetaNodeClient(cfs.Conn)
pStatDirectReq := &mp.StatDirectReq{
PInode: pinode,
Name: name,
VolID: cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pStatDirectAck, err := mc.StatDirect(ctx, pStatDirectReq)
if err != nil {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1, false, 0
}
mc = mp.NewMetaNodeClient(cfs.Conn)
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pStatDirectAck, err = mc.StatDirect(ctx, pStatDirectReq)
if err != nil {
return -1, false, 0
}
}
return pStatDirectAck.Ret, pStatDirectAck.InodeType, pStatDirectAck.Inode
}
// ListDirect ...
func (cfs *CFS) ListDirect(pinode uint64) (int32, []*mp.DirentN) {
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1, nil
}
mc := mp.NewMetaNodeClient(cfs.Conn)
pListDirectReq := &mp.ListDirectReq{
PInode: pinode,
VolID: cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 60*time.Second)
pListDirectAck, err := mc.ListDirect(ctx, pListDirectReq)
if err != nil {
return -1, nil
}
return pListDirectAck.Ret, pListDirectAck.Dirents
}
// DeleteDirDirect ...
func (cfs *CFS) DeleteDirDirect(pinode uint64, name string) int32 {
ret, _, inode := cfs.StatDirect(pinode, name)
if ret != 0 {
logger.Debug("DeleteDirDirect StatDirect Failed , no such dir")
return 0
}
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1
}
mc := mp.NewMetaNodeClient(cfs.Conn)
pListDirectReq := &mp.ListDirectReq{
PInode: inode,
VolID: cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pListDirectAck, err := mc.ListDirect(ctx, pListDirectReq)
if err != nil {
logger.Error("DeleteDirDirect ListDirect :%v\n", err)
return -1
}
for _, v := range pListDirectAck.Dirents {
if v.InodeType {
ret := cfs.DeleteFileDirect(inode, v.Name)
if ret != 0 {
return ret
}
} else {
ret := cfs.DeleteDirDirect(inode, v.Name)
if ret != 0 {
return ret
}
}
}
pDeleteDirDirectReq := &mp.DeleteDirDirectReq{
PInode: pinode,
Name: name,
VolID: cfs.VolID,
}
ctx, _ = context.WithTimeout(context.Background(), 60*time.Second)
pDeleteDirDirectAck, err := mc.DeleteDirDirect(ctx, pDeleteDirDirectReq)
if err != nil {
return -1
}
return pDeleteDirDirectAck.Ret
}
// RenameDirect ...
func (cfs *CFS) RenameDirect(oldpinode uint64, oldname string, newpinode uint64, newname string) int32 {
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1
}
mc := mp.NewMetaNodeClient(cfs.Conn)
pRenameDirectReq := &mp.RenameDirectReq{
OldPInode: oldpinode,
OldName: oldname,
NewPInode: newpinode,
NewName: newname,
VolID: cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pRenameDirectAck, err := mc.RenameDirect(ctx, pRenameDirectReq)
if err != nil {
return -1
}
return pRenameDirectAck.Ret
}
// CreateFileDirect ...
func (cfs *CFS) CreateFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
var writer int32
if flags&os.O_EXCL != 0 {
if ret, _, _ := cfs.StatDirect(pinode, name); ret == 0 {
return 17, nil
}
}
ret, inode := cfs.createFileDirect(pinode, name)
if ret != 0 {
return ret, nil
}
cfile := CFile{
OpenFlag: flags,
cfs: cfs,
Writer: writer,
FileSize: 0,
ParentInodeID: pinode,
Inode: inode,
Name: name,
wBuffer: wBuffer{buffer: new(bytes.Buffer), freeSize: BufferSize},
DataCache: make(map[uint64]*Data),
DataQueue: make(chan *chanData, 1),
CloseSignal: make(chan struct{}, 10),
WriteErrSignal: make(chan bool, 2),
//WriteRetrySignal: make(chan bool, 1024),
ReaderMap: make(map[fuse.HandleID]*ReaderInfo),
}
go cfile.WriteThread()
return 0, &cfile
}
// OpenFileDirect ...
func (cfs *CFS) OpenFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
var ret int32
var writer int32
var tmpFileSize int64
cfile := CFile{}
logger.Debug("OpenFileDirect: name: %v, flags: %v\n", name, flags)
if (flags&os.O_WRONLY) != 0 || (flags&os.O_RDWR) != 0 {
chunkInfos := make([]*mp.ChunkInfoWithBG, 0)
var inode uint64
if ret, chunkInfos, inode = cfs.GetFileChunksDirect(pinode, name); ret != 0 {
return ret, nil
}
if len(chunkInfos) > 0 {
for i := range chunkInfos {
tmpFileSize += int64(chunkInfos[i].ChunkSize)
}
cfile = CFile{
OpenFlag: flags,
cfs: cfs,
Writer: writer,
FileSize: tmpFileSize,
ParentInodeID: pinode,
Inode: inode,
wBuffer: wBuffer{buffer: new(bytes.Buffer), freeSize: BufferSize},
Name: name,
chunks: chunkInfos,
DataCache: make(map[uint64]*Data),
DataQueue: make(chan *chanData, 1),
CloseSignal: make(chan struct{}, 10),
WriteErrSignal: make(chan bool, 2),
//WriteRetrySignal: make(chan bool, 1024),
ReaderMap: make(map[fuse.HandleID]*ReaderInfo),
}
} else {
cfile = CFile{
OpenFlag: flags,
cfs: cfs,
Writer: writer,
FileSize: 0,
ParentInodeID: pinode,
Inode: inode,
Name: name,
wBuffer: wBuffer{buffer: new(bytes.Buffer), freeSize: BufferSize},
DataCache: make(map[uint64]*Data),
DataQueue: make(chan *chanData, 1),
CloseSignal: make(chan struct{}, 10),
WriteErrSignal: make(chan bool, 2),
//WriteRetrySignal: make(chan bool, 1024),
ReaderMap: make(map[fuse.HandleID]*ReaderInfo),
}
}
go cfile.WriteThread()
} else {
chunkInfos := make([]*mp.ChunkInfoWithBG, 0)
var inode uint64
if ret, chunkInfos, inode = cfs.GetFileChunksDirect(pinode, name); ret != 0 {
logger.Error("OpenFile failed , GetFileChunksDirect failed !")
return ret, nil
}
for i := range chunkInfos {
tmpFileSize += int64(chunkInfos[i].ChunkSize)
}
cfile = CFile{
OpenFlag: flags,
cfs: cfs,
Writer: writer,
FileSize: tmpFileSize,
ParentInodeID: pinode,
Inode: inode,
Name: name,
chunks: chunkInfos,
ReaderMap: make(map[fuse.HandleID]*ReaderInfo),
}
}
return 0, &cfile
}
// UpdateOpenFileDirect ...
func (cfs *CFS) UpdateOpenFileDirect(pinode uint64, name string, cfile *CFile, flags int) int32 {
if (flags&os.O_WRONLY) != 0 || (flags&os.O_RDWR) != 0 {
chunkInfos := make([]*mp.ChunkInfoWithBG, 0)
var ret int32
if ret, chunkInfos, _ = cfs.GetFileChunksDirect(pinode, name); ret != 0 {
return ret
}
cfile.chunks = chunkInfos
}
return 0
}
// createFileDirect ...
func (cfs *CFS) createFileDirect(pinode uint64, name string) (int32, uint64) {
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1, 0
}
mc := mp.NewMetaNodeClient(cfs.Conn)
pCreateFileDirectReq := &mp.CreateFileDirectReq{
PInode: pinode,
Name: name,
VolID: cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pCreateFileDirectAck, err := mc.CreateFileDirect(ctx, pCreateFileDirectReq)
if err != nil || pCreateFileDirectAck.Ret != 0 {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1, 0
}
mc = mp.NewMetaNodeClient(cfs.Conn)
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pCreateFileDirectAck, err = mc.CreateFileDirect(ctx, pCreateFileDirectReq)
if err != nil {
logger.Error("CreateFileDirect failed,grpc func failed :%v\n", err)
return -1, 0
}
}
if pCreateFileDirectAck.Ret == 1 {
return 1, 0
}
if pCreateFileDirectAck.Ret == 2 {
return 2, 0
}
if pCreateFileDirectAck.Ret == 17 {
return 17, 0
}
return 0, pCreateFileDirectAck.Inode
}
// DeleteFileDirect ...
func (cfs *CFS) DeleteFileDirect(pinode uint64, name string) int32 {
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1
}
mc := mp.NewMetaNodeClient(cfs.Conn)
mpDeleteFileDirectReq := &mp.DeleteFileDirectReq{
PInode: pinode,
Name: name,
VolID: cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
mpDeleteFileDirectAck, err := mc.DeleteFileDirect(ctx, mpDeleteFileDirectReq)
if err != nil || mpDeleteFileDirectAck.Ret != 0 {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
return -1
}
mc = mp.NewMetaNodeClient(cfs.Conn)
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
mpDeleteFileDirectAck, err = mc.DeleteFileDirect(ctx, mpDeleteFileDirectReq)
if err != nil {
logger.Error("DeleteFile failed,grpc func err :%v\n", err)
return -1
}
}
//go func() {
ret, chunkInfos, _ := cfs.GetFileChunksDirect(pinode, name)
if ret == 0 && chunkInfos != nil {
for _, v1 := range chunkInfos {
for _, v2 := range v1.BGP.Blocks {
conn, err := DialData(v2.Host)
if err != nil || conn == nil {
time.Sleep(time.Second)
conn, err = DialData(v2.Host)
if err != nil || conn == nil {
logger.Error("DeleteFile failed,Dial to datanode fail :%v\n", err)
continue
}
}
dc := dp.NewDataNodeClient(conn)
dpDeleteChunkReq := &dp.DeleteChunkReq{
ChunkID: v1.ChunkID,
BlockID: v2.BlkID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
_, err = dc.DeleteChunk(ctx, dpDeleteChunkReq)
// close the connection right away instead of deferring inside the loop,
// which would keep every connection open until the function returns
conn.Close()
if err != nil {
logger.Error("DeleteFile failed,rpc to datanode fail :%v\n", err)
continue
}
}
}
}
//}()
return mpDeleteFileDirectAck.Ret
}
// GetFileChunksDirect ...
func (cfs *CFS) GetFileChunksDirect(pinode uint64, name string) (int32, []*mp.ChunkInfoWithBG, uint64) {
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
logger.Error("GetFileChunksDirect cfs.Conn nil ...")
return -1, nil, 0
}
mc := mp.NewMetaNodeClient(cfs.Conn)
pGetFileChunksDirectReq := &mp.GetFileChunksDirectReq{
PInode: pinode,
Name: name,
VolID: cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pGetFileChunksDirectAck, err := mc.GetFileChunksDirect(ctx, pGetFileChunksDirectReq)
if err != nil || pGetFileChunksDirectAck.Ret != 0 {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
if cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfs.Conn == nil {
logger.Error("GetFileChunksDirect cfs.Conn nil ...")
return -1, nil, 0
}
mc = mp.NewMetaNodeClient(cfs.Conn)
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pGetFileChunksDirectAck, err = mc.GetFileChunksDirect(ctx, pGetFileChunksDirectReq)
if err != nil {
logger.Error("GetFileChunks failed,grpc func failed :%v\n", err)
return -1, nil, 0
}
}
logger.Debug("GetFileChunksDirect pGetFileChunksDirectAck %v", pGetFileChunksDirectAck)
return pGetFileChunksDirectAck.Ret, pGetFileChunksDirectAck.ChunkInfos, pGetFileChunksDirectAck.Inode
}
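// Data holds one pending write buffer; it stays in DataCache until the
// datanode acknowledges the corresponding commit ID.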
type Data struct {
DataBuf *bytes.Buffer
Status int32
timer *time.Timer
ID uint64
}
// ReaderInfo ...
type ReaderInfo struct {
LastOffset int64
readBuf []byte
Ch chan *bytes.Buffer
}
type wBuffer struct {
freeSize int // chunk size
buffer *bytes.Buffer // chunk data
}
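// chanData wraps a single write payload passed through the DataQueue channel.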
type chanData struct {
data []byte
}
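// Chunk tracks the chunk currently being written, including its remaining
// free space and the replication stream to the datanode.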
type Chunk struct {
CFile *CFile
ChunkFreeSize int
ChunkInfo *mp.ChunkInfoWithBG
ChunkWriteSteam dp.DataNode_C2MReplClient
ChunkWriteRecvExitSignal chan struct{}
}
// CFile ...
type CFile struct {
cfs *CFS
ParentInodeID uint64
Name string
Inode uint64
OpenFlag int
FileSize int64
Status int32 // 0 ok
// for write
wBuffer wBuffer
atomicNum uint64
Writer int32
DataCacheLocker sync.RWMutex
DataCache map[uint64]*Data
DataQueue chan *chanData
WriteErrSignal chan bool
WriteRetrySignal chan bool
Closing bool
CloseSignal chan struct{}
CurChunk *Chunk
WriteLocker sync.Mutex
// for read
//lastoffset int64
RMutex sync.Mutex
chunks []*mp.ChunkInfoWithBG // chunkinfo
//readBuf []byte
ReaderMap map[fuse.HandleID]*ReaderInfo
}
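// generateRandomNumber returns count distinct random integers in [start, end),
// or nil if the interval is too small.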
func generateRandomNumber(start int, end int, count int) []int {
if end < start || (end-start) < count {
return nil
}
nums := make([]int, 0)
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for len(nums) < count {
num := r.Intn((end - start)) + start
exist := false
for _, v := range nums {
if v == num {
exist = true
break
}
}
if !exist {
nums = append(nums, num)
}
}
return nums
}
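// streamread reads [offset, offset+size) of the given chunk from one of its
// block replicas, trying the replicas in random order, and sends the result
// (or an empty buffer on total failure) on ch.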
func (cfile *CFile) streamread(chunkidx int, ch chan *bytes.Buffer, offset int64, size int64) {
var conn *grpc.ClientConn
var err error
var buffer *bytes.Buffer
outflag := 0
inflag := 0
idxs := generateRandomNumber(0, 3, 3)
for n := 0; n < len(cfile.chunks[chunkidx].BGP.Blocks); n++ {
i := idxs[n]
buffer = new(bytes.Buffer)
addr := cfile.chunks[chunkidx].BGP.Blocks[i].Host
conn, err = DialData(addr)
if err != nil || conn == nil {
time.Sleep(time.Second)
conn, err = DialData(addr)
if err != nil || conn == nil {
logger.Error("streamread failed,Dial to datanode fail :%v", err)
outflag++
continue
}
}
dc := dp.NewDataNodeClient(conn)
streamreadChunkReq := &dp.StreamReadChunkReq{
ChunkID: cfile.chunks[chunkidx].ChunkID,
BlockID: cfile.chunks[chunkidx].BGP.Blocks[i].BlkID,
Offset: offset,
Readsize: size,
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
stream, err := dc.StreamReadChunk(ctx, streamreadChunkReq)
if err != nil {
conn.Close()
conn, err = DialData(addr)
if err != nil || conn == nil {
logger.Error("StreamReadChunk DialData error:%v, so retry other datanode!", err)
outflag++
continue
} else {
dc = dp.NewDataNodeClient(conn)
streamreadChunkReq := &dp.StreamReadChunkReq{
ChunkID: cfile.chunks[chunkidx].ChunkID,
BlockID: cfile.chunks[chunkidx].BGP.Blocks[i].BlkID,
Offset: offset,
Readsize: size,
}
ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
stream, err = dc.StreamReadChunk(ctx, streamreadChunkReq)
if err != nil {
conn.Close()
logger.Error("StreamReadChunk StreamReadChunk error:%v, so retry other datanode!", err)
outflag++
continue
}
}
}
for {
ack, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
logger.Error("=== streamreadChunkReq Recv err:%v ===", err)
inflag++
outflag++
break
}
if ack != nil {
if len(ack.Databuf) == 0 {
continue
} else {
buffer.Write(ack.Databuf)
inflag = 0
}
} else {
continue
}
}
if inflag == 0 {
ch <- buffer
break
} else if inflag == 3 {
buffer = new(bytes.Buffer)
buffer.Write([]byte{})
logger.Error("Stream Read the chunk three copy Recv error")
ch <- buffer
break
} else if inflag < 3 {
logger.Error("Stream Read the chunk %v copy Recv error, so need retry other datanode!!!", inflag)
continue
}
}
if outflag >= 3 {
buffer = new(bytes.Buffer)
buffer.Write([]byte{})
logger.Error("Stream Read the chunk three copy Datanode error")
ch <- buffer
}
}
// Read ...
func (cfile *CFile) Read(handleID fuse.HandleID, data *[]byte, offset int64, readsize int64) int64 {
// read data from write buffer
if cfile.chunks == nil || len(cfile.chunks) == 0 {
logger.Error("Read File but Chunks not exist")
return -1
}
if offset+readsize > cfile.FileSize {
readsize = cfile.FileSize - offset
}
var length int64
var freeOffset int64
var freeSize int64
var beginChunkNum int
var endChunkNum int
curOffset := offset
for i, v := range cfile.chunks {
freeOffset = curOffset - int64(v.ChunkSize)
if freeOffset < 0 {
beginChunkNum = i
break
} else {
curOffset = freeOffset
}
}
curSize := offset + readsize
for i, v := range cfile.chunks {
freeSize = curSize - int64(v.ChunkSize)
if freeSize <= 0 {
endChunkNum = i
break
} else {
curSize = freeSize
}
}
var eachReadLen int64
freesize := readsize
if endChunkNum < beginChunkNum {
logger.Error("This Read data from beginchunk:%v lager than endchunk:%v", beginChunkNum, endChunkNum)
return -1
}
if beginChunkNum > len(cfile.chunks) || endChunkNum+1 > len(cfile.chunks) || beginChunkNum > cap(cfile.chunks) || endChunkNum+1 > cap(cfile.chunks) {
logger.Error("Read begin or end chunk num not right")
return -1
}
//for i, _ := range cfile.chunks[beginChunkNum : endChunkNum+1] {
for i := 0; i < len(cfile.chunks[beginChunkNum:endChunkNum+1]); i++ {
index := i + beginChunkNum
if curOffset+freesize < int64(cfile.chunks[index].ChunkSize) {
eachReadLen = freesize
} else {
eachReadLen = int64(cfile.chunks[index].ChunkSize) - curOffset
}
if len(cfile.ReaderMap[handleID].readBuf) == 0 {
buffer := new(bytes.Buffer)
cfile.ReaderMap[handleID].Ch = make(chan *bytes.Buffer)
go cfile.streamread(index, cfile.ReaderMap[handleID].Ch, 0, int64(cfile.chunks[index].ChunkSize))
buffer = <-cfile.ReaderMap[handleID].Ch
if buffer.Len() == 0 {
logger.Error("Recv chunk:%v from datanode size:%v , but retsize is 0", index, cfile.chunks[index].ChunkSize)
return -1
}
cfile.ReaderMap[handleID].readBuf = buffer.Next(buffer.Len())
buffer.Reset()
buffer = nil
//logger.Debug("#### Read chunk:%v == bufferlen:%v == curoffset:%v == eachlen:%v ==offset:%v == readsize:%v ####", index, len(cfile.ReaderMap[handleID].readBuf), curOffset, eachReadLen, offset, readsize)
}
buflen := int64(len(cfile.ReaderMap[handleID].readBuf))
bufcap := int64(cap(cfile.ReaderMap[handleID].readBuf))
if curOffset > buflen || curOffset > bufcap {
logger.Error("== Read chunk:%v from datanode (offset:%v -- needreadsize:%v) lager than exist (buflen:%v -- bufcap:%v)\n", index, curOffset, eachReadLen, buflen, bufcap)
return -1
}
if curOffset+eachReadLen > buflen {
eachReadLen = buflen - curOffset
}
*data = append(*data, cfile.ReaderMap[handleID].readBuf[curOffset:curOffset+eachReadLen]...)
curOffset += eachReadLen
if curOffset == int64(len(cfile.ReaderMap[handleID].readBuf)) {
curOffset = 0
cfile.ReaderMap[handleID].readBuf = []byte{}
}
freesize = freesize - eachReadLen
length += eachReadLen
}
return length
}
// Write ...
func (cfile *CFile) Write(buf []byte, length int32) int32 {
if cfile.Status == FileError {
return -2
}
data := &chanData{}
data.data = append(data.data, buf...)
select {
case <-cfile.WriteErrSignal:
logger.Error("Write recv WriteErrSignal ,volumeid %v , pid %v ,fname %v!", cfile.cfs.VolID, cfile.ParentInodeID, cfile.Name)
return -2
case cfile.DataQueue <- data:
}
return length
}
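// WriteThread consumes the DataQueue and hands every payload to WriteHandler
// until the queue is closed or the file enters the error state.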
func (cfile *CFile) WriteThread() {
logger.Debug("Write Thread: file %v start writethread!\n", cfile.Name)
for cfile.Status != FileError {
select {
case chanData := <-cfile.DataQueue:
if chanData == nil {
logger.Debug("WriteThread recv channel close ...")
var ti uint32
for cfile.Status == FileNormal {
if len(cfile.DataCache) == 0 {
logger.Debug("WriteThread cfile.DataCache == 0 ")
break
}
ti++
time.Sleep(time.Millisecond * 5)
}
if cfile.CurChunk.ChunkWriteSteam != nil {
cfile.CurChunk.ChunkWriteSteam.CloseSend()
}
cfile.CloseSignal <- struct{}{}
return
} else {
newData := &Data{}
atomic.AddUint64(&cfile.atomicNum, 1)
newData.ID = cfile.atomicNum
newData.DataBuf = new(bytes.Buffer)
newData.DataBuf.Write(chanData.data)
newData.Status = 1
if err := cfile.WriteHandler(newData); err != nil {
logger.Error("WriteThread WriteHandler err !!%v", err)
cfile.Status = FileError
logger.Debug("WriteHandler send WriteErrSignal")
cfile.WriteErrSignal <- true
}
}
}
}
}
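// WriteHandler allocates a new chunk when needed, caches newData until it is
// acknowledged, and sends it on the current chunk's replication stream.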
func (cfile *CFile) WriteHandler(newData *Data) error {
length := newData.DataBuf.Len()
logger.Debug("WriteHandler: file %v, num:%v, length: %v, \n", cfile.Name, cfile.atomicNum, length)
var ret int32
ALLOCATECHUNK:
if cfile.CurChunk != nil {
if cfile.CurChunk.ChunkFreeSize-length < 0 {
if cfile.CurChunk.ChunkWriteSteam != nil {
var ti uint32
logger.Debug("WriteHandler: file %v, begin waiting last chunk: %v\n", cfile.Name, len(cfile.DataCache))
for cfile.Status == FileNormal {
if len(cfile.DataCache) == 0 {
break
}
time.Sleep(time.Millisecond * 2)
ti++
}
if cfile.Status == FileError {
return errors.New("cfile status err")
}
logger.Debug("WriteHandler: file %v, end wait after %v ms\n", cfile.Name, ti)
cfile.CurChunk.ChunkWriteSteam.CloseSend()
flag := false
for retryCnt := 0; retryCnt < 5; retryCnt++ {
ret, cfile.CurChunk = cfile.AllocateChunk(true)
if ret != 0 {
time.Sleep(time.Millisecond * 500)
continue
} else {
flag = true
break
}
}
if !flag {
return errors.New("AllocateChunk on 5 retry ... ")
}
}
}
} else {
flag := false
for retryCnt := 0; retryCnt < 5; retryCnt++ {
ret, cfile.CurChunk = cfile.AllocateChunk(true)
if ret != 0 {
time.Sleep(time.Millisecond * 500)
continue
} else {
flag = true
break
}
}
if !flag {
return errors.New("AllocateChunk on 5 retry ... ")
}
}
cfile.DataCacheLocker.Lock()
cfile.DataCache[cfile.atomicNum] = newData
cfile.DataCacheLocker.Unlock()
req := &dp.StreamWriteReq{
ChunkID: cfile.CurChunk.ChunkInfo.ChunkID,
Master: &dp.Block{BlockID: cfile.CurChunk.ChunkInfo.BGP.Blocks[0].BlkID, Host: cfile.CurChunk.ChunkInfo.BGP.Blocks[0].Host},
Slave: &dp.Block{BlockID: cfile.CurChunk.ChunkInfo.BGP.Blocks[1].BlkID, Host: cfile.CurChunk.ChunkInfo.BGP.Blocks[1].Host},
Backup: &dp.Block{BlockID: cfile.CurChunk.ChunkInfo.BGP.Blocks[2].BlkID, Host: cfile.CurChunk.ChunkInfo.BGP.Blocks[2].Host},
Databuf: newData.DataBuf.Next(newData.DataBuf.Len()),
DataLen: uint32(length),
CommitID: cfile.atomicNum,
BlockGroupID: cfile.CurChunk.ChunkInfo.BGP.Blocks[0].BGID,
}
if cfile.CurChunk.ChunkWriteSteam != nil {
if err := cfile.CurChunk.ChunkWriteSteam.Send(req); err != nil {
logger.Debug("WriteHandler: send file %v, chunk %v len: %v failed\n", cfile.Name, cfile.CurChunk, length)
cfile.CurChunk.ChunkFreeSize = 0
} else {
logger.Debug("WriteHandler: send file %v, chunk %v len: %v success\n", cfile.Name, cfile.CurChunk, length)
cfile.CurChunk.ChunkFreeSize -= length
}
} else {
goto ALLOCATECHUNK
}
return nil
}
// AllocateChunk ...
func (cfile *CFile) AllocateChunk(IsStream bool) (int32, *Chunk) {
for i := 0; i < 10; i++ {
if cfile.cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfile.cfs.Conn == nil {
return -1, nil
}
mc := mp.NewMetaNodeClient(cfile.cfs.Conn)
pAllocateChunkReq := &mp.AllocateChunkReq{
VolID: cfile.cfs.VolID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pAllocateChunkAck, err := mc.AllocateChunk(ctx, pAllocateChunkReq)
if err != nil || pAllocateChunkAck.Ret != 0 {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
if cfile.cfs.Conn != nil {
break
}
time.Sleep(300 * time.Millisecond)
continue
}
if cfile.cfs.Conn == nil {
return -1, nil
}
mc = mp.NewMetaNodeClient(cfile.cfs.Conn)
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
pAllocateChunkAck, err = mc.AllocateChunk(ctx, pAllocateChunkReq)
if err != nil {
logger.Error("AllocateChunk failed,grpc func failed :%v\n", err)
return -1, nil
}
}
curChunk := &Chunk{}
curChunk.CFile = cfile
curChunk.ChunkInfo = pAllocateChunkAck.ChunkInfo
if IsStream {
C2Mconn, err := grpc.Dial(pAllocateChunkAck.ChunkInfo.BGP.Blocks[0].Host, grpc.WithInsecure(), grpc.WithBlock(), grpc.FailOnNonTempDialError(true))
if err != nil {
return -1, nil
}
C2Mclient := dp.NewDataNodeClient(C2Mconn)
curChunk.ChunkWriteSteam, err = C2Mclient.C2MRepl(context.Background())
if err != nil {
C2Mconn.Close()
return -1, nil
}
curChunk.ChunkFreeSize = chunkSize
curChunk.ChunkWriteRecvExitSignal = make(chan struct{})
go curChunk.C2MRecv()
}
logger.Debug("AllocateChunk success: chunk info:%v\n", pAllocateChunkAck.ChunkInfo)
return pAllocateChunkAck.Ret, curChunk
}
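// Retry re-sends the unacknowledged data in DataCache to a freshly allocated
// chunk; after five failed attempts the file is marked as FileError.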
func (chunk *Chunk) Retry() {
chunk.CFile.DataCacheLocker.Lock()
defer chunk.CFile.DataCacheLocker.Unlock()
flag := false
var err error
for retryCnt := 0; retryCnt < 5; retryCnt++ {
err = chunk.WriteRetryHandle()
if err != nil {
time.Sleep(time.Millisecond * 500)
continue
} else {
flag = true
break
}
}
if !flag {
chunk.CFile.Status = FileError
logger.Debug("C2MRecv send WriteErrSignal")
chunk.CFile.WriteErrSignal <- true
} else {
chunk.CFile.DataCache = make(map[uint64]*Data)
chunk.ChunkFreeSize = 0
chunk.ChunkWriteSteam = nil
}
}
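// C2MRecv receives write acknowledgements from the datanode stream, confirms
// the cached data and updates the file metadata on the metanode.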
func (chunk *Chunk) C2MRecv() {
logger.Debug("C2MRecv thread started success for file %v chunk %v", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
defer chunk.Retry()
for {
in, err := chunk.ChunkWriteSteam.Recv()
if err == io.EOF {
logger.Debug("C2MRecv: stream %v EOF\n", chunk.ChunkWriteSteam)
break
}
if err != nil {
logger.Debug("C2MRecv: stream %v error return :%v\n", chunk.ChunkWriteSteam, err)
break
}
if in.Ret == -1 {
logger.Error("C2MRecv ack.Ret -1 , means M2S2B stream err")
break
}
// confirm data
chunk.CFile.DataCacheLocker.Lock()
//cfile.DataCache[in.CommitID].timer.Stop()
delete(chunk.CFile.DataCache, in.CommitID)
chunk.CFile.DataCacheLocker.Unlock()
// update to metanode
logger.Debug("C2MRecv: Write success! try to update metadata file: %v, ID;%v, chunk: %v, len: %v\n",
chunk.CFile.Name, in.CommitID, in.ChunkID, in.DataLen)
mc := mp.NewMetaNodeClient(chunk.CFile.cfs.Conn)
pAsyncChunkReq := &mp.AsyncChunkReq{
VolID: chunk.CFile.cfs.VolID,
ParentInodeID: chunk.CFile.ParentInodeID,
Name: chunk.CFile.Name,
ChunkID: in.ChunkID,
CommitSize: in.DataLen,
BlockGroupID: in.BlockGroupID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
_, err2 := mc.AsyncChunk(ctx, pAsyncChunkReq)
if err2 != nil {
break
}
}
}
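// WriteRetryHandle rewrites all unacknowledged cached data into a newly
// allocated chunk, block by block, and then commits the result to the metanode.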
func (chunk *Chunk) WriteRetryHandle() error {
if len(chunk.CFile.DataCache) == 0 {
return nil
}
logger.Debug("WriteRetryHandle in ...")
ret, tmpchunk := chunk.CFile.AllocateChunk(false)
if ret != 0 {
return errors.New("error")
}
sortedKeys := make([]int, 0)
for k := range chunk.CFile.DataCache {
sortedKeys = append(sortedKeys, int(k))
}
sort.Ints(sortedKeys)
var chunkSize int
for _, v := range tmpchunk.ChunkInfo.BGP.Blocks {
conn, err := DialData(v.Host)
if err != nil {
return err
}
dc := dp.NewDataNodeClient(conn)
chunkSize = 0
for _, vv := range sortedKeys {
// use Bytes() instead of Next() so the cached data is not consumed and can
// be re-sent to every block replica; Next() would also leave a zero length
// for the chunkSize accounting below
dataBuf := chunk.CFile.DataCache[uint64(vv)].DataBuf.Bytes()
req := dp.WriteChunkReq{ChunkID: tmpchunk.ChunkInfo.ChunkID, BlockID: v.BlkID, Databuf: dataBuf, CommitID: uint64(vv)}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
_, err = dc.WriteChunk(ctx, &req)
if err != nil {
conn.Close()
return err
}
chunkSize += len(dataBuf)
}
conn.Close()
}
mc := mp.NewMetaNodeClient(chunk.CFile.cfs.Conn)
pAsyncChunkReq := &mp.AsyncChunkReq{
VolID: chunk.CFile.cfs.VolID,
ParentInodeID: chunk.CFile.ParentInodeID,
Name: chunk.CFile.Name,
ChunkID: tmpchunk.ChunkInfo.ChunkID,
CommitSize: uint32(chunkSize),
BlockGroupID: tmpchunk.ChunkInfo.BGP.Blocks[0].BGID,
}
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
_, err2 := mc.AsyncChunk(ctx, pAsyncChunkReq)
if err2 != nil {
return err2
}
return nil
}
// Sync ...
func (cfile *CFile) Sync() int32 {
if cfile.Status == FileError {
return -1
}
return 0
}
// Flush ...
func (cfile *CFile) Flush() int32 {
if cfile.Status == FileError {
return -1
}
return 0
}
// CloseWrite ...
func (cfile *CFile) CloseWrite() int32 {
if cfile.Status == FileError {
return -1
} else {
cfile.Closing = true
logger.Debug("CloseWrite close cfile.DataQueue")
close(cfile.DataQueue)
<-cfile.CloseSignal
logger.Debug("CloseWrite recv CloseSignal!")
}
return 0
}
// ProcessLocalBuffer ...
func ProcessLocalBuffer(buffer []byte, cfile *CFile) {
cfile.Write(buffer, int32(len(buffer)))
}
// ReadLocalAndWriteCFS ...
func ReadLocalAndWriteCFS(filePth string, bufSize int, hookfn func([]byte, *CFile), cfile *CFile) error {
f, err := os.Open(filePth)
if err != nil {
return err
}
defer f.Close()
buf := make([]byte, bufSize)
bfRd := bufio.NewReader(f)
for {
n, err := bfRd.Read(buf)
hookfn(buf[:n], cfile)
if err != nil {
if err == io.EOF {
return nil
}
return err
}
}
}
|
package util
import (
"fmt"
mpaTypes "multidim-pod-autoscaler/pkg/apis/autoscaling/v1"
containerUtil "multidim-pod-autoscaler/pkg/util/container"
"multidim-pod-autoscaler/pkg/util/limitrange"
recommendationUtil "multidim-pod-autoscaler/pkg/util/recommendation"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog"
)
// RecommendationProvider provides the container resources for a given pod
type RecommendationProvider interface {
GetContainerResourcesForPod(
pod *corev1.Pod,
mpa *mpaTypes.MultidimPodAutoscaler,
) ([]containerUtil.Resources, recommendationUtil.ContainerAnnotationsMap, error)
}
type recommendationProvider struct {
limitRange limitrange.Calculator
recommendationProcessor recommendationUtil.Processor
}
// NewRecommendationProvider returns a new RecommendationProvider
func NewRecommendationProvider(
calculator limitrange.Calculator,
processor recommendationUtil.Processor) RecommendationProvider {
return &recommendationProvider{
limitRange: calculator,
recommendationProcessor: processor,
}
}
// GetContainerResourcesForPod returns the recommended resources (as limits and requests) for the containers of the given pod.
// The admission controller writes this information into pod.spec when creating the new pod.
func (r *recommendationProvider) GetContainerResourcesForPod(
pod *corev1.Pod,
mpa *mpaTypes.MultidimPodAutoscaler,
) ([]containerUtil.Resources, recommendationUtil.ContainerAnnotationsMap, error) {
if mpa == nil || pod == nil {
klog.V(2).Infof("connot get recommendations, MPA(%v) or Pod(%v) is nil", mpa, pod)
return nil, nil, nil
}
var containerLimitRange *corev1.LimitRangeItem
var err error
if r.limitRange != nil {
containerLimitRange, err = r.limitRange.GetContainerLimitRangeItem(pod.Namespace)
if err != nil {
return nil, nil, fmt.Errorf("error getting container LimitRange: %s", err)
}
}
var resourcePolicy *mpaTypes.PodResourcePolicy
if mpa.Spec.UpdatePolicy == nil || mpa.Spec.UpdatePolicy.UpdateMode == nil || *mpa.Spec.UpdatePolicy.UpdateMode != mpaTypes.UpdateModeOff {
resourcePolicy = mpa.Spec.ResourcePolicy
}
// get the post-scaling resources in limits/requests form
containerResources, annotations := getContainersResources(pod, resourcePolicy, mpa.Status.RecommendationResources, containerLimitRange)
return containerResources, annotations, nil
}
// getContainersResources returns the recommended resources for each container of the pod
func getContainersResources(
pod *corev1.Pod,
podPolicy *mpaTypes.PodResourcePolicy,
recommendation *mpaTypes.RecommendedResources,
limitRange *corev1.LimitRangeItem,
) ([]containerUtil.Resources, recommendationUtil.ContainerAnnotationsMap) {
if recommendation == nil {
return nil, nil
}
resources := make([]containerUtil.Resources, len(pod.Spec.Containers))
annotations := make(recommendationUtil.ContainerAnnotationsMap)
for i, container := range pod.Spec.Containers {
// look up the recommendation for this container
containerRecomm := recommendationUtil.GetContainerRecommendation(container.Name, recommendation.ContainerRecommendations)
if containerRecomm == nil {
klog.V(2).Infof("no matching recommendation found for container %s", container.Name)
continue
} else {
// a recommendation exists; use it as the requests
resources[i].Requests = containerRecomm.Target
}
var defaultLimit corev1.ResourceList
if limitRange != nil {
defaultLimit = limitRange.Default
}
//containerControlledMode := mpaApi.GetContainerControlledMode(container.Name, podPolicy)
//if containerControlledMode == mpaTypes.ContainerControlledRequestsAndLimits {
// request and limit need to be scaled together
recommLimit, anotation := containerUtil.GetProportionalLimit(
container.Resources.Limits, container.Resources.Requests,
resources[i].Requests, defaultLimit)
if recommLimit != nil && len(recommLimit) != 0 {
// set the scaled limit
resources[i].Limits = recommLimit
if len(anotation) > 0 {
annotations[container.Name] = anotation
}
} else {
resources[i].Limits = resources[i].Requests
annotations[container.Name] = []string{"EmptydefaultLimits, set to the same with Requests"}
}
//}
klog.V(4).Infof("container(%s) of pod(%s/%s)'s recommendation: request-%v limits-%v", container.Name, pod.Namespace, pod.Name, resources[i].Requests, resources[i].Limits)
}
return resources, annotations
}
|
// Copyright (C) 2015 Miquel Sabaté Solà <mikisabate@gmail.com>
// This file is licensed under the MIT license.
// See the LICENSE file.
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func checkDirExists(t *testing.T, dir, name, base string) {
i, err := os.Stat(dir)
assert.Nil(t, err)
assert.Equal(t, i.Name(), name)
assert.True(t, i.IsDir())
assert.Equal(t, dir, base)
}
func TestCreateLoggerDir(t *testing.T) {
originalBaseLogDir := baseLogDir
baseLogDir = "."
os.RemoveAll(dirName)
// The directory hasn't been created yet.
dir, _ := createLoggerDir()
formatted := time.Now().Format(dateISO)
base := fmt.Sprintf("%v/%v", dirName, formatted)
checkDirExists(t, dir, formatted, base)
// Ok, so now the directory exists but it's a regular file.
os.RemoveAll(dirName)
ioutil.WriteFile(dirName, []byte("hello"), 0777)
i, err := os.Stat(dirName)
assert.Nil(t, err)
assert.Equal(t, i.Name(), "ufm")
assert.False(t, i.IsDir())
dir, _ = createLoggerDir()
checkDirExists(t, dir, formatted, base)
// Let's create a couple of these, so we make sure that there will be
// no name clashes.
os.RemoveAll(dirName)
dir1, _ := createLoggerDir()
checkDirExists(t, dir1, formatted, base)
dir2, _ := createLoggerDir()
formatted2 := fmt.Sprintf("%v.1", formatted)
base2 := fmt.Sprintf("%v/%v", dirName, formatted2)
checkDirExists(t, dir2, formatted2, base2)
dir3, _ := createLoggerDir()
formatted3 := fmt.Sprintf("%v.2", formatted)
base3 := fmt.Sprintf("%v/%v", dirName, formatted3)
checkDirExists(t, dir3, formatted3, base3)
// Now let's try to create it in some impossible place.
os.RemoveAll(dirName)
baseLogDir = "/this/directory/does/not/exist"
dir, _ = createLoggerDir()
assert.Empty(t, dir)
baseLogDir = originalBaseLogDir
os.RemoveAll(dirName)
}
func TestString(t *testing.T) {
originalBaseLogDir := baseLogDir
baseLogDir = "."
os.RemoveAll(dirName)
dir, _ := createLoggerDir()
logger := &Logger{Dir: dir}
logger.String("unit", "string")
c, _ := ioutil.ReadFile(fmt.Sprintf("%v/%v.txt", logger.Dir, "unit"))
assert.Equal(t, string(c), "string")
baseLogDir = originalBaseLogDir
os.RemoveAll(dirName)
}
func TestBuffer(t *testing.T) {
originalBaseLogDir := baseLogDir
baseLogDir = "."
os.RemoveAll(dirName)
dir, _ := createLoggerDir()
logger := &Logger{Dir: dir}
logger.Buffer("unit", bytes.NewBufferString("string"))
c, _ := ioutil.ReadFile(fmt.Sprintf("%v/%v.txt", logger.Dir, "unit"))
assert.Equal(t, string(c), "string")
baseLogDir = originalBaseLogDir
os.RemoveAll(dirName)
}
|
package configuration
import "github.com/ftarlao/goblocksync/utils"
// Hardcoded constants
const MajorVersion = 0
const Version = 1
const PatchVersion = 0
// Number of hashes inside one HashGroupMessage
const HashGroupMessageSize = 200
// Bytes for buffered hashes (64M)
const HashMaxBytes = 64 * utils.MB
// Bytes for buffered queued data (64M)
const DataMaxBytes = 64 * utils.MB
// Hash size [bytes], this is currently used by the dumb hash function
const HashSize = 32
// Size of the HashGroupMessage channel buffer (max number elements in the channel)
const HashGroupChannelSize = HashMaxBytes / (HashGroupMessageSize * HashSize)
var SupportedProtocols = []int{1}
// Max number of messages in the message queue, this should be only a small buffer (we have TCP buffers, other queues..)
// The effective max size [bytes] depends on the message types, max block size.. it should range (approximately) between:
// BlockSize * NetworkChannelsSize > size_bytes > HashGroupMessageSize * HashSize * NetworkChannelsSize
const NetworkMaxBytes = 32 * utils.MB
|
// Copyright 2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"fmt"
"net"
"strings"
"time"
pb "github.com/cilium/hubble/api/v1/flow"
"github.com/cilium/hubble/api/v1/observer"
v1 "github.com/cilium/hubble/pkg/api/v1"
"github.com/cilium/hubble/pkg/container"
"github.com/cilium/hubble/pkg/filters"
"github.com/cilium/hubble/pkg/ipcache"
"github.com/cilium/hubble/pkg/logger"
"github.com/cilium/hubble/pkg/metrics"
"github.com/cilium/hubble/pkg/parser"
parserErrors "github.com/cilium/hubble/pkg/parser/errors"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/math"
"github.com/cilium/cilium/pkg/monitor"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/gogo/protobuf/types"
"go.uber.org/zap"
)
type ciliumClient interface {
EndpointList() ([]*models.Endpoint, error)
GetEndpoint(id uint64) (*models.Endpoint, error)
GetIdentity(id uint64) (*models.Identity, error)
GetFqdnCache() ([]*models.DNSLookup, error)
GetIPCache() ([]*models.IPListEntry, error)
}
type endpointsHandler interface {
SyncEndpoints([]*v1.Endpoint)
UpdateEndpoint(*v1.Endpoint)
MarkDeleted(*v1.Endpoint)
FindEPs(epID uint64, ns, pod string) []v1.Endpoint
GetEndpoint(ip net.IP) (endpoint *v1.Endpoint, ok bool)
GarbageCollect()
}
type fqdnCache interface {
InitializeFrom(entries []*models.DNSLookup)
AddDNSLookup(epID uint64, lookupTime time.Time, domainName string, ips []net.IP, ttl uint32)
GetNamesOf(epID uint64, ip net.IP) []string
}
// ObserverServer is a server that can store events in memory
type ObserverServer struct {
// ring buffer that contains the references of all flows
ring *container.Ring
// events is the channel used by the writer(s) to send the flow data
// into the observer server.
events chan *pb.Payload
// stopped is mostly used in unit tests to signal that the events
// channel has been drained, once it's closed.
stopped chan struct{}
// ciliumClient will connect to Cilium to poll cilium endpoint information
ciliumClient ciliumClient
// endpoints contains a slice of all endpoints running on the node where
// hubble is running.
endpoints endpointsHandler
// fqdnCache contains the responses of all intercepted DNS lookups
// performed by local endpoints
fqdnCache fqdnCache
// ipcache is a mirror of Cilium's IPCache
ipcache *ipcache.IPCache
// endpointEvents is a channel used to exchange endpoint events from Cilium
endpointEvents chan monitorAPI.AgentNotify
// logRecord is a channel used to exchange L7 DNS requests seen by the
// monitor
logRecord chan monitor.LogRecordNotify
log *zap.Logger
// channel to receive events from observer server.
eventschan chan *observer.GetFlowsResponse
// payloadParser decodes pb.Payload into pb.Flow
payloadParser *parser.Parser
}
// NewServer returns a server that can store up to maxFlows of the
// received flows.
func NewServer(
ciliumClient ciliumClient,
endpoints endpointsHandler,
ipCache *ipcache.IPCache,
fqdnCache fqdnCache,
payloadParser *parser.Parser,
maxFlows int,
) *ObserverServer {
return &ObserverServer{
log: logger.GetLogger(),
ring: container.NewRing(maxFlows),
// buffer up to 1% of maxFlows, capped at 100 events
events: make(chan *pb.Payload, uint64(math.IntMin(maxFlows/100, 100))),
stopped: make(chan struct{}),
ciliumClient: ciliumClient,
endpoints: endpoints,
ipcache: ipCache,
fqdnCache: fqdnCache,
endpointEvents: make(chan monitorAPI.AgentNotify, 100),
logRecord: make(chan monitor.LogRecordNotify, 100),
eventschan: make(chan *observer.GetFlowsResponse, 100),
payloadParser: payloadParser,
}
}
// Start starts the server to handle the events sent to the events channel as
// well as handle events to the EpAdd and EpDel channels.
func (s *ObserverServer) Start() {
go s.syncEndpoints()
go s.syncFQDNCache()
go s.consumeEndpointEvents()
go s.consumeLogRecordNotifyChannel()
for pl := range s.events {
flow, err := s.decodeFlow(pl)
if err != nil {
if !parserErrors.IsErrInvalidType(err) {
s.log.Debug("failed to decode payload", zap.ByteString("data", pl.Data), zap.Error(err))
}
continue
}
metrics.ProcessFlow(flow)
s.ring.Write(&v1.Event{
Timestamp: pl.Time,
Event: flow,
})
}
close(s.stopped)
}
// StartMirroringIPCache will obtain an initial IPCache snapshot from Cilium
// and then start mirroring IPCache events based on IPCacheNotification sent
// through the ipCacheEvents channels. Only messages of type
// `AgentNotifyIPCacheUpserted` and `AgentNotifyIPCacheDeleted` should be sent
// through that channel. This function assumes that the caller is already
// connected to Cilium Monitor, i.e. no IPCacheNotification must be lost after
// calling this method.
func (s *ObserverServer) StartMirroringIPCache(ipCacheEvents <-chan monitorAPI.AgentNotify) {
go s.syncIPCache(ipCacheEvents)
}
// GetLogRecordNotifyChannel returns the event channel to receive
// monitorAPI.LogRecordNotify events.
func (s *ObserverServer) GetLogRecordNotifyChannel() chan<- monitor.LogRecordNotify {
return s.logRecord
}
// GetEventsChannel returns the event channel to receive pb.Payload events.
func (s *ObserverServer) GetEventsChannel() chan<- *pb.Payload {
return s.events
}
// GetEndpointEventsChannel returns a channel that should be used to send
// AgentNotifyEndpoint* events when an endpoint is added, deleted or updated
// in Cilium.
func (s *ObserverServer) GetEndpointEventsChannel() chan<- monitorAPI.AgentNotify {
return s.endpointEvents
}
func (s *ObserverServer) decodeFlow(pl *pb.Payload) (*pb.Flow, error) {
// TODO: Pool these instead of allocating new flows each time.
f := &pb.Flow{}
err := s.payloadParser.Decode(pl, f)
if err != nil {
return nil, err
}
return f, nil
}
// ServerStatus returns the capacity and current fill level of the flow ring buffer.
func (s *ObserverServer) ServerStatus(
ctx context.Context, req *observer.ServerStatusRequest,
) (*observer.ServerStatusResponse, error) {
res := &observer.ServerStatusResponse{
MaxFlows: s.ring.Cap(),
NumFlows: s.ring.Len(),
}
return res, nil
}
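// logFilters renders a list of flow filters as a compact string for debug logging.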
func logFilters(filters []*pb.FlowFilter) string {
var s []string
for _, f := range filters {
s = append(s, f.String())
}
return "{" + strings.Join(s, ",") + "}"
}
// GetFlows implements the proto method for client requests.
func (s *ObserverServer) GetFlows(
req *observer.GetFlowsRequest,
server observer.Observer_GetFlowsServer,
) (err error) {
reply, err := getFlows(server.Context(), s.log, s.ring, req)
if err != nil {
return err
}
for {
select {
case <-server.Context().Done():
return nil
case rep, ok := <-reply:
if !ok {
return nil
}
err := server.Send(&observer.GetFlowsResponse{
ResponseTypes: &observer.GetFlowsResponse_Flow{
Flow: rep,
},
})
if err != nil {
return err
}
}
}
}
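// getUntil returns the end of the requested time window, falling back to
// defaultTime when the request carries no Until timestamp.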
func getUntil(req *observer.GetFlowsRequest, defaultTime *types.Timestamp) (time.Time, error) {
until := req.GetUntil()
if until == nil {
until = defaultTime
}
return types.TimestampFromProto(until)
}
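// getBufferCh returns a channel of ring buffer events matching the request:
// it either follows the ring, reads the last req.Number entries, or replays
// the entries that fall into the requested time window.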
func getBufferCh(ctx context.Context, ring *container.Ring, req *observer.GetFlowsRequest) (ch <-chan *v1.Event, stop context.CancelFunc, err error) {
stop = func() {}
// s.ring.ReadFrom reads the values up to the last written index, i.e.,
// it will read all values from the given interval:
// [ lastWrite, s.ring.write [
lastWrite := ring.LastWriteParallel() + 1
readIdx := lastWrite - req.Number
switch {
case req.Follow:
ch = ring.ReadFrom(ctx.Done(), readIdx)
case req.Number != 0:
var ctx1 context.Context
ctx1, stop = context.WithCancel(ctx)
ch = ring.ReadFrom(ctx1.Done(), readIdx)
default:
beginning, err := types.TimestampFromProto(req.GetSince())
if err != nil {
return nil, nil, err
}
end, err := getUntil(req, types.TimestampNow())
if err != nil {
return nil, nil, err
}
timestampCh := make(chan *v1.Event, 1000)
ch = timestampCh
var ctx1 context.Context
ctx1, stop = context.WithCancel(ctx)
go func() {
defer close(timestampCh)
for lastWrite := ring.LastWriteParallel(); ; lastWrite-- {
e, ok := ring.Read(lastWrite)
// if the buffer was not full yet we can get nil payloads
if e == nil || e.Event == nil || !ok {
return
}
ts, err := types.TimestampFromProto(e.GetFlow().GetTime())
if err != nil {
return
}
if beginning.Before(ts) && end.After(ts) {
select {
case <-ctx1.Done():
return
case timestampCh <- e:
}
}
}
}()
}
return ch, stop, nil
}
// getFlows returns a channel of flows matching the given request.
func getFlows(
ctx context.Context,
log *zap.Logger,
ring *container.Ring,
req *observer.GetFlowsRequest,
) (chan *pb.Flow, error) {
start := time.Now()
i := uint64(0)
defer func() {
size := ring.Cap()
took := time.Since(start)
log.Debug(
"GetFlows finished",
zap.Uint64("number_of_flows", i),
zap.Uint64("buffer_size", size),
zap.String("whitelist", logFilters(req.Whitelist)),
zap.String("blacklist", logFilters(req.Blacklist)),
zap.Duration("took", took),
)
}()
whitelist, err := filters.BuildFilterList(req.Whitelist)
if err != nil {
return nil, err
}
blacklist, err := filters.BuildFilterList(req.Blacklist)
if err != nil {
return nil, err
}
log.Debug("filters", zap.String("req", fmt.Sprintf("%+v", req)))
log.Debug("whitelist", zap.String("whitelist", fmt.Sprintf("%+v", whitelist)))
log.Debug("blacklist", zap.String("blacklist", fmt.Sprintf("%+v", blacklist)))
ch, stop, err := getBufferCh(ctx, ring, req)
if err != nil {
return nil, err
}
reply := make(chan *pb.Flow, 1)
go func() {
defer close(reply)
defer stop()
for e := range ch {
if req.Number != 0 && !req.Follow {
i++
if i >= req.Number {
// stop the channel buffer because we have reached
// the number of requested flows.
stop()
if i > req.Number {
// we have already read more events than requested, so stop
// without sending this extra one.
return
}
}
}
if e == nil {
continue
}
flow, ok := e.Event.(*pb.Flow)
if !ok || !filters.Apply(whitelist, blacklist, e) {
continue
}
select {
case reply <- flow:
// We have sent all expected flows so we can return already
if req.Number != 0 && i >= req.Number {
return
}
case <-ctx.Done():
return
}
}
}()
return reply, nil
}
|
// Copyright 2021 Red Hat, Inc. and/or its affiliates
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package installers
import (
"errors"
"fmt"
"sigs.k8s.io/controller-runtime/pkg/client"
ispn "github.com/kiegroup/kogito-operator/core/infrastructure/infinispan/v1"
"github.com/kiegroup/kogito-operator/test/pkg/config"
"github.com/kiegroup/kogito-operator/test/pkg/framework"
coreapps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)
var (
// infinispanOlmClusterWideInstaller installs Infinispan cluster wide using OLM
infinispanOlmClusterWideInstaller = OlmClusterWideServiceInstaller{
SubscriptionName: infinispanOperatorSubscriptionName,
Channel: infinispanOperatorSubscriptionChannel,
Catalog: framework.GetCommunityCatalog,
InstallationTimeoutInMinutes: 10,
GetAllClusterWideOlmCrsInNamespace: getInfinispanCrsInNamespace,
}
// infinispanYamlNamespacedInstaller installs Infinispan namespaced using YAMLs
infinispanYamlNamespacedInstaller = YamlNamespacedServiceInstaller{
InstallNamespacedYaml: installInfinispanUsingYaml,
WaitForNamespacedServiceRunning: waitForInfinispanUsingYamlRunning,
GetAllNamespaceYamlCrs: getInfinispanCrsInNamespace,
UninstallNamespaceYaml: uninstallInfinispanUsingYaml,
NamespacedYamlServiceName: infinispanOperatorServiceName,
}
infinispanOperatorSubscriptionName = "infinispan"
infinispanOperatorSubscriptionChannel = "2.3.x"
infinispanOperatorGitHubBranch = "2.0.x"
infinispanOperatorDeployFilesURI = fmt.Sprintf("https://raw.githubusercontent.com/infinispan/infinispan-operator/%s/deploy/", infinispanOperatorGitHubBranch)
infinispanOperatorServiceName = "Infinispan"
)
// GetInfinispanInstaller returns Infinispan installer
func GetInfinispanInstaller() (ServiceInstaller, error) {
if config.IsInfinispanInstalledByYaml() {
return &infinispanYamlNamespacedInstaller, nil
}
if config.IsInfinispanInstalledByOlm() {
return &infinispanOlmClusterWideInstaller, nil
}
return nil, errors.New("No Infinispan operator installer available for provided configuration")
}
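// installInfinispanUsingYaml deploys the Infinispan operator CRDs, RBAC
// resources and Deployment from the upstream YAML files into the namespace.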
func installInfinispanUsingYaml(namespace string) error {
framework.GetLogger(namespace).Info("Deploy Infinispan from yaml files", "file uri", infinispanOperatorDeployFilesURI)
infinispanClusterResourceName := getInfinispanClusterResourceName(namespace)
if !framework.IsInfinispanAvailable(namespace) {
if err := framework.LoadResource(namespace, infinispanOperatorDeployFilesURI+"crds/infinispan.org_caches_crd.yaml", &apiextensionsv1beta1.CustomResourceDefinition{}, nil); err != nil {
return err
}
if err := framework.LoadResource(namespace, infinispanOperatorDeployFilesURI+"crds/infinispan.org_infinispans_crd.yaml", &apiextensionsv1beta1.CustomResourceDefinition{}, nil); err != nil {
return err
}
}
err := framework.LoadResource(namespace, infinispanOperatorDeployFilesURI+"clusterrole.yaml", &rbac.ClusterRole{}, func(object interface{}) {
// Prefix name to be unique to allow concurrent installations
object.(*rbac.ClusterRole).Name = infinispanClusterResourceName
})
if err != nil {
return err
}
err = framework.LoadResource(namespace, infinispanOperatorDeployFilesURI+"clusterrole_binding.yaml", &rbac.ClusterRoleBinding{}, func(object interface{}) {
// Prefix name to be unique to allow concurrent installations
object.(*rbac.ClusterRoleBinding).Name = infinispanClusterResourceName
// Set proper namespace for binding to service account
object.(*rbac.ClusterRoleBinding).Subjects[0].Namespace = namespace
})
if err != nil {
return err
}
if err := framework.LoadResource(namespace, infinispanOperatorDeployFilesURI+"service_account.yaml", &corev1.ServiceAccount{}, nil); err != nil {
return err
}
if err := framework.LoadResource(namespace, infinispanOperatorDeployFilesURI+"role.yaml", &rbac.Role{}, nil); err != nil {
return err
}
if err := framework.LoadResource(namespace, infinispanOperatorDeployFilesURI+"role_binding.yaml", &rbac.RoleBinding{}, nil); err != nil {
return err
}
if err := framework.LoadResource(namespace, infinispanOperatorDeployFilesURI+"operator.yaml", &coreapps.Deployment{}, nil); err != nil {
return err
}
return nil
}
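// waitForInfinispanUsingYamlRunning waits for the infinispan-operator pod to come up.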
func waitForInfinispanUsingYamlRunning(namespace string) error {
return framework.WaitForPodsWithLabel(namespace, "name", "infinispan-operator", 1, 3)
}
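// uninstallInfinispanUsingYaml deletes the resources created by
// installInfinispanUsingYaml and returns the first error encountered, if any.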
func uninstallInfinispanUsingYaml(namespace string) error {
framework.GetLogger(namespace).Info("Uninstalling Infinispan")
infinispanClusterResourceName := getInfinispanClusterResourceName(namespace)
var originalError error
output, err := framework.CreateCommand("oc", "delete", "-f", infinispanOperatorDeployFilesURI+"operator.yaml", "-n", namespace).WithLoggerContext(namespace).Execute()
if err != nil {
framework.GetLogger(namespace).Error(err, fmt.Sprintf("Deleting Infinispan operator failed, output: %s", output))
if originalError == nil {
originalError = err
}
}
output, err = framework.CreateCommand("oc", "delete", "-f", infinispanOperatorDeployFilesURI+"role_binding.yaml", "-n", namespace).WithLoggerContext(namespace).Execute()
if err != nil {
framework.GetLogger(namespace).Error(err, fmt.Sprintf("Deleting Infinispan role binding failed, output: %s", output))
if originalError == nil {
originalError = err
}
}
output, err = framework.CreateCommand("oc", "delete", "-f", infinispanOperatorDeployFilesURI+"role.yaml", "-n", namespace).WithLoggerContext(namespace).Execute()
if err != nil {
framework.GetLogger(namespace).Error(err, fmt.Sprintf("Deleting Infinispan role failed, output: %s", output))
if originalError == nil {
originalError = err
}
}
output, err = framework.CreateCommand("oc", "delete", "-f", infinispanOperatorDeployFilesURI+"service_account.yaml", "-n", namespace).WithLoggerContext(namespace).Execute()
if err != nil {
framework.GetLogger(namespace).Error(err, fmt.Sprintf("Deleting Infinispan service account failed, output: %s", output))
if originalError == nil {
originalError = err
}
}
crb, err := framework.GetClusterRoleBinding(infinispanClusterResourceName)
if err != nil {
framework.GetLogger(namespace).Error(err, fmt.Sprintf("Cannot retrieve ClusterRoleBinding %s", infinispanClusterResourceName))
if originalError == nil {
originalError = err
}
} else {
if err = framework.DeleteObject(crb); err != nil {
framework.GetLogger(namespace).Error(err, fmt.Sprintf("Cannot delete ClusterRoleBinding %s", infinispanClusterResourceName))
if originalError == nil {
originalError = err
}
}
}
cr, err := framework.GetClusterRole(infinispanClusterResourceName)
if err != nil {
framework.GetLogger(namespace).Error(err, fmt.Sprintf("Cannot retrieve ClusterRole %s", infinispanClusterResourceName))
if originalError == nil {
originalError = err
}
} else {
if err = framework.DeleteObject(cr); err != nil {
framework.GetLogger(namespace).Error(err, fmt.Sprintf("Cannot delete ClusterRole %s", infinispanClusterResourceName))
if originalError == nil {
originalError = err
}
}
}
return originalError
}
func getInfinispanClusterResourceName(namespace string) string {
return "infinispan-" + namespace
}
func getInfinispanCrsInNamespace(namespace string) ([]client.Object, error) {
var crs []client.Object
infinispans := &ispn.InfinispanList{}
if err := framework.GetObjectsInNamespace(namespace, infinispans); err != nil {
return nil, err
}
for i := range infinispans.Items {
crs = append(crs, &infinispans.Items[i])
}
return crs, nil
}
|
package ebs
import (
"github.com/openebs/mtest/driver"
"github.com/openebs/mtest/util"
)
const (
// Name of this executor
// This executor will be known as this to the outside world
EBS_SNAP_REMOVE_EXEC = "ebs.snapshot.remove.executor"
)
// SnapshotRemover is an EBS driver executor that removes volume snapshots.
type SnapshotRemover struct {
d *EBSDriver
}
func init() {
// Register by passing the name of this executor
// and its initializing function definition.
RegisterAsEBSExecutor(EBS_SNAP_REMOVE_EXEC, SnapshotRemoverInit)
}
// SnapshotRemoverInit is the initializing function of the SnapshotRemover executor.
func SnapshotRemoverInit(ebsDriver *EBSDriver) (driver.Executor, error) {
return &SnapshotRemover{
d: ebsDriver,
}, nil
}
func (s *SnapshotRemover) Exec(req driver.Request) (*driver.Response, error) {
s.d.mutex.Lock()
defer s.d.mutex.Unlock()
id := req.Name
volumeID, err := util.GetFieldFromOpts(OPT_VOLUME_NAME, req.Options)
if err != nil {
return nil, err
}
snapshot, volume, err := s.d.getSnapshotAndVolume(id, volumeID)
if err != nil {
return nil, err
}
log.Debugf("Removing snapshot %v(%v) of volume %v(%v)", id, snapshot.EBSID, volumeID, volume.EBSID)
delete(volume.Snapshots, id)
err = util.ObjectSave(volume)
if err != nil {
return nil, err
}
return &driver.Response{}, nil
}
|
package plist
// RawPlistValue is a raw encoded Plist object. It implements Marshaler and
// Unmarshaler and can be used to delay Plist decoding or precompute a Plist
// encoding using DecodeElement or EncodeElement respectively.
type RawPlistValue plistValue
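// UnmarshalPlist captures the raw, still-encoded value so that decoding can
// be deferred until DecodeElement is called.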
func (r *RawPlistValue) UnmarshalPlist(p *Decoder, start *RawPlistValue) error {
*r = *start
return nil
}
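// MarshalPlist emits the previously captured raw value unchanged.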
func (r RawPlistValue) MarshalPlist(p *Encoder, start *RawPlistValue) error {
*start = r
return nil
}
|
package variable_length
// Variadic arguments (interface{} case)
// Normal case
type IVariableInterface interface {
Invoke(a int, opt ...interface{}) string
}
type VariableInterface struct {
}
func (VariableInterface) Invoke(a int, opt ...interface{}) string {
return ""
}
// Abnormal case
type NIVariableInterface interface { // want "not implemented"
Invoke(a string, opt ...interface{}) string
Invoke2(a string, opt ...interface{}) string
}
type NVariableInterface struct {
}
func (NVariableInterface) Invoke(a string, opt ...interface{}) string {
return ""
}
|
package concepts
import (
"errors"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
fthealth "github.com/Financial-Times/go-fthealth/v1_1"
uuid "github.com/google/uuid"
"github.com/husobee/vestigo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
const expectedUserAgent = "UPP internal-concordances"
func TestGetConcordancesEmptyResponse(t *testing.T) {
serverMock := new(mockPublicConcordancesServer)
requestedUUIDs := []string{uuid.New().String()}
serverMock.On("getRequest").Return("tid_TestGetConcordancesEmptyResponse", requestedUUIDs)
serverMock.On("getResponse").Return(`{}`, http.StatusOK)
server := serverMock.startServer(t)
defer server.Close()
concordances := NewConcordances(&http.Client{}, server.URL)
identifiers, err := concordances.GetConcordances("tid_TestGetConcordancesEmptyResponse", NoAuthority, requestedUUIDs...)
assert.NoError(t, err)
assert.Len(t, identifiers, 0)
serverMock.AssertExpectations(t) // failure here means the concordances API has not been called
}
func TestGetConcordancesAtLeastOneNonEmptyID(t *testing.T) {
serverMock := new(mockPublicConcordancesServer)
requestedUUIDs := []string{"", "", uuid.New().String()}
serverMock.On("getRequest").Return("tid_TestGetConcordancesAtLeastOneNonEmptyID", requestedUUIDs)
serverMock.On("getResponse").Return(`{}`, http.StatusOK) // respond with an empty body, so no data will be returned, but if the test passes then the test case is successful.
server := serverMock.startServer(t)
defer server.Close()
concordances := NewConcordances(&http.Client{}, server.URL)
identifiers, err := concordances.GetConcordances("tid_TestGetConcordancesAtLeastOneNonEmptyID", NoAuthority, requestedUUIDs...)
assert.NoError(t, err)
assert.Len(t, identifiers, 0)
serverMock.AssertExpectations(t) // failure here means the concordances API has not been called
}
func TestGetConcordances(t *testing.T) {
serverMock := new(mockPublicConcordancesServer)
requestedUUIDs := []string{uuid.New().String()}
serverMock.On("getRequest").Return("tid_TestGetEmptyConcordances", requestedUUIDs)
concordancesResp, err := ioutil.ReadFile("./_fixtures/concordances_response.json")
require.NoError(t, err)
serverMock.On("getResponse").Return(string(concordancesResp), http.StatusOK)
server := serverMock.startServer(t)
defer server.Close()
concordances := NewConcordances(&http.Client{}, server.URL)
identifiers, err := concordances.GetConcordances("tid_TestGetEmptyConcordances", NoAuthority, requestedUUIDs...)
assert.NoError(t, err)
assert.Len(t, identifiers, 1)
ids, ok := identifiers["2753c50c-b256-4814-9f0d-65c8e755aa14"]
assert.True(t, ok)
assert.Len(t, ids, 4)
serverMock.AssertExpectations(t) // failure here means the concordances API has not been called
}
func TestGetConcordancesByAuthority(t *testing.T) {
serverMock := new(mockPublicConcordancesServer)
requestedUUIDs := []string{uuid.New().String()}
serverMock.On("getRequest").Return("tid_TestGetConcordancesByAuthority", requestedUUIDs)
concordancesResp, err := ioutil.ReadFile("./_fixtures/concordances_by_authority_response.json")
require.NoError(t, err)
serverMock.On("getResponse").Return(string(concordancesResp), http.StatusOK)
server := serverMock.startServer(t)
defer server.Close()
concordances := NewConcordances(&http.Client{}, server.URL)
uppAuthority := "http://api.ft.com/system/UPP"
identifiers, err := concordances.GetConcordances("tid_TestGetConcordancesByAuthority", uppAuthority, requestedUUIDs...)
assert.NoError(t, err)
assert.Len(t, identifiers, 1)
ids, ok := identifiers["2753c50c-b256-4814-9f0d-65c8e755aa14"]
assert.True(t, ok)
assert.Len(t, ids, 1)
assert.Equal(t, uppAuthority, ids[0].Authority)
assert.Equal(t, "6b43a14b-a5e0-3b63-a428-aa55def05fcb", ids[0].IdentifierValue)
serverMock.AssertExpectations(t) // failure here means the concordances API has not been called
}
func TestGetConcordancesFailsWhenNoIDsSupplied(t *testing.T) {
concordances := NewConcordances(&http.Client{}, "")
_, err := concordances.GetConcordances("tid_TestGetConcordancesFailsWhenNoIDsSupplied", NoAuthority)
assert.EqualError(t, err, ErrNoConceptsToSearch.Error())
}
func TestGetConcordancesFailsWhenEmptyIDsSupplied(t *testing.T) {
concordances := NewConcordances(&http.Client{}, "")
_, err := concordances.GetConcordances("tid_TestGetConcordancesFailsWhenEmptyIDsSupplied", NoAuthority, "", "")
assert.EqualError(t, err, ErrConceptIDsAreEmpty.Error())
}
func TestGetConcordancesFailsInvalidURL(t *testing.T) {
concordances := NewConcordances(&http.Client{}, ":#") // this triggers a invalid url during the http.NewRequest() line
_, err := concordances.GetConcordances("tid_TestGetConcordancesFailsInvalidURL", NoAuthority, uuid.New().String())
assert.Error(t, err)
}
func TestGetConcordancesRequestFails(t *testing.T) {
concordances := NewConcordances(&http.Client{}, "#:") // this triggers a protocol error in the client.Do()
_, err := concordances.GetConcordances("tid_TestGetConcordancesRequestFails", NoAuthority, uuid.New().String())
assert.Error(t, err)
}
func TestGetConcordancesResponseJSONInvalid(t *testing.T) {
serverMock := new(mockPublicConcordancesServer)
requestedUUIDs := []string{"", "", uuid.New().String()}
serverMock.On("getRequest").Return("tid_TestGetConcordancesResponseJSONInvalid", requestedUUIDs)
serverMock.On("getResponse").Return(`{`, http.StatusOK)
server := serverMock.startServer(t)
defer server.Close()
concordances := NewConcordances(&http.Client{}, server.URL)
_, err := concordances.GetConcordances("tid_TestGetConcordancesResponseJSONInvalid", NoAuthority, requestedUUIDs...)
assert.Error(t, err)
serverMock.AssertExpectations(t) // failure here means the concordances API has not been called
}
func TestGetConcordancesFailedResponse(t *testing.T) {
serverMock := new(mockPublicConcordancesServer)
requestedUUIDs := []string{"", "", uuid.New().String()}
serverMock.On("getRequest").Return("tid_TestGetConcordancesFailedResponse", requestedUUIDs)
serverMock.On("getResponse").Return(`{"message":"uh oh"}`, http.StatusServiceUnavailable)
server := serverMock.startServer(t)
defer server.Close()
concordances := NewConcordances(&http.Client{}, server.URL)
_, err := concordances.GetConcordances("tid_TestGetConcordancesFailedResponse", NoAuthority, requestedUUIDs...)
assert.EqualError(t, err, "503 Service Unavailable: uh oh")
serverMock.AssertExpectations(t) // failure here means the concordances API has not been called
}
func TestGetConcordancesFailedResponseMessageDecodingAlsoFailed(t *testing.T) {
serverMock := new(mockPublicConcordancesServer)
requestedUUIDs := []string{"", "", uuid.New().String()}
serverMock.On("getRequest").Return("tid_TestGetConcordancesFailedResponse", requestedUUIDs)
serverMock.On("getResponse").Return(`{`, http.StatusBadRequest)
server := serverMock.startServer(t)
defer server.Close()
concordances := NewConcordances(&http.Client{}, server.URL)
_, err := concordances.GetConcordances("tid_TestGetConcordancesFailedResponse", NoAuthority, requestedUUIDs...)
assert.EqualError(t, err, "400 Bad Request: Failed to decode message from response")
serverMock.AssertExpectations(t) // failure here means the concordances API has not been called
}
type mockPublicConcordancesServer struct {
mock.Mock
}
func (m *mockPublicConcordancesServer) getRequest() (string, []string) {
args := m.Called()
return args.String(0), args.Get(1).([]string)
}
func (m *mockPublicConcordancesServer) getResponse() (string, int) {
args := m.Called()
return args.String(0), args.Int(1)
}
func (m *mockPublicConcordancesServer) startServer(t *testing.T) *httptest.Server {
r := vestigo.NewRouter()
r.Get("/concordances", func(w http.ResponseWriter, r *http.Request) {
tid, expectedIDs := m.getRequest()
assert.Equal(t, tid, r.Header.Get("X-Request-Id"))
assert.Equal(t, expectedUserAgent, r.Header.Get("User-Agent"))
query := r.URL.Query()
authorityParam, foundAuthority := query[authorityQueryParam]
queryParam := concordancesQueryParam
if foundAuthority {
queryParam = identifierValueQueryParam
assert.NotEmpty(t, authorityParam)
assert.Empty(t, query[concordancesQueryParam])
}
actualIDs, foundConceptId := query[queryParam]
assert.True(t, foundConceptId)
assert.Equal(t, expectedIDs, actualIDs)
json, status := m.getResponse()
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(status)
w.Write([]byte(json))
})
return httptest.NewServer(r)
}
func TestConcordanceHappyCheck(t *testing.T) {
gtgServerMock := newPublicConcordanceAPIGTGMock(t, http.StatusOK)
defer gtgServerMock.Close()
search := NewConcordances(&http.Client{}, gtgServerMock.URL)
check := search.Check()
assertConcordanceCheckConsistency(t, check)
msg, err := check.Checker()
assert.NoError(t, err)
assert.Equal(t, "Public Concordance API is good to go", msg)
}
func TestConcordanceUnhappyCheckDueInvalidURL(t *testing.T) {
search := NewConcordances(&http.Client{}, ":#")
check := search.Check()
assertConcordanceCheckConsistency(t, check)
_, err := check.Checker()
var urlErr *url.Error
assert.True(t, errors.As(err, &urlErr))
assert.Equal(t, urlErr.Op, "parse")
}
func TestConcordanceUnhappyCheckDueHTTPCallError(t *testing.T) {
search := NewConcordances(&http.Client{}, "")
check := search.Check()
assertConcordanceCheckConsistency(t, check)
_, err := check.Checker()
var urlErr *url.Error
assert.True(t, errors.As(err, &urlErr))
assert.Equal(t, urlErr.Op, "Get")
}
func TestConcordanceUnhappyCheckDueNon200HTTPStatus(t *testing.T) {
gtgServerMock := newConceptSearchAPIGTGMock(t, http.StatusServiceUnavailable)
defer gtgServerMock.Close()
search := NewConcordances(&http.Client{}, gtgServerMock.URL)
check := search.Check()
assertConcordanceCheckConsistency(t, check)
_, err := check.Checker()
assert.EqualError(t, err, "GTG returned a non-200 HTTP status: 503")
}
func assertConcordanceCheckConsistency(t *testing.T, check fthealth.Check) {
assert.Equal(t, "public-concordance-api", check.ID)
assert.Equal(t, "Concorded concepts can not be returned to clients", check.BusinessImpact)
assert.Equal(t, "Public Concordance API Healthcheck", check.Name)
assert.Equal(t, "https://runbooks.in.ft.com/internal-concordances", check.PanicGuide)
assert.Equal(t, uint8(2), check.Severity)
assert.Equal(t, "Public Concordance API is not available", check.TechnicalSummary)
}
func newPublicConcordanceAPIGTGMock(t *testing.T, status int) *httptest.Server {
r := vestigo.NewRouter()
r.Get("/__gtg", func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, expectedUserAgent, r.Header.Get("User-Agent"))
w.WriteHeader(status)
})
return httptest.NewServer(r)
}
|
// Copyright 2016 Andreas Pannewitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package d
import (
"github.com/GoLangsam/dk-7.2.2.1/internal/x" // all we need
)
// ===========================================================================
// Next chooses another primary Item index
// among the remaining items and returns it for Dance(i).
//
// Chosen is the first item with the smallest number of options.
// (Sometimes called "MRV heuristic", or "S heuristic".)
//
// Next panics iff the list at root is empty
// as no item can be found.
func (a tach) Next(root *x.Item) (here x.Index) {
Size := x.Index(len(a.OptaS)) // larger than anything we'll find.
if root.Next == 0 {
die("Choice called on empty list!")
}
var size x.Index
for curr := root.Next; curr != 0; curr = a.ItemS[curr].Next {
// TODO: the "non-sharp/sharp preference"-Heuristics
// if a.NameS[curr] does/doesn't start with `#` {
// size = size + len(a.Optas.MarkS) - 1 // #-of-options
// }
size = a.OptaS[curr].Root
if size < Size {
Size, here = size, curr
}
}
if here == 0 {
die("Choice found no item!")
}
qqq("Chosen:", tab, here)
return
}
// ========================================================
|
package main
import (
"fmt"
"math"
"os"
"time"
"github.com/c-14/grue/config"
"github.com/mmcdole/gofeed"
"gopkg.in/gomail.v2"
)
type FeedFetcher struct {
mailer gomail.Sender
init bool
sem chan int
finished chan int
}
type RSSFeed struct {
config config.AccountConfig
LastFetched int64 `json:",omitempty"`
LastQueried int64 `json:",omitempty"`
NextQuery int64 `json:",omitempty"`
Tries int `json:",omitempty"`
GUIDList map[string]struct{} `json:",omitempty"`
}
type DateType int
const (
NoDate DateType = iota
DateNewer
DateOlder
)
func hasNewerDate(item *gofeed.Item, lastFetched int64) (time.Time, DateType) {
if item.PublishedParsed != nil {
if item.PublishedParsed.Unix() > lastFetched {
return *item.PublishedParsed, DateNewer
} else {
return *item.PublishedParsed, DateOlder
}
} else if item.UpdatedParsed != nil {
if item.UpdatedParsed.Unix() > lastFetched {
return *item.UpdatedParsed, DateNewer
} else {
return *item.UpdatedParsed, DateOlder
}
} else if date, exists := item.Extensions["dc"]["date"]; exists {
dateParsed, err := time.Parse(time.RFC3339, date[0].Value)
if err != nil {
fmt.Printf("Can't parse (%v) as dc:date for (%v)\n", date, item.Link)
return time.Now(), NoDate
}
if dateParsed.Unix() > lastFetched {
return dateParsed, DateNewer
} else {
return dateParsed, DateOlder
}
}
return time.Now(), NoDate
}
func fetchFeed(fp FeedFetcher, feedName string, account *RSSFeed, config *config.GrueConfig) {
// if account.UserAgent != nil {
// feed.SetUserAgent(*account.UserAgent)
// }
now := time.Now()
if account.NextQuery > now.Unix() {
<-fp.sem
fp.finished <- 1
return
}
parser := gofeed.NewParser()
feed, err := parser.ParseURL(account.config.URI)
account.LastQueried = now.Unix()
if err != nil {
if account.Tries > 0 {
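// Exponential backoff (illustrative): with Tries == 1 the next attempt is
// deferred by 2^(1+4) = 32 minutes, and the delay doubles with every further
// consecutive failure.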
account.NextQuery = now.Add(time.Duration(math.Exp2(float64(account.Tries+4))) * time.Minute).Unix()
}
account.Tries++
if account.Tries > 1 {
fmt.Printf("Caught error (#%d) when parsing %s: %s\n", account.Tries, account.config.URI, err)
}
<-fp.sem
fp.finished <- 1
return
}
account.NextQuery = 0
account.Tries = 0
guids := account.GUIDList
if float64(len(guids)) > 1.2*float64(len(feed.Items)) {
account.GUIDList = make(map[string]struct{})
}
for _, item := range feed.Items {
if fp.init {
account.GUIDList[item.GUID] = struct{}{}
} else {
_, exists := guids[item.GUID]
date, newer := hasNewerDate(item, account.LastFetched)
if !exists || (item.GUID == "" && newer == DateNewer) {
e := createEmail(feedName, feed, item, date, account.config, config)
err = e.Send(fp.mailer)
}
if err == nil {
account.GUIDList[item.GUID] = struct{}{}
} else {
fmt.Fprintln(os.Stderr, err)
break
}
}
}
if err == nil {
account.LastFetched = time.Now().Unix()
}
<-fp.sem
fp.finished <- 1
}
func fetchFeeds(conf *config.GrueConfig, init bool) error {
hist, err := ReadHistory()
if err != nil {
return err
}
var mailer gomail.Sender
if !init {
mailer, err = setupMailer(conf)
if err != nil {
return err
}
}
fp := FeedFetcher{init: init, mailer: mailer, sem: make(chan int, 10), finished: make(chan int)}
go func() {
for name, accountConfig := range conf.Accounts {
fp.sem <- 1
account, exist := hist.Feeds[name]
if !exist {
account = new(RSSFeed)
account.GUIDList = make(map[string]struct{})
hist.Feeds[name] = account
} else if len(account.GUIDList) == 0 {
account.GUIDList = make(map[string]struct{})
}
account.config = accountConfig
go fetchFeed(fp, name, account, conf)
}
}()
for range conf.Accounts {
<-fp.finished
}
return hist.Write()
}
func fetchName(conf *config.GrueConfig, name string, init bool) error {
accountConfig, ok := conf.Accounts[name]
if !ok {
return fmt.Errorf("%s: account does not exist", name)
}
hist, err := ReadHistory()
if err != nil {
return err
}
fp := FeedFetcher{
init: init,
sem: make(chan int, 1),
finished: make(chan int),
}
fp.sem <- 1
account, exist := hist.Feeds[name]
if !exist {
account = new(RSSFeed)
hist.Feeds[name] = account
}
if len(account.GUIDList) == 0 {
account.GUIDList = make(map[string]struct{})
}
account.config = accountConfig
go fetchFeed(fp, name, account, conf)
<-fp.finished
return hist.Write()
}
|
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package noise
import (
"context"
"crypto/rand"
"crypto/subtle"
"crypto/x509"
"encoding/binary"
"io"
"time"
"github.com/flynn/noise"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/blake3"
"github.com/zeebo/errs"
"storj.io/common/identity"
"storj.io/common/pb"
"storj.io/common/signing"
"storj.io/common/storj"
)
var (
mon = monkit.Package()
// Error is a noise error class.
Error = errs.Class("noise")
)
// Config is useful for noiseconn Conns.
type Config = noise.Config
// Header is the drpcmigrate.Header prefix for DRPC over Noise.
const Header = "DRPC!N!1"
// DefaultProto is the protobuf enum value that specifies what noise
// protocol should be in use.
const DefaultProto = pb.NoiseProtocol_NOISE_IK_25519_CHACHAPOLY_BLAKE2B
// ProtoToConfig takes a pb.NoiseProtocol and returns a noise.Config
// that matches.
func ProtoToConfig(proto pb.NoiseProtocol) (noise.Config, error) {
switch proto {
case pb.NoiseProtocol_NOISE_IK_25519_CHACHAPOLY_BLAKE2B:
return noise.Config{
CipherSuite: noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashBLAKE2b),
Pattern: noise.HandshakeIK,
}, nil
case pb.NoiseProtocol_NOISE_IK_25519_AESGCM_BLAKE2B:
return noise.Config{
CipherSuite: noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashBLAKE2b),
Pattern: noise.HandshakeIK,
}, nil
case pb.NoiseProtocol_NOISE_UNSET:
return noise.Config{}, errs.New("unset noise protocol")
default:
return noise.Config{}, errs.New("unknown noise protocol %v", proto)
}
}
// ConfigToProto is the inverse of ProtoToConfig.
func ConfigToProto(cfg noise.Config) (pb.NoiseProtocol, error) {
noiseName := cfg.Pattern.Name + "_" + string(cfg.CipherSuite.Name())
switch noiseName {
case "IK_25519_ChaChaPoly_BLAKE2b":
return pb.NoiseProtocol_NOISE_IK_25519_CHACHAPOLY_BLAKE2B, nil
case "IK_25519_AESGCM_BLAKE2b":
return pb.NoiseProtocol_NOISE_IK_25519_AESGCM_BLAKE2B, nil
default:
return pb.NoiseProtocol_NOISE_UNSET, errs.New("unknown noise config %q", noiseName)
}
}
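// As a quick sanity check (an illustrative sketch, not part of this package's
// API), ProtoToConfig and ConfigToProto are expected to round-trip for every
// supported protocol value:
//
//    for _, proto := range []pb.NoiseProtocol{
//        pb.NoiseProtocol_NOISE_IK_25519_CHACHAPOLY_BLAKE2B,
//        pb.NoiseProtocol_NOISE_IK_25519_AESGCM_BLAKE2B,
//    } {
//        cfg, _ := ProtoToConfig(proto)
//        back, _ := ConfigToProto(cfg)
//        // back is expected to equal proto here.
//    }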
// ConfigToInfo turns a server-side noise Config into a *pb.NoiseInfo.
func ConfigToInfo(cfg noise.Config) (*pb.NoiseInfo, error) {
proto, err := ConfigToProto(cfg)
if err != nil {
return nil, err
}
return &pb.NoiseInfo{
Proto: proto,
PublicKey: cfg.StaticKeypair.Public,
}, nil
}
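// identityBasedEntropy derives a deterministic entropy stream from the
// identity's leaf private key: it keys a BLAKE3 derive-key hasher with the
// given context string, feeds it the serialized private key, and returns the
// hasher's extendable digest as an io.Reader.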
func identityBasedEntropy(context string, ident *identity.FullIdentity) (io.Reader, error) {
h := blake3.NewDeriveKey(context)
serialized, err := x509.MarshalPKCS8PrivateKey(ident.Key)
if err != nil {
return nil, Error.Wrap(err)
}
_, err = h.Write(serialized)
return h.Digest(), Error.Wrap(err)
}
// GenerateServerConf makes a server-side noise.Config from a full identity.
func GenerateServerConf(proto pb.NoiseProtocol, ident *identity.FullIdentity) (noise.Config, error) {
cfg, err := ProtoToConfig(proto)
if err != nil {
return noise.Config{}, err
}
cfg.Initiator = false
// we need a server-side keypair, and the way we're going to get it is a bit unusual.
//
// first, some context. as discussed in the noise design doc:
// https://github.com/storj/storj/blob/b022f371d24b64d9435dc02f5cc0de8bf6bff718/docs/blueprints/noise.md
// it is okay if the server key isn't always stable. we expect the noise key to
// rotate from time to time, and that a facility exists for clients to get an updated
// key. in the case of nodes, we expect this rotation to happen commonly enough that
// node checkins will update the key in the satellite's node table, but infrequently
// enough that we don't intend to do anything other than close the connection for key
// mismatch. getting a brand new key every process restart might be fine, but it is
// a tad uncomfortably often. ultimately, in the ideal case, the key is rotated with the
// same periodicity as the existing identity leaf key.
//
// so, we discussed potentially generating a key and saving it to disk, reusing it if the
// key is found, which is a nice, natural next step. there are two problems with this
// approach though:
// * when a writeable path is available (and filesystem config is threaded this far
// in the call chain), we are writing private key material, and it would be really nice
// for the storage node operator or satellite operator or whatever to know and choose
// the right permissions. we're trying to avoid operator steps with this rollout, but
// making sure the operator sets the right permission here is important. we could do
// a umask but ownership matters here too.
// * for some instances of this package, we don't always have a writable path at all
// (e.g. satellites).
//
// thinking laterally, the next idea was to try and use some existing entropy that had the
// filesystem permissions we wanted, and salt those with some randomness to generate a key
// that could be saved with world-readable permissions to a temp folder, but have the key
// require reading files that have permissions operators have already chosen.
//
// this is neat, but in the case of app servers for the satellite receiving noise
// connections (something we intend to do), how do app servers agree on the salt?
//
// so that led to here, why have the salt? the leaf private key the node already has is
// enough entropy that it's not revealable. if we used it as a complete source of
// entropy, and then made sure to initialize it with context so that you can't go
// backwards to the key without breaking cryptographic hmac, then we could extend our
// existing private key into more private keys.
//
// so in the below, we generate a blake3 generating reader from the leaf key of the existing
// key pair, then use that to deterministically generate a noise keypair.
entropy, err := identityBasedEntropy("storj noise server key", ident)
if err != nil {
return noise.Config{}, err
}
cfg.StaticKeypair, err = cfg.CipherSuite.GenerateKeypair(entropy)
if err != nil {
return noise.Config{}, Error.Wrap(err)
}
return cfg, nil
}
// GenerateInitiatorConf makes an initiator noise.Config that talks to the provided peer.
func GenerateInitiatorConf(peer *pb.NoiseInfo) (noise.Config, error) {
cfg, err := ProtoToConfig(peer.Proto)
if err != nil {
return noise.Config{}, err
}
keypair, err := cfg.CipherSuite.GenerateKeypair(rand.Reader)
if err != nil {
return noise.Config{}, err
}
cfg.StaticKeypair = keypair
cfg.PeerStatic = peer.PublicKey
cfg.Initiator = true
return cfg, nil
}
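// signablePublicKey builds the byte string that gets signed for key
// attestations: an 8-byte big-endian Unix-nanosecond timestamp (clamped at
// zero) followed by the raw Noise public key.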
func signablePublicKey(ts time.Time, key []byte) []byte {
var buf [8]byte
tsnano := ts.UnixNano()
if tsnano < 0 {
tsnano = 0
}
binary.BigEndian.PutUint64(buf[:], uint64(tsnano))
return append(buf[:], key...)
}
// GenerateKeyAttestation will sign a given Noise public key using the
// Node's leaf key and certificate chain, generating a pb.NoiseKeyAttestation.
func GenerateKeyAttestation(ctx context.Context, ident *identity.FullIdentity, info *pb.NoiseInfo) (_ *pb.NoiseKeyAttestation, err error) {
defer mon.Task()(&ctx)(&err)
ts := time.Now()
signature, err := signing.SignerFromFullIdentity(ident).HashAndSign(ctx,
append([]byte("noise-key-attestation-v1:"), signablePublicKey(ts, info.PublicKey)...))
if err != nil {
return nil, Error.Wrap(err)
}
return &pb.NoiseKeyAttestation{
DeprecatedNodeId: ident.ID,
NodeCertchain: identity.EncodePeerIdentity(ident.PeerIdentity()),
NoiseProto: info.Proto,
NoisePublicKey: info.PublicKey,
Timestamp: ts,
Signature: signature,
}, nil
}
// ValidateKeyAttestation will confirm that a provided
// *pb.NoiseKeyAttestation was signed correctly.
func ValidateKeyAttestation(ctx context.Context, attestation *pb.NoiseKeyAttestation, expectedNodeID storj.NodeID) (err error) {
defer mon.Task()(&ctx)(&err)
peer, err := identity.DecodePeerIdentity(ctx, attestation.NodeCertchain)
if err != nil {
return Error.Wrap(err)
}
if subtle.ConstantTimeCompare(peer.ID.Bytes(), expectedNodeID.Bytes()) != 1 {
return Error.New("node id mismatch")
}
signee := signing.SigneeFromPeerIdentity(peer)
unsigned := signablePublicKey(attestation.Timestamp, attestation.NoisePublicKey)
err = signee.HashAndVerifySignature(ctx,
append([]byte("noise-key-attestation-v1:"), unsigned...),
attestation.Signature)
return Error.Wrap(err)
}
// GenerateSessionAttestation will sign a given Noise session handshake
// hash using the Node's leaf key and certificate chain, generating a
// pb.NoiseSessionAttestation.
func GenerateSessionAttestation(ctx context.Context, ident *identity.FullIdentity, handshakeHash []byte) (_ *pb.NoiseSessionAttestation, err error) {
defer mon.Task()(&ctx)(&err)
signature, err := signing.SignerFromFullIdentity(ident).HashAndSign(ctx,
append([]byte("noise-session-attestation-v1:"), handshakeHash...))
if err != nil {
return nil, Error.Wrap(err)
}
return &pb.NoiseSessionAttestation{
DeprecatedNodeId: ident.ID,
NodeCertchain: identity.EncodePeerIdentity(ident.PeerIdentity()),
NoiseHandshakeHash: handshakeHash,
Signature: signature,
}, nil
}
// ValidateSessionAttestation will confirm that a provided
// *pb.NoiseSessionAttestation was signed correctly.
func ValidateSessionAttestation(ctx context.Context, attestation *pb.NoiseSessionAttestation, expectedNodeID storj.NodeID) (err error) {
defer mon.Task()(&ctx)(&err)
peer, err := identity.DecodePeerIdentity(ctx, attestation.NodeCertchain)
if err != nil {
return Error.Wrap(err)
}
if subtle.ConstantTimeCompare(peer.ID.Bytes(), expectedNodeID.Bytes()) != 1 {
return Error.New("node id mismatch")
}
signee := signing.SigneeFromPeerIdentity(peer)
err = signee.HashAndVerifySignature(ctx,
append([]byte("noise-session-attestation-v1:"), attestation.NoiseHandshakeHash...),
attestation.Signature)
return Error.Wrap(err)
}
|
// Copyright (c) 2020 - for information on the respective copyright owner
// see the NOTICE file and/or the repository at
// https://github.com/hyperledger-labs/perun-node
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package payment
import (
"context"
ppayment "perun.network/go-perun/apps/payment"
pchannel "perun.network/go-perun/channel"
"github.com/hyperledger-labs/perun-node"
"github.com/hyperledger-labs/perun-node/blockchain/ethereum"
)
type (
// PayChProposalNotif represents the channel proposal notification data for the payment app.
PayChProposalNotif struct {
ProposalID string
OpeningBalInfo perun.BalInfo
ChallengeDurSecs uint64
Expiry int64
}
// PayChProposalNotifier represents the channel proposal notification function for the payment app.
PayChProposalNotifier func(PayChProposalNotif)
)
// init() initializes the payment app in go-perun.
func init() {
wb := ethereum.NewWalletBackend()
emptyAddr, err := wb.ParseAddr("0x0")
if err != nil {
panic("Error parsing zero address for app payment def: " + err.Error())
}
ppayment.SetAppDef(emptyAddr) // dummy app def.
}
// OpenSession opens a session and interprets the restored channels as payment channels.
func OpenSession(n perun.NodeAPI, configFile string) (string, []PayChInfo, error) {
sessionID, restoredChsInfo, err := n.OpenSession(configFile)
return sessionID, toPayChsInfo(restoredChsInfo), err
}
// OpenPayCh opens a payment channel using the given sessionAPI instance with the specified parameters.
func OpenPayCh(pctx context.Context, s perun.SessionAPI, openingBalInfo perun.BalInfo, challengeDurSecs uint64) (
PayChInfo, error) {
paymentApp := perun.App{
Def: pchannel.NoApp(),
Data: pchannel.NoData(),
}
chInfo, err := s.OpenCh(pctx, openingBalInfo, paymentApp, challengeDurSecs)
return toPayChInfo(chInfo), err
}
// GetPayChsInfo returns a list of payment channel info for all the channels in this session.
func GetPayChsInfo(s perun.SessionAPI) []PayChInfo {
chsInfo := s.GetChsInfo()
payChsInfo := make([]PayChInfo, len(chsInfo))
for i := range chsInfo {
payChsInfo[i] = toPayChInfo(chsInfo[i])
}
return payChsInfo
}
// SubPayChProposals sets up a subscription for payment channel proposals.
func SubPayChProposals(s perun.SessionAPI, notifier PayChProposalNotifier) error {
return s.SubChProposals(func(notif perun.ChProposalNotif) {
notifier(PayChProposalNotif{
ProposalID: notif.ProposalID,
OpeningBalInfo: notif.OpeningBalInfo,
ChallengeDurSecs: notif.ChallengeDurSecs,
Expiry: notif.Expiry,
})
})
}
// UnsubPayChProposals deletes the existing subscription for payment channel proposals.
func UnsubPayChProposals(s perun.SessionAPI) error {
return s.UnsubChProposals()
}
// RespondPayChProposal sends the response to a payment channel proposal notification.
func RespondPayChProposal(pctx context.Context, s perun.SessionAPI, proposalID string, accept bool) (PayChInfo, error) {
chInfo, err := s.RespondChProposal(pctx, proposalID, accept)
return toPayChInfo(chInfo), err
}
// CloseSession closes the current session.
func CloseSession(s perun.SessionAPI, force bool) ([]PayChInfo, error) {
openChsInfo, err := s.Close(force)
return toPayChsInfo(openChsInfo), err
}
|
// project euler (projecteuler.net) problem 17
// solution by Kevin Retzke (retzkek@gmail.com)
// December 2012
package main
import (
"fmt"
"os"
)
var (
digits = []string{
"zero", "one", "two", "three", "four", "five",
"six", "seven", "eight", "nine", "ten", "eleven",
"twelve", "thirteen", "fourteen", "fifteen",
"sixteen", "seventeen", "eighteen", "nineteen"}
tens = []string{
"zero", "ten", "twenty", "thirty", "forty",
"fifty", "sixty", "seventy", "eighty", "ninety"}
)
func numToWords(num int) string {
var str string
if num < 20 {
str += digits[num]
} else if num < 100 {
str += tens[num/10]
if num%10 > 0 {
str = fmt.Sprintf("%v-%v", str, numToWords(num%10))
}
} else if num < 1000 {
str = fmt.Sprintf("%v hundred", digits[num/100])
if num%100 > 0 {
str = fmt.Sprintf("%v and %v", str, numToWords(num%100))
}
} else if num < 1000000 {
str = fmt.Sprintf("%v thousand", digits[num/1000])
if num%1000 > 0 {
str = fmt.Sprintf("%v and %v", str, numToWords(num%1000))
}
}
return str
}
func countLetters(str string) int {
var cnt int
for _, c := range str {
if c != ' ' && c != '-' {
cnt++
}
}
return cnt
}
func main() {
if r := countLetters(numToWords(342)); r != 23 {
fmt.Printf("Test failed. countLetters(342) = %v (should be 23)\n", r)
os.Exit(1)
}
if r := countLetters(numToWords(115)); r != 20 {
fmt.Printf("Test failed. countLetters(115) = %v (should be 20)\n", r)
os.Exit(1)
}
cnt := 0
for i := 1; i <= 1000; i++ {
cnt += countLetters(numToWords(i))
}
fmt.Println(cnt)
}
|
package goroutinePool
type signal struct {}
type f func() error
type Pool struct {
capacity int32
running int32
freeSignal chan signal
} |
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"io"
"github.com/google/gapid/core/data/endian"
"github.com/google/gapid/core/os/device"
)
var magic = [4]byte{'s', 'p', 'y', '0'}
const version = 2
// The GAPII header is defined as:
//
// const size_t MAX_PATH = 512;
//
// struct ConnectionHeader {
// uint8_t mMagic[4]; // 's', 'p', 'y', '0'
// uint32_t mVersion; // 2
// uint32_t mObserveFrameFrequency; // non-zero == enabled.
// uint32_t mObserveDrawFrequency; // non-zero == enabled.
// uint32_t mStartFrame; // non-zero == Frame to start at.
// uint32_t mNumFrames; // non-zero == Number of frames to capture.
// uint32_t mAPIs; // Bitset of APIS to enable.
// uint32_t mFlags; // Combination of FLAG_XX bits.
// char mLibInterceptorPath[MAX_PATH]; // Path to libinterceptor.so
// };
//
// All fields are encoded little-endian with no compression, regardless of
// architecture. All changes must be kept in sync with:
// platform/tools/gpu/gapii/cc/connection_header.h
func sendHeader(out io.Writer, options Options, gvrHandle uint64, libInterceptorPath string) error {
const maxPath = 512
w := endian.Writer(out, device.LittleEndian)
for _, m := range magic {
w.Uint8(m)
}
w.Uint32(version)
w.Uint32(options.ObserveFrameFrequency)
w.Uint32(options.ObserveDrawFrequency)
w.Uint32(options.StartFrame)
w.Uint32(options.FramesToCapture)
w.Uint32(options.APIs)
w.Uint32(uint32(options.Flags))
w.Uint64(gvrHandle)
var path [maxPath]byte
copy(path[:], libInterceptorPath)
w.Data(path[:])
return w.Error()
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package testutils
import (
"bytes"
"fmt"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/stretchr/testify/assert"
)
func TestScalarVars(t *testing.T) {
var md opt.Metadata
var sv ScalarVars
// toStr recreates the variable definitions from md and ScalarVars.
toStr := func() string {
var buf bytes.Buffer
for i := 0; i < md.NumColumns(); i++ {
id := opt.ColumnID(i + 1)
m := md.ColumnMeta(id)
if i > 0 {
buf.WriteString(", ")
}
fmt.Fprintf(&buf, "%s %s", m.Alias, m.Type)
if sv.NotNullCols().Contains(id) {
buf.WriteString(" not null")
}
}
return buf.String()
}
vars := "a int, b string not null, c decimal"
md.Init()
if err := sv.Init(&md, strings.Split(vars, ", ")); err != nil {
t.Fatal(err)
}
assert.Equal(t, toStr(), vars)
}
|
package controller
import (
"encoding/json"
"fmt"
"time"
"github.com/Brickchain/go-crypto.v2"
"github.com/Brickchain/go-document.v2"
jose "gopkg.in/square/go-jose.v1"
)
// VerifyMandateToken is used to verify that a mandate-token is correctly signed
func VerifyMandateToken(token string, mandateSigner *jose.JsonWebKey, keyLevel int) ([]*document.Mandate, *jose.JsonWebKey, error) {
tokenJWS, err := crypto.UnmarshalSignature([]byte(token))
if err != nil {
return nil, nil, err
}
if len(tokenJWS.Signatures) < 1 {
return nil, nil, fmt.Errorf("No signers of token")
}
clientKey := tokenJWS.Signatures[0].Header.JsonWebKey
tokenPayload, err := tokenJWS.Verify(clientKey)
if err != nil {
return nil, nil, err
}
var mandateToken *document.MandateToken
err = json.Unmarshal(tokenPayload, &mandateToken)
if err != nil {
return nil, nil, err
}
if mandateToken.Timestamp.Add(time.Second * time.Duration(mandateToken.TTL)).Before(time.Now().UTC()) {
return nil, nil, fmt.Errorf("Token has expired")
}
if mandateToken.Certificate != "" {
certChain, err := crypto.VerifyCertificate(mandateToken.Certificate, keyLevel)
if err != nil {
return nil, nil, err
}
clientKey = certChain.Issuer
}
mandates := make([]*document.Mandate, 0)
for _, mandateString := range mandateToken.Mandates {
mandateJWS, err := crypto.UnmarshalSignature([]byte(mandateString))
if err != nil {
return mandates, nil, err
}
if len(mandateJWS.Signatures) < 1 {
return mandates, nil, fmt.Errorf("No signers of mandate")
}
if crypto.Thumbprint(mandateJWS.Signatures[0].Header.JsonWebKey) != crypto.Thumbprint(mandateSigner) {
return mandates, nil, fmt.Errorf("Mandate not signed by correct key")
}
mandatePayload, err := mandateJWS.Verify(mandateSigner)
if err != nil {
return mandates, nil, err
}
var mandate *document.Mandate
if err := json.Unmarshal(mandatePayload, &mandate); err != nil {
return mandates, nil, err
}
if mandate.ValidFrom.After(time.Now().UTC()) {
return mandates, nil, fmt.Errorf("Mandate not yet valid")
}
if mandate.ValidUntil.Before(time.Now().UTC()) {
return mandates, nil, fmt.Errorf("Mandate has expired")
}
mandates = append(mandates, mandate)
}
return mandates, clientKey, err
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
)
type Objects struct {
Objects []object `json:"Objects"`
}
type object struct {
Topic string `json:"topic"`
Inner inner `json:"inner"`
}
type inner struct {
Inside1 int `json:"inside1"`
Inside2 int `json:"inside2"`
Inside3 int `json:"inside3"`
}
func main() {
f, err := os.Open("test.json")
if err != nil {
panic(err.Error())
}
defer f.Close()
log.Println("JSON file successfully opened.")
jsonToByteArr, err := ioutil.ReadAll(f)
if err != nil {
panic(err.Error())
}
log.Println("JSON file successfully read.")
var objects Objects
if err := json.Unmarshal(jsonToByteArr, &objects); err != nil {
panic(err.Error())
}
for i := 0; i < len(objects.Objects); i++ {
fmt.Println(objects.Objects[i].Topic)
fmt.Println(objects.Objects[i].Inner.Inside1)
fmt.Println(objects.Objects[i].Inner.Inside2)
fmt.Println(objects.Objects[i].Inner.Inside3)
}
//read in json using interface{}
var aMap map[string]interface{}
if err := json.Unmarshal(jsonToByteArr, &aMap); err != nil {
panic(err.Error())
}
//fmt.Println(aMap)
for key, value := range aMap {
fmt.Println("key: ", key)
fmt.Println("value: ", value)
//TODO figure out how to get the inner map; see the walkInner sketch below for one approach using type assertions
}
}
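// walkInner is a hypothetical sketch (not part of the original program)
// answering the TODO above: a JSON object decoded into interface{} becomes a
// map[string]interface{} and a JSON array becomes a []interface{}, so the
// nested "inner" object can be reached with type assertions.
func walkInner(aMap map[string]interface{}) {
objs, ok := aMap["Objects"].([]interface{})
if !ok {
return
}
for _, o := range objs {
obj, ok := o.(map[string]interface{})
if !ok {
continue
}
if inner, ok := obj["inner"].(map[string]interface{}); ok {
for k, v := range inner {
fmt.Println("inner key:", k, "inner value:", v)
}
}
}
}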
|
package sleep
import (
"testing"
"time"
)
func Test(t *testing.T) {
const sec = 2
start := time.Now()
Sleep(sec)
stop := time.Since(start).Seconds()
if stop < sec || stop > sec*1.05 {
t.Error("Incorrect sleep function")
}
}
|
package main
import (
"testing"
)
func TestEncrypt(t *testing.T) {
key := GetRandomName()
encrypted, salt, iv := Encrypt([]byte("hello, world"), key)
decrypted, err := Decrypt(encrypted, key, salt, iv)
if err != nil {
t.Error(err)
}
if string(decrypted) != "hello, world" {
t.Error("problem decrypting")
}
_, err = Decrypt(encrypted, "wrong passphrase", salt, iv)
if err == nil {
t.Error("should not work!")
}
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package litecoin
import (
"bytes"
"crypto/sha256"
"github.com/bitmark-inc/bitmarkd/fault"
"github.com/bitmark-inc/bitmarkd/util"
)
// Version - to hold the type of the address
type Version byte
// AddressBytes - to hold the fixed-length address bytes
type AddressBytes [20]byte
// from: https://en.bitcoin.it/wiki/List_of_address_prefixes
const (
Livenet Version = 48
LivenetScript Version = 5
LivenetScript2 Version = 50
Testnet Version = 111
TestnetScript Version = 196
TestnetScript2 Version = 58
vNull Version = 0xff
)
// ValidateAddress - check the address and return its version
func ValidateAddress(address string) (Version, AddressBytes, error) {
addr := util.FromBase58(address)
addressBytes := AddressBytes{}
if 25 != len(addr) {
return vNull, addressBytes, fault.InvalidLitecoinAddress
}
h := sha256.New()
h.Write(addr[:21])
d := h.Sum([]byte{})
h = sha256.New()
h.Write(d)
d = h.Sum([]byte{})
if !bytes.Equal(d[0:4], addr[21:]) {
return vNull, addressBytes, fault.InvalidLitecoinAddress
}
switch Version(addr[0]) {
case Livenet, LivenetScript, LivenetScript2, Testnet, TestnetScript, TestnetScript2:
// OK
default:
return vNull, addressBytes, fault.InvalidLitecoinAddress
}
copy(addressBytes[:], addr[1:21])
return Version(addr[0]), addressBytes, nil
}
// TransformAddress - convert address to/from new version prefix
func TransformAddress(address string) (string, error) {
version, addressBytes, err := ValidateAddress(address)
if nil != err {
return "", err
}
switch version {
case Livenet:
return address, nil
case LivenetScript:
return compose(LivenetScript2, addressBytes), nil
case LivenetScript2:
return compose(LivenetScript, addressBytes), nil
case Testnet:
return address, nil
case TestnetScript:
return compose(TestnetScript2, addressBytes), nil
case TestnetScript2:
return compose(TestnetScript, addressBytes), nil
default:
return "", fault.InvalidLitecoinAddress
}
}
// IsTestnet - detect if version is a testnet value
func IsTestnet(version Version) bool {
switch version {
case Testnet, TestnetScript, TestnetScript2:
return true
default:
return false
}
}
// build address
func compose(version Version, addressBytes AddressBytes) string {
addr := append([]byte{byte(version)}, addressBytes[:]...)
h := sha256.New()
h.Write(addr)
d := h.Sum([]byte{})
h = sha256.New()
h.Write(d)
d = h.Sum([]byte{})
addr = append(addr, d[0:4]...)
return util.ToBase58(addr)
}
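// Typical use (illustrative only; addr is assumed to be a base58-encoded
// Litecoin address supplied by the caller):
//
//    version, _, err := ValidateAddress(addr)
//    if nil != err {
//        // not a valid Litecoin address
//    } else if IsTestnet(version) {
//        // testnet address
//    }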
|
package longest_repeating_character_replacement
func characterReplacement(s string, k int) int {
// find all [start,end] that tolerate k operations to make all characters the same
//
// Create two pointers: start and end.
//
// The `end` pointer starts at 0, try to find the right most position
//
// if end - start - maxSoFar + 1 > k, we move the start pointer, and update the counter
//
//
start, end := 0, 0
counter := make(map[uint8]int)
maxLength := 0
result := 0
for ; end < len(s); end++ {
counter[s[end]]++
// if s[end] becomes the characters that occurs the most in the current window
// update the maxLength
if v := counter[s[end]]; v > maxLength {
maxLength = v
}
// end-start+1: the length of the window
// maxLength: the maximum number of the character that occurs the most in the current window
// end-start+1-maxLength: needed replacement count
for end-start+1-maxLength > k {
// move the start pointer so that:
// end - start - maxLength + 1 == k
// which makes the current window [start,end] to have exact k operations
counter[s[start]]--
start++
}
// update the result
if v := end - start + 1; v > result {
result = v
}
}
return result
}
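// For example (illustrative): characterReplacement("AABABBA", 1) == 4, since
// one replacement turns the window "ABBA" into "BBBB".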
|
package math
func Sum(intSliceParam []int) int {
total := 0
for _, x := range intSliceParam {
total += x
}
return total
}
|
package Lecture02
import (
"math/rand"
"time"
)
// QuickSort sorts inputs in ascending order using the quicksort algorithm.
func QuickSort(inputs []int) []int {
inputs = Shuffle(inputs)
hi := len(inputs) - 1
sortforquick(0, hi, inputs)
return inputs
}
func sortforquick(lo, hi int, inputs []int) {
if hi <= lo {
return
}
j := partition(lo, hi, inputs)
sortforquick(lo, j-1, inputs)
sortforquick(j+1, hi, inputs)
}
// partition partitions inputs[lo..hi] around the pivot inputs[lo] and returns the pivot's final index.
func partition(lo, hi int, inputs []int) (j int) {
lefP, rigP := lo+1, hi
v := inputs[lo]
for {
for less(inputs[lefP], v) {
lefP++
if lefP >= hi {
break
}
}
for less(v, inputs[rigP]) {
rigP--
if rigP <= lo {
break
}
}
if lefP >= rigP {
break
}
exch(lefP, rigP, inputs)
}
exch(lo, rigP, inputs)
return rigP
}
// exch swaps inputs[i] and inputs[j].
func exch(i, j int, inputs []int) {
inputs[i], inputs[j] = inputs[j], inputs[i]
}
// Shuffle returns a pseudo-randomly permuted copy of vals.
func Shuffle(vals []int) []int {
r := rand.New(rand.NewSource(time.Now().Unix()))
ret := make([]int, len(vals))
perm := r.Perm(len(vals))
for i, randIndex := range perm {
ret[i] = vals[randIndex]
}
return ret
}
|
package verifier
import (
"RSAverify2/myprime"
"math/rand"
)
type Verifier struct {
P int
Q int
N int
Nn int
Orla int
PublicKey int
PrivateKey int
A int
B int
C int
Stext int
Dtext int
}
func Gen_rand() int {
return rand.Intn(1000)
}
func Cal(a int, b int) int {
return a ^ b
}
// Gen_key generates an RSA key pair from two distinct primes in the range 10-1000.
func (r *Verifier) Gen_key() {
mp := new(myprime.MyPrime)
mp.Init()
for {
num1 := rand.Intn(mp.GetPrimeNum())
r.P = int(mp.GetPrime(num1))
for {
num1 := rand.Intn(mp.GetPrimeNum())
r.Q = int(mp.GetPrime(num1))
if r.Q != r.P {
break
}
}
if r.Q > 10 && r.P > 10 {
break
}
}
r.N = r.P * r.Q
r.Orla = (r.P - 1) * (r.Q - 1)
for {
num1 := rand.Intn(mp.GetPrimeNum())
r.PublicKey = int(mp.GetPrime(num1))
//r.PublicKey = rand.Intn(r.Orla)
if r.Orla%r.PublicKey != 0 {
break
}
}
// derive the private key with the extended Euclidean algorithm
var y int
myprime.GcdEx(r.PublicKey, r.Orla, &r.PrivateKey, &y)
if r.PrivateKey < 0 {
r.PrivateKey = (r.PrivateKey%r.Orla + r.Orla) % r.Orla
}
}
func (r *Verifier) Encrypt(m int) {
r.Stext = myprime.ModPow(m, r.PublicKey, r.N)
}
func (r *Verifier) Dcode(c int) {
r.Dtext = myprime.ModPow(c, r.PrivateKey, r.Nn)
}
func (r *Verifier) Judge() bool {
if r.B == r.Dtext {
return true
} else {
return false
}
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package task_test
import (
"testing"
"time"
"github.com/google/gapid/core/assert"
"github.com/google/gapid/core/event/task"
"github.com/google/gapid/core/log"
)
func TestEvents(t *testing.T) {
ctx := log.Testing(t)
signals := task.Events{}
in1, in1fire := task.NewSignal()
in2, in2fire := task.NewSignal()
out, outfire := task.NewSignal()
signals.Add(in1)
assert.For(ctx, "Size after 1").That(signals.Pending()).Equals(1)
signals.Add(in2)
assert.For(ctx, "Size after 2").That(signals.Pending()).Equals(2)
go func() {
signals.Wait(ctx)
assert.For(ctx, "TryWait after Wait").That(signals.TryWait(ctx, time.Millisecond)).Equals(true)
outfire(ctx)
}()
assert.For(ctx, "Out before signal 1").That(out.TryWait(ctx, ExpectBlocking)).Equals(false)
assert.For(ctx, "Pending list before signal 1").That(signals.Pending()).Equals(2)
in1fire(ctx)
assert.For(ctx, "Out between signals").That(out.TryWait(ctx, ExpectBlocking)).Equals(false)
assert.For(ctx, "Pending list after signal 1").That(signals.Pending()).Equals(1)
in2fire(ctx)
assert.For(ctx, "Out after all signals").That(out.TryWait(ctx, ExpectNonBlocking)).Equals(true)
assert.For(ctx, "Pending list after all signals").That(signals.Pending()).Equals(0)
}
|
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nvgpu
// From src/nvidia/interface/deprecated/rmapi_deprecated.h:
const (
RM_GSS_LEGACY_MASK = 0x00008000
)
// From src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h:
// +marshal
type NVXXXX_CTRL_XXX_INFO struct {
Index uint32
Data uint32
}
// From src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h:
const (
NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE = 0xd01
NV0000_CTRL_CMD_CLIENT_SET_INHERITED_SHARE_POLICY = 0xd04
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h:
const (
NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS = 0x201
NV0000_CTRL_CMD_GPU_GET_ID_INFO = 0x202
NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 = 0x205
NV0000_CTRL_CMD_GPU_GET_PROBED_IDS = 0x214
NV0000_CTRL_CMD_GPU_ATTACH_IDS = 0x215
NV0000_CTRL_CMD_GPU_DETACH_IDS = 0x216
NV0000_CTRL_CMD_GPU_GET_PCI_INFO = 0x21b
NV0000_CTRL_CMD_GPU_QUERY_DRAIN_STATE = 0x279
NV0000_CTRL_CMD_GPU_GET_MEMOP_ENABLE = 0x27b
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h:
const (
NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_INFO = 0xa04
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h:
const (
NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION = 0x101
NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS = 0x127
NV0000_CTRL_CMD_SYSTEM_GET_FABRIC_STATUS = 0x136
NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX = 0x13a
)
// +marshal
type NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS struct {
SizeOfStrings uint32
Pad [4]byte
PDriverVersionBuffer P64
PVersionBuffer P64
PTitleBuffer P64
ChangelistNumber uint32
OfficialChangelistNumber uint32
}
// From src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h:
const (
NV0080_CTRL_CMD_FB_GET_CAPS_V2 = 0x801307
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h:
const (
NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST = 0x80170d
)
// +marshal
type NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS struct {
NumChannels uint32
Pad [4]byte
PChannelHandleList P64
PChannelList P64
}
// From src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h:
const (
NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES = 0x800280
NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE = 0x800288
NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE = 0x800289
NV0080_CTRL_CMD_GPU_GET_CLASSLIST_V2 = 0x800292
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h:
// +marshal
type NV0080_CTRL_GR_ROUTE_INFO struct {
Flags uint32
Pad [4]byte
Route uint64
}
// From src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h:
const (
NV0080_CTRL_CMD_HOST_GET_CAPS_V2 = 0x801402
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h:
const (
NV2080_CTRL_CMD_BUS_GET_PCI_INFO = 0x20801801
NV2080_CTRL_CMD_BUS_GET_PCI_BAR_INFO = 0x20801803
NV2080_CTRL_CMD_BUS_GET_INFO_V2 = 0x20801823
NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS = 0x2080182a
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h:
const (
NV2080_CTRL_CMD_CE_GET_ALL_CAPS = 0x20802a0a
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h:
const (
NV2080_CTRL_CMD_FB_GET_INFO_V2 = 0x20801303
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h:
const (
NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS = 0x2080110b
NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES = 64
)
// +marshal
type NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS struct {
BDisable uint8
Pad1 [3]byte
NumChannels uint32
BOnlyDisableScheduling uint8
BRewindGpPut uint8
Pad2 [6]byte
PRunlistPreemptEvent P64
HClientList [NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES]Handle
HChannelList [NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES]Handle
}
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h:
const (
NV2080_CTRL_CMD_GPU_GET_INFO_V2 = 0x20800102
NV2080_CTRL_CMD_GPU_GET_NAME_STRING = 0x20800110
NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING = 0x20800111
NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO = 0x20800119
NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS = 0x2080012f
NV2080_CTRL_CMD_GPU_QUERY_COMPUTE_MODE_RULES = 0x20800131
NV2080_CTRL_CMD_GPU_ACQUIRE_COMPUTE_MODE_RESERVATION = 0x20800145 // undocumented; paramSize == 0
NV2080_CTRL_CMD_GPU_RELEASE_COMPUTE_MODE_RESERVATION = 0x20800146 // undocumented; paramSize == 0
NV2080_CTRL_CMD_GPU_GET_GID_INFO = 0x2080014a
NV2080_CTRL_CMD_GPU_GET_ENGINES_V2 = 0x20800170
NV2080_CTRL_CMD_GPU_GET_ACTIVE_PARTITION_IDS = 0x2080018b
NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG = 0x20800195
NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO = 0x208001a3
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h:
const (
NV2080_CTRL_CMD_GR_GET_INFO = 0x20801201
NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE = 0x20801210
NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_SIZE = 0x20801218
NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER = 0x2080121b
NV2080_CTRL_CMD_GR_GET_CAPS_V2 = 0x20801227
NV2080_CTRL_CMD_GR_GET_GPC_MASK = 0x2080122a
NV2080_CTRL_CMD_GR_GET_TPC_MASK = 0x2080122b
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h:
const (
NV2080_CTRL_CMD_GSP_GET_FEATURES = 0x20803601
)
// +marshal
type NV2080_CTRL_GR_GET_INFO_PARAMS struct {
GRInfoListSize uint32 // in elements
Pad [4]byte
GRInfoList P64
GRRouteInfo NV0080_CTRL_GR_ROUTE_INFO
}
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h:
const (
NV2080_CTRL_CMD_MC_GET_ARCH_INFO = 0x20801701
NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS = 0x20801702
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h:
const (
NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS = 0x20803002
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h:
const (
NV2080_CTRL_CMD_PERF_BOOST = 0x2080200a
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h:
const (
NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO = 0x20802209
NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS = 0x2080220c
NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG = 0x20802210
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h:
const (
NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO = 0x20800406
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl503c.h:
const (
NV503C_CTRL_CMD_REGISTER_VA_SPACE = 0x503c0102
NV503C_CTRL_CMD_REGISTER_VIDMEM = 0x503c0104
NV503C_CTRL_CMD_UNREGISTER_VIDMEM = 0x503c0105
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h:
const (
NV83DE_CTRL_CMD_DEBUG_SET_EXCEPTION_MASK = 0x83de0309
NV83DE_CTRL_CMD_DEBUG_READ_ALL_SM_ERROR_STATES = 0x83de030c
NV83DE_CTRL_CMD_DEBUG_CLEAR_ALL_SM_ERROR_STATES = 0x83de0310
)
// From src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h:
const (
NVC36F_CTRL_GET_CLASS_ENGINEID = 0xc36f0101
NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN = 0xc36f0108
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl906f.h:
const (
NV906F_CTRL_CMD_RESET_CHANNEL = 0x906f0102
)
// From src/common/sdk/nvidia/inc/ctrl/ctrl90e6.h:
const (
NV90E6_CTRL_CMD_MASTER_GET_ERROR_INTR_OFFSET_MASK = 0x90e60101
NV90E6_CTRL_CMD_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK = 0x90e60102
)
// From src/common/sdk/nvidia/inc/ctrl/ctrla06c.h:
const (
NVA06C_CTRL_CMD_GPFIFO_SCHEDULE = 0xa06c0101
NVA06C_CTRL_CMD_SET_TIMESLICE = 0xa06c0103
NVA06C_CTRL_CMD_PREEMPT = 0xa06c0105
)
|
package main
// TODO Spit the source code to several directories (cmd, util, common, api, etc.) and files
import (
// TODO Using github.com/spf13/{pflag,cobra,viper} for better parameter handling
"flag"
// TODO Using github.com/golang/glog for better logging
"encoding/json"
"fmt"
"log"
"math"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
)
// TODO move to common directory
const (
// OptRadius is the radius around the Center point in meters
OptRadius = "radius"
// OptCenterLat is the latitude of Center (a point of the distance)
OptCenterLat = "centerLat"
// OptCenterLon is the longitude of Center (a point of the distance)
OptCenterLon = "centerLon"
// OptUserName is the Cloudant user name
OptUserName = "userName"
// OptDbName is the Cloudant DB name
OptDbName = "dbName"
// OptDesignName is the Cloudant design name
OptDesignName = "designName"
// OptIndexName is the Cloudant index name
OptIndexName = "indexName"
// OptSearchLimit is the limit of provided records by Cloudant
OptSearchLimit = "searchLimit"
// OptService starts REST service, if true
OptService = "service"
// OptListenOn sets host:port where the service listens on
OptListenOn = "listenOn"
// RadDeg is 1 rad in degree: 57.295779513
RadDeg = 180.0 / math.Pi
// EarthRadius is the Earth's radius in meters
EarthRadius = 6371e3
)
// CloudantConfig contains Coudant client and DB config
type CloudantConfig struct {
userName string
dbName string
designName string
indexName string
searchLimit int
}
// ServiceConfig describes the REST service config
type ServiceConfig struct {
startService bool
listenOn string
}
// TODO Move to common directory
var (
GlobalCliRequest DistanceListRequest
GlobalCloudantConfig CloudantConfig
GlobalServiceConfig ServiceConfig
)
// GeoCoord is a 2D coordinate type (latitude, longitude)
type GeoCoord struct {
Lat float64
Lon float64
}
// DistanceListRequest contains the CLI or REST parameters for distance list calculation
type DistanceListRequest struct {
Radius float64
Center GeoCoord
}
// Airport record type (REST response)
type Airport struct {
ID string `json:"id"`
Name string `json:"name"`
Lat float64 `json:"lat"`
Lon float64 `json:"lon"`
Distance float64 `json:"distance"`
}
// DistanceListResponse contains response for the CLI or REST query
// TODO replace Errors to []error and implementing custom marshaller
type DistanceListResponse struct {
Request DistanceListRequest `json:"request"`
Errors []string `json:"errors"`
Airports []Airport `json:"airports"`
}
func main() {
if GlobalServiceConfig.startService {
startService()
} else {
executeCli()
}
}
// startService starts the REST service
func startService() {
log.Printf("Listening on %s\n", GlobalServiceConfig.listenOn)
err := http.ListenAndServe(GlobalServiceConfig.listenOn, nil)
if err != nil {
log.Fatalf("Server error: %s", err.Error())
}
}
// handleDistanceList is the handler function of /list/distance
// TODO Remove JSON indentation
func handleDistanceList(w http.ResponseWriter, r *http.Request) {
responseBytes := []byte{}
w.Header().Set("Content-Type", "application/json")
parserErrors := make([]string, 0, 3) // capacity: max. number of parser error
values := r.URL.Query()
request := DistanceListRequest{
Radius: parseFloatQueryParam(values, OptRadius, GlobalCliRequest.Radius, &parserErrors),
Center: GeoCoord{
Lat: parseFloatQueryParam(values, OptCenterLat, GlobalCliRequest.Center.Lat, &parserErrors),
Lon: parseFloatQueryParam(values, OptCenterLon, GlobalCliRequest.Center.Lon, &parserErrors),
},
}
if len(parserErrors) > 0 {
response := DistanceListResponse{
Request: request,
Errors: parserErrors,
Airports: []Airport{},
}
// TODO handling marshalling error
responseBytes, _ = json.MarshalIndent(response, "", " ")
w.WriteHeader(http.StatusPreconditionFailed)
} else {
response, err := doDistanceList(request)
// TODO handling marshalling error
responseBytes, _ = json.MarshalIndent(response, "", " ")
if err != nil {
w.WriteHeader(http.StatusPreconditionFailed)
}
}
w.Write(responseBytes)
}
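// An example request (illustrative values) against the handler above:
//
//    GET /list/distance?radius=100000&centerLat=47.4&centerLon=19.2
//
// Each query parameter falls back to the corresponding CLI flag default when
// absent; parameters that fail to parse are reported in the "errors" field of
// the JSON response together with HTTP status 412.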
// parseFloatQueryParam parses a float and adds error message to errors, if needed
func parseFloatQueryParam(values url.Values, name string, defaultValue float64, errors *[]string) float64 {
value := defaultValue
var err error
if valueStr := values.Get(name); valueStr != "" {
if value, err = strconv.ParseFloat(valueStr, 64); err != nil {
(*errors) = append(*errors, err.Error())
}
}
return value
}
// executeCli performs a single CLI operation
// TODO Remove JSON indentation
func executeCli() {
var err error
response, err := doDistanceList(GlobalCliRequest)
// TODO handling marshalling error
responseBytes, _ := json.MarshalIndent(response, "", " ")
fmt.Println(string(responseBytes))
if err != nil {
os.Exit(1)
}
}
// doDistanceList collects, filters, and orders the airports for a request.
// The returned structure also carries any error messages and is ready to be marshalled to JSON.
func doDistanceList(request DistanceListRequest) (DistanceListResponse, error) {
var err error
response := DistanceListResponse{
Request: request,
Errors: []string{},
Airports: []Airport{},
}
airports := []Airport{}
if err = checkCorrectDistanceListRequest(request); err != nil {
response.Errors = append(response.Errors, err.Error())
} else if airports, err = GetAirportRecords(request); err != nil {
response.Errors = append(response.Errors, err.Error())
} else {
calculateDistances(airports, request.Center)
response.Airports = filterByRadius(airports, request.Radius)
orderByDistance(response.Airports)
}
return response, err
}
// checkCorrectDistanceListRequest checks and corrects a DistanceListRequest
// TODO More strict and robust error handling (semantical check)
func checkCorrectDistanceListRequest(request DistanceListRequest) error {
return nil
}
// calculateDistances calculates distance between the airports and center coordinates
func calculateDistances(airports []Airport, center GeoCoord) {
for a, airport := range airports {
airports[a].Distance = calculateDistance(
airport.Lat/RadDeg, airport.Lon/RadDeg,
center.Lat/RadDeg, center.Lon/RadDeg,
)
}
}
func filterByRadius(airports []Airport, radius float64) []Airport {
filteredAirports := make([]Airport, 0, len(airports))
for _, airport := range airports {
if airport.Distance <= radius {
filteredAirports = append(filteredAirports, airport)
}
}
return filteredAirports
}
// calculateDistance calculates distance between 2 spherical coords (input in radians)
// See: https://www.movable-type.co.uk/scripts/latlong.html
func calculateDistance(lat1, lon1, lat2, lon2 float64) float64 {
R := EarthRadius
fi1 := lat1
fi2 := lat2
dFi := lat2 - lat1
dLambda := lon2 - lon1
a := math.Sin(dFi/2.0)*math.Sin(dFi/2.0) +
math.Cos(fi1)*math.Cos(fi2)*math.Sin(dLambda/2.0)*math.Sin(dLambda/2.0)
c := 2.0 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
return R * c
}
// orderByDistance sorts airports in ascending distance order
// TODO Review ordering rule if distances are equal (different names at same lat/lon position)
func orderByDistance(airports []Airport) {
sort.Slice(airports, func(i, j int) bool {
if airports[i].Distance == airports[j].Distance {
return strings.Compare(airports[i].Name, airports[j].Name) < 0
}
return airports[i].Distance < airports[j].Distance
})
}
// parseParams is called by init()
func parseParams() {
flag.Float64Var(&GlobalCliRequest.Radius, OptRadius, 500000.0, "Radius around the center point in meters")
flag.Float64Var(&GlobalCliRequest.Center.Lat, OptCenterLat, 2.0, "Latitude of Center corner")
flag.Float64Var(&GlobalCliRequest.Center.Lon, OptCenterLon, 3.0, "Longitude of Center corner")
flag.StringVar(&GlobalCloudantConfig.userName, OptUserName, "mikerhodes", "Cloudant user name")
flag.StringVar(&GlobalCloudantConfig.dbName, OptDbName, "airportdb", "Cloudant DB name")
flag.StringVar(&GlobalCloudantConfig.designName, OptDesignName, "view1", "Cloudant design name")
flag.StringVar(&GlobalCloudantConfig.indexName, OptIndexName, "geo", "Cloudant index name")
flag.IntVar(&GlobalCloudantConfig.searchLimit, OptSearchLimit, 100, "Limit of provided records by Cloudant, must not exceed 200")
flag.BoolVar(&GlobalServiceConfig.startService, OptService, false, "If true: starts REST service, instead of CLI operation")
flag.StringVar(&GlobalServiceConfig.listenOn, OptListenOn, ":8080", "host:port where the REST service listens on")
flag.Parse()
}
func init() {
parseParams()
http.HandleFunc("/list/distance", handleDistanceList)
}
|
package main
import (
"fmt"
"runtime"
"strconv"
"sync"
)
var wg sync.WaitGroup
func main() {
jobs := 10
// limit the number of concurrent worker goroutines
pool := 3
jobChans := make(chan int, pool)
for i := 0; i < pool; i++ {
go func() {
for ch := range jobChans {
fmt.Println("hello" + strconv.Itoa(ch))
wg.Done()
}
}()
}
for i := 0; i < jobs; i++ {
wg.Add(1)
jobChans <- i
fmt.Printf("index: %d, goroutine number: %d\n", i, runtime.NumGoroutine())
}
wg.Wait()
fmt.Println("done")
}
|
package ulist
import (
"net"
"net/http"
"os"
"strings"
"github.com/pirsquare/country-mapper"
)
var infoClient *country_mapper.CountryInfoClient
func init() {
const (
ipport = "127.0.0.1:25688"
loc = "vendor/github.com/pirsquare/country-mapper/files"
)
l, err := net.Listen("tcp", ipport)
if err != nil {
panic(err)
}
defer l.Close()
go func(l net.Listener) {
http.Serve(l, http.FileServer(http.Dir(loc)))
}(l)
cl, err := country_mapper.Load("http://" + ipport + "/country_info.csv")
if err != nil {
Log.Crit("unable to load country_mapper data", "err", err)
os.Exit(1)
}
infoClient = cl
}
func (ul *Ulist) GetByCountry(country string) *Ulist {
return ul.Filter(func(u User) bool {
return SameCountry(u.Address.Country, country)
})
}
const (
countryUS = "United States"
countryUK = "United Kingdom"
countryDE = "Deutschland"
countryNL = "Netherlands"
)
var countryReplacer = strings.NewReplacer(
"United States", countryUS,
"Unites States", countryUS,
"USA", countryUS,
"US", countryUS,
"United States of America", countryUS,
"North America", countryUS,
"United Kingdom", countryUK,
"Great Britain", countryUK,
"UK", countryUK,
"Deutschland", countryDE,
"Germany", countryDE,
"The Netherlands", countryNL,
)
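// For example (illustrative): NormalizedCountry("UK") and
// NormalizedCountry("Great Britain") both return "United Kingdom".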
func NormalizedCountry(country string) string {
return countryReplacer.Replace(country)
}
func ClusterCountry(country string) string {
cleaned := strings.TrimSpace(country)
d := infoClient.MapByName(cleaned)
if d == nil {
Log.Error("country not found", "str", country)
return ""
}
return d.Region
}
func SameCountry(a, b string) bool {
return NormalizedCountry(a) == NormalizedCountry(b)
}
func SameRegion(a, b string) bool {
clA := ClusterCountry(NormalizedCountry(a))
clB := ClusterCountry(NormalizedCountry(b))
return (clA == clB)
}
|
package goods
import (
"flea-market/model/dialogModel"
"flea-market/model/goodsModel"
"fmt"
"github.com/gin-gonic/gin"
"net/http"
"strconv"
)
// ListParams defines the struct that receives the query parameters for the goods list.
type ListParams struct {
CatId int `form:"cat_id" json:"cat_id" uri:"cat_id" xml:"cat_id"`
UserId int `form:"shop_id" json:"shop_id" uri:"shop_id" xml:"shop_id"`
PageSize int `form:"page_size" json:"page_size" uri:"page_size" xml:"page_size" binding:"required"`
PageNum int `form:"page_num" json:"page_num" uri:"page_num" xml:"page_num" binding:"required"`
}
func List(c *gin.Context) {
var listParams ListParams
if err := c.Bind(&listParams); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
//fmt.Println(listParams)
//where := "where status = 0 "
whereMap := make(map[string]string)
if listParams.CatId != 0 {
whereMap["cat_id"] = strconv.Itoa(listParams.CatId)
}
if listParams.UserId != 0 {
whereMap["user_id"] = strconv.Itoa(listParams.UserId)
}
where := ""
args := make([]interface{},0)
if len(whereMap) != 0 {
where = "where "
whereArr := make([]string,0)
for k,v := range whereMap {
if k == "keywords" {
whereArr = append(whereArr, fmt.Sprintf(" title like ? or content like ? " ))
args = append(args, "%"+ v + "%","%"+ v + "%")
} else {
whereArr = append(whereArr, k + " = ? ")
args = append(args, v )
}
}
for i := range whereArr {
if i == len(whereArr)-1 {
where += whereArr[i]
} else {
where += whereArr[i] + " and "
}
}
}
if count,err := goodsModel.GetCount(where,args...); err != nil {
c.JSON(http.StatusBadRequest,gin.H{"msg":err.Error()})
return
} else {
if count == 0 {
c.JSON(http.StatusOK,gin.H{"total":0,"list":[]interface{}{},"msg":"无更多内容"})
} else {
//fmt.Println(count)
if listParams.PageSize * (listParams.PageNum - 1) >= count {
c.JSON(http.StatusOK,gin.H{"total":0,"list":[]interface{}{},"msg":"无更多内容"})
return
}
where += " limit ?,? "
args = append(args, listParams.PageSize * (listParams.PageNum - 1), listParams.PageSize)
if list,err := goodsModel.GetGoods(where ,args... );err != nil {
c.JSON(http.StatusBadRequest,gin.H{"msg":err.Error()})
return
} else {
if len(list) >0 {
where := " where id in (select max(id) from f_dialog group by goods_id having goods_id in ("
for i:= 0; i< len(list);i++ {
where += strconv.Itoa(list[i].GoodsId) + ","
}
where = where[:len(where)-1] + ")) "
//fmt.Println(where)
if dialogList,err := dialogModel.GetDialogs(where);err != nil {
c.JSON(http.StatusBadRequest,gin.H{"msg":err.Error()})
return
} else {
for i := range list {
for j := range dialogList {
if list[i].GoodsId == dialogList[j].GoodsId {
list[i].NewMessage = dialogList[j]
break
}
}
}
}
c.JSON(http.StatusOK,gin.H{"list":list,"total":count,"page_size":listParams.PageSize,"msg":"成功!"})
return
}
}
}
}
} |
package k8swatch
import (
"context"
"fmt"
"net/url"
"testing"
"time"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/types"
"github.com/tilt-dev/tilt/internal/controllers/apis/cluster"
"github.com/tilt-dev/tilt/internal/controllers/fake"
"github.com/tilt-dev/tilt/internal/k8s/testyaml"
"github.com/tilt-dev/tilt/internal/store/k8sconv"
"github.com/tilt-dev/tilt/internal/testutils"
"github.com/tilt-dev/tilt/internal/testutils/manifestbuilder"
"github.com/tilt-dev/tilt/internal/testutils/servicebuilder"
"github.com/tilt-dev/tilt/internal/testutils/tempdir"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/internal/k8s"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/pkg/model"
)
func TestServiceWatch(t *testing.T) {
f := newSWFixture(t)
nodePort := 9998
uid := types.UID("fake-uid")
manifest := f.addManifest("server")
s := servicebuilder.New(f.t, manifest).
WithPort(9998).
WithNodePort(int32(nodePort)).
WithIP(string(f.nip)).
WithUID(uid).
Build()
f.addDeployedService(manifest, s)
f.kClient.UpsertService(s)
require.NoError(f.t, f.sw.OnChange(f.ctx, f.store, store.LegacyChangeSummary()))
expectedSCA := ServiceChangeAction{
Service: s,
ManifestName: manifest.Name,
URL: &url.URL{
Scheme: "http",
Host: fmt.Sprintf("%s:%d", f.nip, nodePort),
Path: "/",
},
}
f.assertObservedServiceChangeActions(expectedSCA)
}
// In many environments, we will get a Service change event
// faster than the `kubectl apply` finishes. So we need to hold onto
// the Service and dispatch an event when the UID returned by `kubectl apply`
// shows up.
func TestServiceWatchUIDDelayed(t *testing.T) {
f := newSWFixture(t)
uid := types.UID("fake-uid")
manifest := f.addManifest("server")
// the watcher won't start until it has a deployed object ref to find a namespace to watch in
// so we need to create at least one first
dummySvc := servicebuilder.New(t, manifest).WithUID("placeholder").Build()
f.kClient.UpsertService(dummySvc)
f.addDeployedService(manifest, dummySvc)
_ = f.sw.OnChange(f.ctx, f.store, store.LegacyChangeSummary())
// this service should be seen by the watcher even though it's not yet referenced by the manifest
s := servicebuilder.New(f.t, manifest).
WithUID(uid).
Build()
f.kClient.UpsertService(s)
f.waitUntilServiceKnown(uid)
// once it's referenced by the manifest, an event should get emitted
f.addDeployedService(manifest, s)
expected := []ServiceChangeAction{
{
Service: dummySvc,
ManifestName: manifest.Name,
},
{
Service: s,
ManifestName: manifest.Name,
},
}
f.assertObservedServiceChangeActions(expected...)
}
func TestServiceWatchClusterChange(t *testing.T) {
f := newSWFixture(t)
port := int32(1234)
uid := types.UID("fake-uid")
manifest := f.addManifest("server")
s := servicebuilder.New(f.t, manifest).
WithPort(port).
WithNodePort(9998).
WithIP(string(f.nip)).
WithUID(uid).
Build()
f.addDeployedService(manifest, s)
f.kClient.UpsertService(s)
expectedSCA := ServiceChangeAction{
Service: s,
ManifestName: manifest.Name,
URL: &url.URL{
Scheme: "http",
Host: fmt.Sprintf("%s:%d", f.nip, port),
Path: "/",
},
}
f.assertObservedServiceChangeActions(expectedSCA)
f.store.ClearActions()
newClusterClient := k8s.NewFakeK8sClient(t)
newSvc := s.DeepCopy()
port = 4567
newSvc.Spec.Ports[0].NodePort = 9997
newSvc.Spec.Ports[0].Port = port
newClusterClient.UpsertService(newSvc)
clusterNN := types.NamespacedName{Name: "default"}
// add the new client to the provider so the watcher picks it up on the next change
f.clients.SetK8sClient(clusterNN, newClusterClient)
_, createdAt, err := f.clients.GetK8sClient(clusterNN)
require.NoError(t, err, "Could not get cluster client hash")
state := f.store.LockMutableStateForTesting()
state.Clusters["default"].Status.ConnectedAt = createdAt.DeepCopy()
f.store.UnlockMutableState()
err = f.sw.OnChange(f.ctx, f.store, store.ChangeSummary{
Clusters: store.NewChangeSet(clusterNN),
})
require.NoError(t, err, "OnChange failed")
f.assertObservedServiceChangeActions(ServiceChangeAction{
Service: newSvc,
ManifestName: manifest.Name,
URL: &url.URL{
Scheme: "http",
Host: fmt.Sprintf("%s:%d", f.nip, port),
Path: "/",
},
})
}
func (f *swFixture) addManifest(manifestName model.ManifestName) model.Manifest {
state := f.store.LockMutableStateForTesting()
defer f.store.UnlockMutableState()
m := manifestbuilder.New(f, manifestName).
WithK8sYAML(testyaml.SanchoYAML).
Build()
state.UpsertManifestTarget(store.NewManifestTarget(m))
return m
}
func (f *swFixture) addDeployedService(m model.Manifest, svc *v1.Service) {
defer func() {
require.NoError(f.t, f.sw.OnChange(f.ctx, f.store, store.LegacyChangeSummary()))
}()
state := f.store.LockMutableStateForTesting()
defer f.store.UnlockMutableState()
mState, ok := state.ManifestState(m.Name)
if !ok {
f.t.Fatalf("Unknown manifest: %s", m.Name)
}
runtimeState := mState.K8sRuntimeState()
runtimeState.ApplyFilter = &k8sconv.KubernetesApplyFilter{
DeployedRefs: k8s.ObjRefList{k8s.NewK8sEntity(svc).ToObjectReference()},
}
mState.RuntimeState = runtimeState
}
type swFixture struct {
*tempdir.TempDirFixture
t *testing.T
clients *cluster.FakeClientProvider
kClient *k8s.FakeK8sClient
nip k8s.NodeIP
sw *ServiceWatcher
ctx context.Context
cancel func()
store *store.TestingStore
}
func newSWFixture(t *testing.T) *swFixture {
nip := k8s.NodeIP("fakeip")
ctx, _, _ := testutils.CtxAndAnalyticsForTest()
ctx, cancel := context.WithCancel(ctx)
clients := cluster.NewFakeClientProvider(t, fake.NewFakeTiltClient())
kClient := clients.EnsureDefaultK8sCluster(ctx)
kClient.FakeNodeIP = nip
sw := NewServiceWatcher(clients, k8s.DefaultNamespace)
st := store.NewTestingStore()
state := st.LockMutableStateForTesting()
_, createdAt, err := clients.GetK8sClient(types.NamespacedName{Name: "default"})
require.NoError(t, err, "Failed to get default cluster client hash")
state.Clusters["default"] = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
},
Spec: v1alpha1.ClusterSpec{
Connection: &v1alpha1.ClusterConnection{
Kubernetes: &v1alpha1.KubernetesClusterConnection{},
},
},
Status: v1alpha1.ClusterStatus{
Arch: "fake-arch",
ConnectedAt: createdAt.DeepCopy(),
},
}
st.UnlockMutableState()
ret := &swFixture{
TempDirFixture: tempdir.NewTempDirFixture(t),
clients: clients,
kClient: kClient,
sw: sw,
nip: nip,
ctx: ctx,
cancel: cancel,
t: t,
store: st,
}
t.Cleanup(ret.TearDown)
return ret
}
func (f *swFixture) TearDown() {
f.cancel()
f.store.AssertNoErrorActions(f.t)
}
func (f *swFixture) assertObservedServiceChangeActions(expectedSCAs ...ServiceChangeAction) {
f.t.Helper()
start := time.Now()
for time.Since(start) < time.Second {
actions := f.store.Actions()
if len(actions) == len(expectedSCAs) {
break
}
}
var observedSCAs []ServiceChangeAction
for _, a := range f.store.Actions() {
sca, ok := a.(ServiceChangeAction)
if !ok {
f.t.Fatalf("got non-%T: %v", ServiceChangeAction{}, a)
}
observedSCAs = append(observedSCAs, sca)
}
if !assert.Equal(f.t, expectedSCAs, observedSCAs) {
f.t.FailNow()
}
}
func (f *swFixture) waitUntilServiceKnown(uid types.UID) {
clusterNN := types.NamespacedName{Name: v1alpha1.ClusterNameDefault}
start := time.Now()
for time.Since(start) < time.Second {
f.sw.mu.Lock()
_, known := f.sw.knownServices[clusterUID{cluster: clusterNN, uid: uid}]
f.sw.mu.Unlock()
if known {
return
}
time.Sleep(10 * time.Millisecond)
}
f.t.Fatalf("timeout waiting for service with UID: %s", uid)
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package meta
import (
"context"
"github.com/google/go-cmp/cmp"
"chromiumos/tast/rpc"
"chromiumos/tast/services/cros/meta"
"chromiumos/tast/testing"
"chromiumos/tast/testutil"
)
func init() {
testing.AddTest(&testing.Test{
Func: ServiceOutput,
Desc: "Ensure OutDir works for gRPC services",
Contacts: []string{"nya@chromium.org", "tast-owners@google.com"},
Attr: []string{"group:mainline"},
ServiceDeps: []string{"tast.cros.meta.FileOutputService"},
})
}
func ServiceOutput(ctx context.Context, s *testing.State) {
cl, err := rpc.Dial(ctx, s.DUT(), s.RPCHint())
if err != nil {
s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
}
defer cl.Close(ctx)
want := map[string]string{
"a.txt": "foo",
"dir/b.txt": "bar",
}
oc := meta.NewFileOutputServiceClient(cl.Conn)
if _, err := oc.SaveOutputFiles(ctx, &meta.SaveOutputFilesRequest{Files: want}); err != nil {
s.Fatal("SaveOutputs RPC failed: ", err)
}
got, err := testutil.ReadFiles(s.OutDir())
if err != nil {
s.Fatal("Failed to read OutDir: ", err)
}
if diff := cmp.Diff(got, want); diff != "" {
s.Error("OutDir content mismatch: ", diff)
}
}
|
package parser
import (
"errors"
"strings"
)
// ParseList parses a list token and returns its elements as strings.
func (p *Parser) ParseList() (result []string, err error) {
token := p.scnr.Peak()
if !token.IsList() {
return result, errors.New("called ParseList without the beginning token being a list start")
}
token.Data = trimListPrefix(token.Data)
for {
if token.IsList() {
trimmed := trimListSuffix(token.Data)
p.scnr.SetToken(trimmed)
token = p.scnr.Peak()
if token.IsString() {
str, err := p.ParseString()
if err != nil {
return result, err
}
result = append(result, str)
}
return result, err
}
token = p.scnr.Peak()
if token.IsString() {
str, err := p.ParseString()
if err != nil {
return result, err
}
result = append(result, str)
}
token, err = p.scnr.Next()
if err != nil {
return result, err
}
}
}
func trimListPrefix(token string) (result string) {
result = strings.TrimLeftFunc(token, func(input rune) bool {
return input == '['
})
return result
}
func trimListSuffix(token string) (result string) {
result = strings.TrimRightFunc(token, func(input rune) bool {
return input == ']'
})
return result
}
|
package tiltfile
import (
"context"
dockertypes "github.com/docker/docker/api/types"
"github.com/tilt-dev/tilt/internal/analytics"
)
// reportDockerConnectionEvent records a metric about Docker connectivity.
func (r *Reconciler) reportDockerConnectionEvent(ctx context.Context, success bool, serverVersion dockertypes.Version) {
r.dockerConnectMetricReporter.Do(func() {
var status string
if success {
status = "connected"
} else {
status = "error"
}
tags := map[string]string{
"status": status,
}
if serverVersion.Version != "" {
tags["server.version"] = serverVersion.Version
}
if serverVersion.Arch != "" {
tags["server.arch"] = serverVersion.Arch
}
analytics.Get(ctx).Incr("api.tiltfile.docker.connect", tags)
})
}
|
package parsefb
import (
"github.com/PuerkitoBio/goquery"
"strings"
)
type FBPostData struct {
PostUrl string
TimeStamp string
ProfileLink *ProfileLink
ImageUrl string
Content string
Summary string
Title string
Author string
Tags string
}
func ParsePost(s, posturl string) (*FBPostData, error) {
fb := FBPostData{PostUrl: posturl}
doc, err := goquery.NewDocumentFromReader(strings.NewReader(s))
if err != nil {
return &fb, err
}
fb.TimeStamp, err = GetTimeStamp(doc)
if err != nil {
return &fb, err
}
fb.ProfileLink, err = GetProfileLink(doc)
if err != nil {
return &fb, err
}
fb.ImageUrl, err = GetImageUrl(doc)
if err != nil {
return &fb, err
}
fb.Content, err = GetContent(doc)
if err != nil {
return &fb, err
}
return &fb, nil
}
func Parse(url string) (*FBPostData, error) {
doc, err := goquery.NewDocument(url)
if err != nil {
return nil, err
}
if strings.Contains(url, ".blogspot.") {
return ParseBlogspotPost(doc)
}
// If not logged in, the post looks like:
// <div class="hidden_elem"><code id="u_0_p"><!-- ... --></code></div>
s := QuerySelector(doc, "div.hidden_elem > code")
cmt, err := s.Html()
if err != nil {
return nil, err
}
return ParsePost(cmt[5:len(cmt)-4], url)
}
|
package problem0055
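// canJump reports whether the last index is reachable (LeetCode 55, Jump Game).
// It scans backwards, keeping `end` as the left-most index currently known to reach
// the finish; index i can reach the finish exactly when nums[i] covers the gap to `end`.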
func canJump(nums []int) bool {
if len(nums) == 1 {
return true
}
reachable := make([]bool, len(nums))
end := len(nums) - 1
for i := len(nums) - 2; i >= 0; i-- {
dist := end - i
if nums[i]-dist >= 0 {
reachable[i] = true
end = i
}
}
return reachable[0]
}
|
package main
import (
"bytes"
"fmt"
"github.com/madebymany/gowasd"
"log"
"strconv"
"strings"
)
type formatter interface {
printResolvedInstances([]gowasd.InstanceResolution)
printResolvedInstance(gowasd.InstanceResolution)
canOutputList() bool
}
type terminalFormatter struct {
output *log.Logger
}
type postgresEnvVarFormatter struct {
output *log.Logger
}
var postgresPropertyNames []string = []string{"database", "user", "password", "passfile", "service", "servicefile", "realm", "options", "appname", "sslmode", "requiressl", "sslcompression", "sslcert", "sslkey", "sslrootcert", "sslcrl", "requirepeer", "krbsrvname", "gsslib", "connect_timeout", "clientencoding", "datestyle", "tz", "geqo", "sysconfdir", "localedir"}
func (self terminalFormatter) printResolvedInstances(instances []gowasd.InstanceResolution) {
for n, i := range instances {
self.printResolvedInstance(i)
if n < len(instances)-1 {
self.output.Println("")
}
}
}
func (self terminalFormatter) printResolvedInstance(i gowasd.InstanceResolution) {
for _, e := range i.Targets {
port := strconv.Itoa(e.Port)
self.output.Print(fmt.Sprintf("⌁ %s\t%s\t%-s", i.DnsName(), e.Host, port))
}
for version, r := range i.Properties {
var fields = make([][]string, len(i.Properties[version]))
var count = 0
for k, v := range r {
fields[count] = []string{strconv.Itoa(version), k, v}
count++
}
self.output.Print(formatTable(fields, "✎ "))
}
return
}
func (self terminalFormatter) canOutputList() bool {
return true
}
func (self postgresEnvVarFormatter) printResolvedInstances(instances []gowasd.InstanceResolution) {
panic("unreachable")
}
func (self postgresEnvVarFormatter) printEnvVar(k, v string) {
v = strings.Replace(v, "'", "'\\''", -1)
self.output.Printf("export PG%s='%s'", strings.ToUpper(k), v)
}
func (self postgresEnvVarFormatter) printResolvedInstance(i gowasd.InstanceResolution) {
// XXX: no support for choosing a non-primary endpoint
ep := i.Targets[0]
self.printEnvVar("host", ep.Host)
self.printEnvVar("port", strconv.Itoa(ep.Port))
/* As far as I can see, the only inconsistency between environment variable
* and "connection parameter" names is in dbname/database. So I'll cover
* that one here.
*/
if v, ok := i.Properties[*version]["dbname"]; ok {
self.printEnvVar("database", v)
}
for _, n := range postgresPropertyNames {
if v, ok := i.Properties[*version][n]; ok {
self.printEnvVar(n, v)
}
}
}
func (self postgresEnvVarFormatter) canOutputList() bool {
return false
}
func formatTable(fields [][]string, linePrefix string) (out string) {
if len(fields) == 0 {
return
}
outBuf := new(bytes.Buffer)
numFields := len(fields[0])
maxIndex := numFields - 1
maxWidths := make([]int, numFields)
for _, f := range fields {
for i, c := range f {
if lenc := len(c); lenc > maxWidths[i] {
maxWidths[i] = lenc
}
}
}
for _, f := range fields {
outBuf.WriteString(linePrefix)
for i := 0; i < numFields; i++ {
c := f[i]
outBuf.WriteString(c)
if i < maxIndex {
outBuf.Write(
bytes.Repeat([]byte(" "), maxWidths[i]-len(c)+2))
}
}
outBuf.WriteRune('\n')
}
out = outBuf.String()
return
}
|
package syslog
import (
"log"
"os"
"os/signal"
"regexp"
"syscall"
"time"
"github.com/hpcloud/tail"
)
const timeFormat = "Jan 2 15:04:05"
var reply = regexp.MustCompile("^(.+) [a-z]+ dnsmasq.+: reply ([^ ]+) is ([^ ]+)")
var query = regexp.MustCompile("^(.+) [a-z]+ dnsmasq.+: query.A. ([^ ]+) from ([^ ]+)")
var ack = regexp.MustCompile("^(.+) [a-z]+ dnsmasq-dhcp.+: DHCPACK.+ ([^ ]+) ([^ ]+) ([^ ]+)")
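// Illustrative dnsmasq syslog lines that these patterns are meant to match (examples, not from the source):
//   Jan  2 15:04:05 host dnsmasq[123]: query[A] example.com from 192.168.1.10
//   Jan  2 15:04:05 host dnsmasq[123]: reply example.com is 93.184.216.34
//   Jan  2 15:04:05 host dnsmasq-dhcp[123]: DHCPACK(eth0) 192.168.1.10 aa:bb:cc:dd:ee:ff myhost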
// Device : A representation of a DHCP request
type Device struct {
At *time.Time
Hostname string
Mac string
IP string
}
// Request : A representation of a DNS request
type Request struct {
At *time.Time
Host string
Source string
Aliases map[string]string
}
// Tail will tail the log and send a device / request
// to the appropriate channel when it is found
func Tail(path string) (chan *Device, chan *Request, error) {
devices := make(chan *Device, 3)
requests := make(chan *Request, 10)
t, err := tail.TailFile(path, tail.Config{ReOpen: true, Follow: true})
if err != nil {
return nil, nil, err
}
go processFile(t, devices, requests)
return devices, requests, nil
}
func setupExitListener(t *tail.Tail) {
signals := make(chan os.Signal)
signal.Notify(signals, syscall.SIGINT)
go func() {
<-signals
t.Stop()
}()
}
func parseQuery(requests chan *Request, match *[]string) *Request {
at, _ := time.Parse(timeFormat, (*match)[1])
latest := &Request{&at, (*match)[2], (*match)[3], map[string]string{}}
log.Printf("Found request: %v\n", latest)
requests <- latest
return latest
}
func parseReply(latest *Request, match *[]string) {
latest.Aliases[(*match)[3]] = (*match)[2]
}
func parseAck(devices chan *Device, match *[]string) {
at, _ := time.Parse(timeFormat, (*match)[1])
device := &Device{&at, (*match)[4], (*match)[3], (*match)[2]}
log.Printf("Found device: %v\n", device)
devices <- device
}
func processFile(t *tail.Tail, devices chan *Device, requests chan *Request) {
var current *Request
count := 0
for line := range t.Lines {
if count%100 == 0 {
log.Printf("%d lines read\n", count)
}
count++
if match := query.FindStringSubmatch(line.Text); match != nil {
current = parseQuery(requests, &match)
} else if match = reply.FindStringSubmatch(line.Text); match != nil {
if current != nil { // guard: a reply may arrive before any query has been seen
parseReply(current, &match)
}
} else if match = ack.FindStringSubmatch(line.Text); match != nil {
parseAck(devices, &match)
}
}
}
|
package vmodel
import (
"database/sql"
"errors"
"strings"
"gopkg.in/mgo.v2"
)
var (
// ErrCode is a config or an internal error
ErrCode = errors.New("Case statement in code is not correct.")
// ErrNoResult is a not results error
ErrNoResult = errors.New("Result not found.")
// ErrUnavailable is a database not available error
ErrUnavailable = errors.New("Database is unavailable.")
// ErrUnauthorized is a permissions violation
ErrUnauthorized = errors.New("User does not have permission to perform this operation.")
)
// standardizeErrors returns the same error regardless of the database used
func standardizeError(err error) error {
if err == sql.ErrNoRows || err == mgo.ErrNotFound {
return ErrNoResult
}
return err
}
// IsDuplicateEntry returns true if the error is a "Duplicate entry" error
func IsDuplicateEntry(err error) bool {
// ERROR 1062 (23000): Duplicate entry 'bbb@aaa.aaa' for key 'email'
if strings.Contains(err.Error(), "Duplicate entry") {
return true
}
return false
}
// if driverErr, ok := err.(*mysql.MySQLError); ok { // Now the error number is accessible directly
// if driverErr.Number == 1045 {
// // Handle the permission-denied error
// }
// }
|
package scanner
import (
"github.com/bingo-lang/bingo/token"
"io"
)
type Scanner struct {
source io.RuneReader
char rune
eof bool
}
func New(source io.RuneReader) *Scanner {
scanner := &Scanner{source: source}
scanner.advance()
return scanner
}
func (s *Scanner) ScanToken() token.Token {
s.removeSpace()
token := s.scanToken()
s.removeSpace()
return token
}
|
package cfg
import (
"os"
"strings"
)
type Config struct {
Host string
Port string
DBName string
VerificationsCollection string
ConfigBaseURL string
ConsumeBaseURL string
MongoAddr string
}
func New() *Config {
// default values
c := &Config{
Host: "localhost",
Port: "8080",
DBName: "anagog",
VerificationsCollection: "verifications",
ConfigBaseURL: "",
ConsumeBaseURL: "",
MongoAddr: "mongodb://localhost:27017",
}
if os.Getenv("HOST") != "" {
c.Host = os.Getenv("HOST")
}
if os.Getenv("PORT") != "" {
c.Port = os.Getenv("PORT")
}
if os.Getenv("DB_NAME") != "" {
c.DBName = os.Getenv("DB_NAME")
}
if os.Getenv("VERIFICATION_COLLECTION") != "" {
c.VerificationsCollection = os.Getenv("VERIFICATION_COLLECTION")
}
if os.Getenv("CONFIG_BASE_URL") != "" {
c.ConfigBaseURL = strings.TrimSuffix(os.Getenv("CONFIG_BASE_URL"), "/")
}
if os.Getenv("CONSUME_BASE_URL") != "" {
c.ConsumeBaseURL = strings.TrimSuffix(os.Getenv("CONSUME_BASE_URL"), "/")
}
if os.Getenv("MONGO_ADDR") != "" {
c.MongoAddr = os.Getenv("MONGO_ADDR")
}
return c
}
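// Usage sketch: defaults are overridden per field via environment variables, e.g.
//   os.Setenv("PORT", "9090")
//   os.Setenv("CONFIG_BASE_URL", "https://config.example.com/")
//   c := New() // c.Port == "9090", c.ConfigBaseURL == "https://config.example.com" (trailing "/" trimmed)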
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package youtubemusic contains local Tast tests that exercise ytmusic.
package youtubemusic
import (
"context"
"time"
"chromiumos/tast/common/android/ui"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/arc/apputil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
const (
// AppName is the name of ARC app.
AppName = "YT Music"
// PkgName is the package name of ARC app.
PkgName = "com.google.android.apps.youtube.music"
idPrefix = PkgName + ":id/"
searchBtnObjID = idPrefix + "action_search_button"
searchTextObjID = idPrefix + "search_edit_text"
songBtnObjID = idPrefix + "chip_cloud_chip_text"
songNameObjID = idPrefix + "title"
subtitleObjID = idPrefix + "subtitle"
playerObjID = idPrefix + "player_control_play_pause_replay_button"
shorUITimeout = 5 * time.Second
defaultUITimeout = 30 * time.Second
longUITimeout = time.Minute
)
// YouTubeMusic holds resources of ARC app YT Music.
type YouTubeMusic struct {
*apputil.App
playingSong string
player *ui.Object
playerPlaying *ui.Object
playerPaused *ui.Object
}
var _ apputil.ARCMediaPlayer = (*YouTubeMusic)(nil)
// New returns YT Music instance.
func New(ctx context.Context, kb *input.KeyboardEventWriter, tconn *chrome.TestConn, a *arc.ARC) (*YouTubeMusic, error) {
app, err := apputil.NewApp(ctx, kb, tconn, a, AppName, PkgName)
if err != nil {
return nil, errors.Wrap(err, "failed to create arc resource")
}
return &YouTubeMusic{
App: app,
player: app.Device.Object(ui.ID(playerObjID)),
playerPlaying: app.Device.Object(ui.ID(playerObjID), ui.Description("Pause video")),
playerPaused: app.Device.Object(ui.ID(playerObjID), ui.Description("Play video")),
}, nil
}
// Close removes the play record and closes the YouTube Music app.
func (yt *YouTubeMusic) Close(ctx context.Context, cr *chrome.Chrome, hasError func() bool, outDir string) error {
if err := yt.RemovePlayRecord(ctx); err != nil {
testing.ContextLog(ctx, "Failed to remove play record: ", err)
}
return yt.App.Close(ctx, cr, hasError, outDir)
}
// Play searches the given song and plays it.
func (yt *YouTubeMusic) Play(ctx context.Context, song *apputil.Media) error {
if err := yt.SkipPrompts(ctx); err != nil {
return err
}
if err := uiauto.Combine("search a new song to play",
yt.Search(song.Query),
apputil.FindAndClick(yt.Device.Object(ui.ID(songBtnObjID), ui.Text("Songs")), defaultUITimeout),
apputil.FindAndClick(yt.Device.Object(ui.ID(subtitleObjID), ui.Text(song.Subtitle)), defaultUITimeout), // Multiple songs with the same title might exist, hence, the subtitle is used.
)(ctx); err != nil {
return err
}
// Verify YouTubeMusic is playing.
// Long duration is essential as it is often that low end DUT takes a while to load the audio content to play.
if err := apputil.WaitForExists(yt.playerPlaying, longUITimeout)(ctx); err != nil {
return errors.Wrap(err, "failed to verify YouTubeMusic is playing")
}
yt.playingSong = song.Query
return nil
}
// PlayVideo searches the given video and plays it.
func (yt *YouTubeMusic) PlayVideo(ctx context.Context, media *apputil.Media) error {
if err := yt.SkipPrompts(ctx); err != nil {
return errors.Wrap(err, "failed to skip prompts")
}
if err := uiauto.Combine("search the video to play",
yt.Search(media.Query),
apputil.FindAndClick(yt.Device.Object(ui.ID(songBtnObjID), ui.Text("Videos")), defaultUITimeout),
apputil.FindAndClick(yt.Device.Object(ui.ID(songNameObjID), ui.Text(media.Subtitle)), defaultUITimeout),
)(ctx); err != nil {
return err
}
// Verify YouTubeMusic is playing.
// Long duration is essential as it is often that low end DUT takes a while to load the audio content to play.
if err := apputil.WaitForExists(yt.playerPlaying, longUITimeout)(ctx); err != nil {
return errors.Wrap(err, "failed to verify YouTubeMusic is playing")
}
yt.playingSong = media.Query
return nil
}
// RemovePlayRecord stops playing and removes the play record to avoid the mini player showing next time the app is launched.
// If the app launches with the mini player showing, uiautomator cannot become idle and therefore
// cannot examine the UI hierarchy or operate on any object.
func (yt *YouTubeMusic) RemovePlayRecord(ctx context.Context) error {
if yt.playingSong == "" {
return errors.New("app YouTubeMusic is not playing")
}
if err := uiauto.Combine("stop and remove play record",
apputil.ClickIfExist(yt.playerPlaying, defaultUITimeout),
apputil.SwipeRight(yt.Device.Object(ui.ID(songNameObjID), ui.Text(yt.playingSong)), 3),
apputil.WaitUntilGone(yt.player, defaultUITimeout),
)(ctx); err != nil {
return err
}
yt.playingSong = ""
return nil
}
// Pause stops youtube music.
func (yt *YouTubeMusic) Pause(ctx context.Context) error {
if err := apputil.FindAndClick(yt.playerPlaying, defaultUITimeout)(ctx); err != nil {
return errors.Wrap(err, "failed to pause")
}
if exist, err := apputil.CheckObjectExists(ctx, yt.playerPaused, defaultUITimeout); err != nil {
return errors.Wrap(err, "failed to verify YouTubeMusic is paused")
} else if !exist {
return errors.Errorf("the YouTube Music is not paused within %s", defaultUITimeout)
}
return nil
}
// Resume resumes youtube music.
func (yt *YouTubeMusic) Resume(ctx context.Context) error {
if err := apputil.FindAndClick(yt.playerPaused, defaultUITimeout)(ctx); err != nil {
return errors.Wrap(err, "failed to resume")
}
// Long duration is essential as it is often that low end DUT takes a while to load the audio content to play.
if exist, err := apputil.CheckObjectExists(ctx, yt.playerPlaying, longUITimeout); err != nil {
return errors.Wrap(err, "failed to verify YouTubeMusic is playing")
} else if !exist {
return errors.Errorf("the YouTube Music is not resumed within %s", longUITimeout)
}
return nil
}
// SkipPrompts skips multiple prompts.
// It clicks buttons to close any redundant windows that appear, but does not stop the test if no window appears.
func (yt *YouTubeMusic) SkipPrompts(ctx context.Context) error {
testing.ContextLog(ctx, "Clearing prompts")
if err := apputil.DismissMobilePrompt(ctx, yt.Tconn); err != nil {
return errors.Wrap(err, `failed to dismiss "This app is designed for mobile" prompt`)
}
prompts := []struct {
obj *ui.Object
name string
cleared bool
}{
{yt.Device.Object(ui.Text("DISMISS")), "DISMISS", false},
{yt.Device.Object(ui.TextStartsWith("SKIP")), "SKIP", false},
{yt.Device.Object(ui.DescriptionStartsWith("SKIP")), "SKIP", false},
{yt.Device.Object(ui.Text("NO, THANKS")), "NO, THANKS", false},
{yt.Device.Object(ui.Text("NO THANKS")), "NO THANKS", false},
{yt.Device.Object(ui.Description("NO THANKS")), "NO THANKS", false},
{yt.Device.Object(ui.Description("Close")), "Close", false},
{yt.Device.Object(ui.Text("NOT NOW")), "NOT NOW", false},
}
// The prompts appear at random times. Instead of waiting a long time for each
// prompt, we repeatedly check the prompts with short wait time but high frequency.
// The total check time is controlled under 30 seconds.
totalClearTime := 30 * time.Second
totalCleared := 0
timeout := 2 * time.Second
clearFail := false // Indicates whether a UI error occurred while clearing prompts.
err := testing.Poll(ctx, func(c context.Context) error {
// Iterate by index so that marking a prompt as cleared persists across polls;
// ranging by value would modify a copy and the same prompt would be clicked repeatedly.
for i := range prompts {
prompt := &prompts[i]
if prompt.cleared {
continue
}
if err := prompt.obj.WaitForExists(ctx, timeout); err != nil {
if ui.IsTimeout(err) {
continue
}
clearFail = true
return testing.PollBreak(errors.Wrap(err, "failed to wait for the target object"))
}
if err := prompt.obj.Click(ctx); err != nil {
clearFail = true
return testing.PollBreak(errors.Wrap(err, "failed to click ui object to clear prompts"))
}
prompt.cleared = true
totalCleared++
testing.ContextLogf(ctx, "Prompt %q has been cleared", prompt.name)
}
if totalCleared >= len(prompts) {
return nil
}
return errors.New("not all prompts have been cleared")
}, &testing.PollOptions{Timeout: totalClearTime, Interval: time.Second})
testing.ContextLogf(ctx, "Total %d prompt(s) have been cleared", totalCleared)
if err != nil && clearFail {
return errors.Wrap(err, "failed to clear prompts")
}
// All prompts have been cleared, or timed out to wait for prompts to occur.
return nil
}
// Search searches the source to play.
func (yt *YouTubeMusic) Search(query string) uiauto.Action {
return uiauto.Combine("search for the source to play",
apputil.FindAndClick(yt.Device.Object(ui.ID(searchBtnObjID)), defaultUITimeout),
apputil.FindAndClick(yt.Device.Object(ui.ID(searchTextObjID)), defaultUITimeout),
yt.KB.TypeAction(query),
yt.KB.AccelAction("Enter"),
)
}
|
package historyentries
import (
"bufio"
"strconv"
"strings"
logrus "github.com/Sirupsen/logrus"
"github.com/9seconds/ah/app/environments"
"github.com/9seconds/ah/app/utils"
)
var (
bashTimestampRegexp = utils.CreateRegexp(`^#\s*\d+$`)
zshLineRegexp = utils.CreateRegexp(`^: (\d+):\d;(.*?)$`)
fishCmdRegexp = utils.CreateRegexp(`^- cmd:\s*(.*?)$`)
fishWhenRegexp = utils.CreateRegexp(`\s*when:\s*(\d+)$`)
)
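// Illustrative history-file lines these regexps target (examples, not from the source):
//   bash (with HISTTIMEFORMAT set):
//     #1445543603
//     ls -la
//   zsh (EXTENDED_HISTORY):
//     : 1445543603:0;ls -la
//   fish:
//     - cmd: ls -la
//       when: 1445543603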
type (
// Parser is a signature for a function which parses a history file and returns a Keeper.
Parser func(Keeper, *bufio.Scanner, *utils.Regexp, chan *HistoryEntry) (Keeper, error)
// ShellSpecificParser is a signature for a function which implements shell specific logic for parsing.
ShellSpecificParser func(Keeper, string, uint, *HistoryEntry, *utils.Regexp, chan *HistoryEntry) (bool, uint, *HistoryEntry)
)
func getParser(env *environments.Environment) Parser {
var currentNumber uint
var shellSpecific ShellSpecificParser
switch env.Shell {
case environments.ShellBash:
shellSpecific = parseBash
currentNumber = 1
case environments.ShellZsh:
shellSpecific = parseZsh
currentNumber = 0
default:
utils.Logger.Panicf("Unknown shell %v", env.Shell)
}
return func(keeper Keeper, scanner *bufio.Scanner, filter *utils.Regexp, historyChan chan *HistoryEntry) (Keeper, error) {
defer close(historyChan)
continueToConsume := false
currentEvent := keeper.Init()
for keeper.Continue() && scanner.Scan() {
text := scanner.Text()
utils.Logger.WithFields(logrus.Fields{
"text": text,
"continueToConsume": continueToConsume,
"currentEvent": currentEvent,
}).Info("Parse history line")
if continueToConsume {
utils.Logger.Info("Attach the line to the previous command")
currentEvent.command += "\n" + text
if strings.HasSuffix(text, `\`) {
continue
}
continueToConsume = false
utils.Logger.WithFields(logrus.Fields{
"event": currentEvent,
}).Info("Commit event")
currentEvent = keeper.Commit(currentEvent, historyChan)
}
if text == "" {
utils.Logger.Info("Skip empty line")
continue
}
continueToConsume, currentNumber, currentEvent = shellSpecific(
keeper,
text,
currentNumber,
currentEvent,
filter,
historyChan)
}
if err := scanner.Err(); err != nil {
return nil, err
}
return keeper, nil
}
}
func parseBash(keeper Keeper, text string, currentNumber uint, currentEvent *HistoryEntry, filter *utils.Regexp,
historyChan chan *HistoryEntry) (bool, uint, *HistoryEntry) {
continueToConsume := false
if bashTimestampRegexp.Match(text) {
if converted, err := strconv.ParseInt(text[1:], 10, 64); err == nil {
utils.Logger.WithFields(logrus.Fields{
"timestamp": converted,
}).Info("Parse timestamp")
currentEvent.timestamp = converted
} else {
utils.Logger.WithFields(logrus.Fields{
"text": text,
"error": err,
}).Warn("Cannot parse timestamp")
}
} else {
if filter == nil || filter.Match(text) {
currentEvent.command = text
currentEvent.number = currentNumber
continueToConsume = strings.HasSuffix(text, "\\")
if !continueToConsume {
utils.Logger.WithFields(logrus.Fields{
"event": currentEvent,
}).Info("Commit event")
currentEvent = keeper.Commit(currentEvent, historyChan)
}
} else {
utils.Logger.Info("Skip text line because of the filter.")
}
currentNumber++
}
return continueToConsume, currentNumber, currentEvent
}
func parseZsh(keeper Keeper, text string, currentNumber uint, currentEvent *HistoryEntry, filter *utils.Regexp,
historyChan chan *HistoryEntry) (bool, uint, *HistoryEntry) {
continueToConsume := false
groups, err := zshLineRegexp.Groups(text)
if err != nil {
utils.Logger.WithFields(logrus.Fields{
"error": err,
}).Warn("Cannot parse current line, skip.")
return continueToConsume, currentNumber, currentEvent
}
timestamp, command := groups[0], groups[1]
currentNumber++
if filter != nil && !filter.Match(command) {
utils.Logger.Info("Skip text line because of the filter.")
return continueToConsume, currentNumber, currentEvent
}
converted, _ := strconv.ParseInt(timestamp, 10, 64)
currentEvent.command = command
currentEvent.number = currentNumber
currentEvent.timestamp = converted
continueToConsume = strings.HasSuffix(text, `\`)
if !continueToConsume {
utils.Logger.WithFields(logrus.Fields{
"event": currentEvent,
}).Info("Commit event")
currentEvent = keeper.Commit(currentEvent, historyChan)
}
return continueToConsume, currentNumber, currentEvent
}
|
package storage
import (
"math"
"runtime"
"sync"
)
type Word struct {
Word string
Count int64
}
var stringsChannel chan []string
//var wordsStorage map[string]int64
var wordsStorage []*Word
var mux sync.RWMutex
func StartDispatching() {
//wordsStorage = make(map[string]int64)
stringsChannel = make(chan []string, 10)
go dispatch()
}
func TopWords(n int64) []string {
// lock mutex for reading
mux.RLock()
defer mux.RUnlock()
var wordsResult []string
// calc words in storage
wordsCount := float64(len(wordsStorage))
num := int64(math.Min(wordsCount, float64(n)))
// get first <num> words
for i := int64(0); i < num; i++ {
wordsResult = append(wordsResult, wordsStorage[i].Word)
}
return wordsResult
}
func dispatch() {
for {
select {
case words := <-stringsChannel:
// received words list, lock mutex for writing and add words to storage
mux.Lock()
for _, iterator := range words {
var this_word_new bool = true // true if this word is not yet in the storage
for _, storage_iterator := range wordsStorage {
if storage_iterator.Word == iterator {
// this word exists, let add 1 to count
this_word_new = false
storage_iterator.Count++
break
}
}
if this_word_new { // this word not exists, add it with count = 1
wordsStorage = append(wordsStorage, &Word{iterator, 1})
}
}
// sort words storage for fast getting top
SortWordsByCount(bycount).Sort(wordsStorage)
// print after sorting
//for _, iterator := range wordsStorage {
//fmt.Printf("%+v\n", iterator)
//}
mux.Unlock()
}
runtime.Gosched()
}
}
|
package main
import (
"fmt"
"github.com/jinzhu/gorm"
_ "github.com/lib/pq"
)
const (
host = "localhost"
// email = "findme@scottlaing.ca"
port = 5432
user = "postgres"
dbname = "postgres"
password = "password"
)
type User struct {
gorm.Model
Name string
Email string `gorm:"not null;unique_index"`
}
type Order struct {
gorm.Model
UserID uint
Amount int
Description string
}
func main() {
psqlInfo := fmt.Sprintf("host=%s port=%d user=%s "+
"password=%s dbname=%s sslmode=disable",
host, port, user, password, dbname)
db, err := gorm.Open("postgres", psqlInfo)
if err != nil {
panic(err)
}
defer db.Close()
db.LogMode(true)
db.AutoMigrate(&User{}, &Order{})
var user User
// Check the Error on the *gorm.DB returned by First rather than on db itself.
if err := db.First(&user).Error; err != nil {
panic(err)
}
createOrder(db, user, 1001, "Fake Description #1")
createOrder(db, user, 9999, "Fake Description #2")
createOrder(db, user, 8822, "Fake Description #3")
}
func createOrder(db *gorm.DB, user User, amount int, desc string) {
// Check the Error on the *gorm.DB returned by Create rather than on the shared db handle,
// which does not reflect errors from chained calls.
err := db.Create(&Order{
UserID: user.ID,
Amount: amount,
Description: desc,
}).Error
if err != nil {
panic(err)
}
}
|
package repository
//go:generate go run github.com/golang/mock/mockgen -source=$GOFILE -destination=mock/${GOFILE} -package=mock
import (
"context"
"github.com/traPtitech/trap-collection-server/src/domain/values"
)
type GameManagementRole interface {
AddGameManagementRoles(ctx context.Context, gameID values.GameID, userIDs []values.TraPMemberID, role values.GameManagementRole) error
UpdateGameManagementRole(ctx context.Context, gameID values.GameID, userID values.TraPMemberID, role values.GameManagementRole) error
RemoveGameManagementRole(ctx context.Context, gameID values.GameID, userID values.TraPMemberID) error
GetGameManagersByGameID(ctx context.Context, gameID values.GameID) ([]*UserIDAndManagementRole, error)
GetGameManagementRole(ctx context.Context, gameID values.GameID, userID values.TraPMemberID, lockType LockType) (values.GameManagementRole, error)
}
type UserIDAndManagementRole struct {
UserID values.TraPMemberID
Role values.GameManagementRole
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package cups provides methods to coordinate with CUPS for printer handling.
package cups
import (
"context"
"chromiumos/tast/errors"
"chromiumos/tast/local/printing/printer"
"chromiumos/tast/local/upstart"
)
// RestartPrintingSystem restarts all of the printing-related processes, leaving the
// system in an idle state.
func RestartPrintingSystem(ctx context.Context) error {
if err := printer.ResetCups(ctx); err != nil {
return errors.Wrap(err, "failed to reset CUPS")
}
if err := upstart.RestartJob(ctx, "upstart-socket-bridge"); err != nil {
return errors.Wrap(err, "failed to restart upstart-socket-bridge")
}
return nil
}
|
package handler
import (
"encoding/json"
"github.com/gin-gonic/gin"
"github.com/zhj0811/fabric-normal/apiserver/common"
"github.com/zhj0811/fabric-normal/apiserver/sdk"
"github.com/zhj0811/fabric-normal/common/logging"
)
var logger = logging.NewLogger("debug", "handler")
type Data struct {
Key string `json:"key"`
Value string `json:"value"`
}
func Invoke(ctx *gin.Context) {
req := &Data{}
err := ctx.ShouldBindJSON(req)
if err != nil {
logger.Errorf("Read request info failed %s", err.Error())
Response(ctx, err, common.RequestFormatErr, nil)
return
}
bytes, err := json.Marshal(req)
if err != nil {
logger.Errorf("Marshal request info failed %s", err.Error())
Response(ctx, err, common.MarshalJSONErr, nil)
return
}
args := []string{"invoke", string(bytes)}
res, err := sdk.Invoke(args)
if err != nil {
logger.Errorf("Chaincode invoke failed %s", err.Error())
Response(ctx, err, common.InvokeErr, nil)
return
}
logger.Infof("Upload data %s success", req.Key)
Response(ctx, nil, common.Success, res.TransactionID)
return
}
func Query(ctx *gin.Context) {
tx := ctx.Query("key")
args := []string{"query", string(tx)}
res, err := sdk.Query(args)
if err != nil {
logger.Errorf("Chaincode query failed %s", err.Error())
Response(ctx, err, common.QueryErr, nil)
return
}
logger.Infof("Query res %+v", res)
Response(ctx, nil, common.Success, res)
return
}
|
package handlers
import (
"encoding/json"
"net/http"
"strconv"
database "../database"
. "../helpers"
. "../models"
"github.com/gorilla/mux"
)
func AllTodo(w http.ResponseWriter, r *http.Request) {
respond, err := json.Marshal(database.All())
CheckErr(err)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(respond))
}
func GetTodo(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
key, _ := strconv.Atoi(vars["id"])
respond, err := json.Marshal(database.Get(key))
CheckErr(err)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(respond))
}
func GetDateTodo(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
key, _ := vars["date"]
respond, err := json.Marshal(database.GetDate(key))
CheckErr(err)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(respond))
}
func UpTodo(w http.ResponseWriter, r *http.Request) {
var todo Todo
var resNot RespondNotice
err := json.NewDecoder(r.Body).Decode(&todo)
CheckErr(err)
upCheck := database.Up(todo)
if upCheck {
resNot.Status = "success"
resNot.Notice = "Todo updated"
} else {
resNot.Status = "success"
resNot.Notice = "Todo not updated"
}
respond, _ := json.Marshal(resNot)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(respond))
}
func AddTodo(w http.ResponseWriter, r *http.Request) {
var todo Todo
var resNot RespondNotice
err := json.NewDecoder(r.Body).Decode(&todo)
CheckErr(err)
addCheck := database.Add(todo)
if addCheck {
resNot.Status = "success"
resNot.Notice = "Todo added"
} else {
resNot.Status = "error"
resNot.Notice = "Todo not added"
}
respond, _ := json.Marshal(resNot)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusCreated)
w.Write([]byte(respond))
}
func DeleteTodo(w http.ResponseWriter, r *http.Request) {
var todo Todo
var resNot RespondNotice
err := json.NewDecoder(r.Body).Decode(&todo)
CheckErr(err)
deleteCheck := database.Delete(todo)
if deleteCheck {
resNot.Status = "success"
resNot.Notice = "Todo deleted"
} else {
resNot.Status = "error"
resNot.Notice = "Todo not deleted"
}
respond, _ := json.Marshal(resNot)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(respond))
}
|
package integration
import (
"net/http"
"encoding/json"
"fmt"
"time"
"strings"
"strconv"
"io"
"io/ioutil"
"errors"
"openreplay/backend/pkg/messages"
)
// Old name: asayerSessionId
// QUERY: what can be modified?
const RB_QUERY =
"SELECT item.id, item.title,body.message.openReplaySessionToken,item.level,"+
" item.counter,item.environment,body.crash_report.raw,body.message.body,timestamp"+
" FROM item_occurrence"+
" WHERE body.message.openReplaySessionToken != null"+
" AND timestamp>= %v"+
" AND item.level>30"+
" ORDER BY timestamp"+
" LIMIT 1000"
// ASC by default
// \n\t symbols can spoil the request body, so it wouldn't work (or it probably happened because of job hashing)
/*
- `read` Access Token required
- timestamp in seconds
*/
type rollbar struct {
AccessToken string // `json:"access_token"`
}
type rollbarJobResponce struct {
Err int
Message string
Result struct {
Id int
}
}
type rollbarJobStatusResponce struct {
Err int
Result struct {
Status string
Result struct {
Rows [][] json.Number
Columns[] string
}
}
}
type rollbarEvent map[string]string
/*
It is possible to use /api/1/instances (20 per page)
Jobs for the identical requests are hashed
*/
func (rb *rollbar) Request(c *client) error {
fromTs := c.getLastMessageTimestamp() + 1000 // From next second
c.setLastMessageTimestamp(fromTs) // anti-job-hashing
fromTsSec := fromTs / 1e3
query := fmt.Sprintf(RB_QUERY, fromTsSec)
jsonBody := fmt.Sprintf(`{
"access_token": "%v",
"query_string": "%v"
}`, rb.AccessToken, query)
req, err := http.NewRequest("POST", "https://api.rollbar.com/api/1/rql/jobs", strings.NewReader(jsonBody))
if err != nil {
return err
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Accept", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
// status != 200 || 201
// status can be 403, in which case we should report a wrong token
if resp.StatusCode >= 400 {
io.Copy(ioutil.Discard, resp.Body) // Read the body to free socket
return fmt.Errorf("Rollbar: server respond with the code %v", resp.StatusCode)
}
var jobResponce rollbarJobResponce
if err = json.NewDecoder(resp.Body).Decode(&jobResponce); err != nil {
return err
}
if jobResponce.Err != 0 {
return fmt.Errorf("Rollbar job responce error: %v", jobResponce.Message)
}
requestURL := fmt.Sprintf(
"https://api.rollbar.com/api/1/rql/job/%v?access_token=%v&expand=result",
jobResponce.Result.Id,
rb.AccessToken,
)
req, err = http.NewRequest("GET", requestURL, nil)
if err != nil {
return err
}
tick := time.Tick(5 * time.Second)
for {
<- tick
resp, err = http.DefaultClient.Do(req)
if err != nil {
return err // continue + timeout/maxAttempts
}
// status != 200
var jobStatus rollbarJobStatusResponce
err := json.NewDecoder(resp.Body).Decode(&jobStatus)
// Close the body explicitly: a defer inside the loop would keep every response open
// until the function returns.
resp.Body.Close()
if err != nil {
return err
}
// TODO: pagination; limit: 1000
if jobStatus.Result.Status == "success" {
for _, row := range jobStatus.Result.Result.Rows {
e := make(rollbarEvent)
for i, col := range jobStatus.Result.Result.Columns {
//if len(row) <= i { error }
e[col] = row[i].String() // all values are converted to strings here, which loses type information
}
// sessionID, err := strconv.ParseUint(e[ "body.message.asayerSessionId" ], 10, 64)
// if err != nil {
// c.errChan <- err
// continue
// }
if e[ "body.message.openReplaySessionToken" ] == "" {
c.errChan <- errors.New("Token is empty!")
continue
}
payload, err := json.Marshal(e)
if err != nil {
c.errChan <- err
continue
}
timestampSec, err := strconv.ParseUint(e[ "timestamp" ], 10, 64)
if err != nil {
c.errChan <- err
continue
}
timestamp := timestampSec * 1000
c.setLastMessageTimestamp(timestamp)
c.evChan <- &SessionErrorEvent{
Token: e[ "body.message.openReplaySessionToken" ],
RawErrorEvent: &messages.RawErrorEvent{
Source: "rollbar",
Timestamp: timestamp,
Name: e[ "item.title" ],
Payload: string(payload),
},
}
}
break
}
if jobStatus.Result.Status != "new" &&
jobStatus.Result.Status != "running" {
// error
break
}
}
return nil
} |
package viper
import (
"github.com/spf13/viper"
"reflect"
"strings"
)
// CreateEnvViper creates a new instance of Viper that supports binding environment variables to a struct.
// Example:
// os.Setenv("NAME", "TestViper")
// os.Setenv("APPLICATION_ID", "TestApp")
// os.Setenv("NESTED_ASTRING", "nested string")
// type NestedConfig struct {
// AString string `mapstructure:"astring"`
// }
// type Config struct {
// Name string `mapstructure:"name"`
// ApplicationID string `mapstructure:"application-id"`
// Nested NestedConfig `mapstructure:"nested"`
// }
//
// confReader := CreateEnvViper(nil, Config{})
// c := &Config{}
// confReader.Unmarshal(c)
// fmt.Printf("%+v", c)
// Result will be: &{Name:TestViper ApplicationID:TestApp Nested:{AString:nested string}}
func CreateEnvViper(v *viper.Viper, ifaces ...interface{}) *viper.Viper {
if v == nil {
v = viper.New()
}
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))
v.AutomaticEnv()
for _, iface := range ifaces {
BindEnvs(v, iface)
}
return v
}
// BindEnvs binds environment variables to struct fields; nested attribute names are joined with ".".
func BindEnvs(vp *viper.Viper, iface interface{}, parts ...string) {
ifv := reflect.ValueOf(iface)
ift := reflect.TypeOf(iface)
for i := 0; i < ift.NumField(); i++ {
v := ifv.Field(i)
t := ift.Field(i)
tv, ok := t.Tag.Lookup("mapstructure")
if !ok {
continue
}
switch v.Kind() {
case reflect.Struct:
BindEnvs(vp, v.Interface(), append(parts, tv)...)
default:
s := strings.Join(append(parts, tv), ".")
vp.BindEnv(s)
}
}
}
|
package downstream_adaptor
import (
"github.com/robotic-framework/robotic-client/internal/constants/enums"
"github.com/robotic-framework/robotic-client/internal/global"
"github.com/sirupsen/logrus"
"gobot.io/x/gobot"
"time"
)
var typeInitializer map[enums.DownstreamAdaptorType]initializer
func init() {
typeInitializer = make(map[enums.DownstreamAdaptorType]initializer)
typeInitializer[enums.DOWNSTREAM_ADAPTOR_TYPE__FIRMATA] = firmataAdaptorInitializer
typeInitializer[enums.DOWNSTREAM_ADAPTOR_TYPE__MSP] = mspAdaptorInitializer
}
type initializer func(config global.RobotConfiguration) DownstreamAdaptor
func NewDownstreamAdaptor(typ enums.DownstreamAdaptorType, config global.RobotConfiguration) DownstreamAdaptor {
initFunc, ok := typeInitializer[typ]
if !ok {
logrus.Panicf("cannot get initializer from downstream adaptor: %s", typ.String())
}
return initFunc(config)
}
type DownstreamAdaptor interface {
gobot.Adaptor
GetIdentity() (resp IdentityResp, err error)
GetStatus() (resp StatusResp, err error)
GetPID() (resp PIDResp, err error)
GetAttitude() (resp AttitudeResp, err error)
GetAltitude() (resp AltitudeResp, err error)
GetRawIMU() (resp RawIMUResp, err error)
GetServo() (resp ServoResp, err error)
GetServoConfig() (resp ServoConfigResp, err error)
GetMotor() (resp MotorResp, err error)
GetMotorPins() (resp MotorPinResp, err error)
AccCalibration() error
MagCalibration() error
}
func firmataAdaptorInitializer(config global.RobotConfiguration) DownstreamAdaptor {
firmataAdaptor := NewFirmataAdaptor(config.SelfDownstreamAdaptorFirmataName)
return firmataAdaptor
}
func mspAdaptorInitializer(config global.RobotConfiguration) DownstreamAdaptor {
return NewMSPAdaptor(config.SelfDownstreamAdaptorMSPName, time.Duration(config.SelfDownstreamAdaptorMSPReadyDuration))
}
|
package snorlax
import "errors"
var (
ErrSubNoMessageType = errors.New("no message type")
ErrSubUnknownMessageType = errors.New("unknown message type")
ErrSubUnmarshaling = errors.New("failed to unmarshal message")
)
|
package main
import (
"os"
"testing"
"time"
"github.com/cloudfoundry/jibber_jabber"
"github.com/stretchr/testify/assert"
)
func (result *BaseModel) zeroBaseModelDatesAndID() {
result.ID = 0
result.CreatedAt = time.Unix(int64(0), int64(0))
result.UpdatedAt = time.Unix(int64(0), int64(0))
}
func zeroNotesDatesAndID(results []Note) []Note {
for i := range results {
results[i].zeroNoteDatesAndID()
}
return results
}
func (result *Note) zeroNoteDatesAndID() {
result.zeroBaseModelDatesAndID()
for i := range result.Tags {
result.Tags[i].zeroBaseModelDatesAndID()
}
}
func getSystemLocalLoc() *time.Location {
// The representation of a postgres timestamp returned by the pg library is a time.Time with a
// Location corresponding to the country of the system's current locale, whereas when instantiating
// a time explicitly in Go, by default the Location will be Go's own abstraction for the
// system's current locale. This means two time.Time structs instantiated with similar parameters
// will fail comparison if one is inserted into and retrieved from a postgres database, so we discover
// the system's current locale (we make the assumption that this is what postgres is using as its
// locale) and we fetch the corresponding Location and transform any time.Time structs in expected
// results to that location
localeTerritory, err := jibber_jabber.DetectTerritory()
if err != nil {
panic(err)
}
localLoc, err := time.LoadLocation(localeTerritory)
if err != nil {
panic(err)
}
return localLoc
}
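// Minimal sketch of the problem described above (illustrative, not part of the tests):
// two values for the same instant compare equal with time.Time.Equal but fail
// assert.Equal (reflect.DeepEqual) when their Locations differ, hence the conversion
// of expected timestamps with getSystemLocalLoc().
//   a := time.Unix(1295218624, 0).UTC()
//   b := time.Unix(1295218624, 0).In(getSystemLocalLoc())
//   a.Equal(b)              // true: same instant
//   reflect.DeepEqual(a, b) // false: different Location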
func TestEndToEnd(t *testing.T) {
expectedResults := []Note{
Note{
Content: `
Welcome to Catch.com
Catch helps you create, organize and sync notes between the web and your mobile devices.
Create it!
Let your mind take a break and offload your thoughts, findings, and images with us.
Organize it!
Simply prefix any word in a note with # to make it a tag: #example
Sync It!
Press the Settings button from the notes list to sign in or create a new Catch account. It's quick, free, and secure.
Your notes, anywhere, anytime: https://catch.com
email: feedback@catch.com
twitter: http://twitter.com/catch
#Welcome #Catch
`,
Source: "Catch",
ExternalID: "000000000000000022214014",
OriginalCreationTimestamp: time.Unix(1295218624, 0).In(getSystemLocalLoc()),
Tags: []Tag{
Tag{
Name: "example",
Notes: nil,
},
Tag{
Name: "welcome",
Notes: nil,
},
Tag{
Name: "catch",
Notes: nil,
},
},
},
}
db := setupDatabase(
"palimpest_test",
os.Getenv("DATABASE_HOST"),
os.Getenv("DATABASE_USER"),
os.Getenv("DATABASE_PASSWORD"),
)
db.Exec("truncate notes CASCADE; truncate tags CASCADE; truncate note_tag_mapping CASCADE;")
defer db.Close()
openDataAndIngest(db, os.Getenv("TEST_FIXTURES_DIR"))
results := queryData(db)
assert.Equal(t, zeroNotesDatesAndID(expectedResults), zeroNotesDatesAndID(results))
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"io"
"github.com/spf13/cobra"
"github.com/GoogleContainerTools/skaffold/v2/cmd/skaffold/app/flags"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/version"
)
var versionDefaultTemplate = "{{.Version}}\n"
var versionFlag = flags.NewTemplateFlag(versionDefaultTemplate, version.Info{})
func NewCmdVersion() *cobra.Command {
return NewCmd("version").
WithDescription("Print the version information").
WithFlags([]*Flag{
{Value: versionFlag, Name: "output", Shorthand: "o", DefValue: versionDefaultTemplate, Usage: versionFlag.Usage()},
}).
NoArgs(doVersion)
}
func doVersion(_ context.Context, out io.Writer) error {
return versionFlag.Template().Execute(out, version.Get())
}
|
package management
import "github.com/emicklei/go-restful"
func SetToken(request *restful.Request, token string) {
request.SetAttribute("token", token)
}
func GetToken(request *restful.Request) string {
token := request.Attribute("token")
if token == nil {
return ""
}
return token.(string)
}
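// Usage sketch (hypothetical filter; not part of this package): a go-restful filter can stash the
// caller's token on the request so downstream handlers can read it back via GetToken.
//   func tokenFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
//       SetToken(req, req.HeaderParameter("Authorization"))
//       chain.ProcessFilter(req, resp)
//   }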
|
package write
type Widget struct {
Description string `json:"description"`
Owner string `json:"owner"`
Value int `json:"value"`
}
|
// Copyright (c) 2018-present, MultiVAC Foundation.
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package wire
import (
"bytes"
"errors"
"fmt"
"github.com/prometheus/common/log"
"io"
"github.com/multivactech/MultiVAC/base/rlp"
"github.com/multivactech/MultiVAC/model/merkle"
)
// MsgTxWithProofs is used for propagating pending transactions between nodes.
type MsgTxWithProofs struct {
Tx MsgTx
// Merkle path in the tree of all OutStates in the shard.
Proofs []merkle.MerklePath
}
// Command returns the command string.
func (msg *MsgTxWithProofs) Command() string {
return CmdTxWithProofs
}
// BtcDecode decodes the message.
func (msg *MsgTxWithProofs) BtcDecode(r io.Reader, _ uint32, _ MessageEncoding) error {
return rlp.Decode(r, msg)
}
// Deserialize deserializes the message.
func (msg *MsgTxWithProofs) Deserialize(r io.Reader) error {
return msg.BtcDecode(r, 0, BaseEncoding)
}
// BtcEncode encodes the message.
func (msg *MsgTxWithProofs) BtcEncode(w io.Writer, _ uint32, _ MessageEncoding) error {
return rlp.Encode(w, msg)
}
// Serialize serializes the message.
func (msg *MsgTxWithProofs) Serialize(w io.Writer) error {
return msg.BtcEncode(w, 0, BaseEncoding)
}
// MaxPayloadLength returns the maximum block payload length.
func (msg *MsgTxWithProofs) MaxPayloadLength(_ uint32) uint32 {
return MaxBlockPayload
}
// ToBytesArray serialize the message and return byte array.
func (msg *MsgTxWithProofs) ToBytesArray() []byte {
var buf bytes.Buffer
err := msg.BtcEncode(&buf, 0, BaseEncoding)
if err != nil {
log.Errorf("failed to encode message,err:%v", err)
return nil
}
return buf.Bytes()
}
// VerifyTxWithProof verifies whether the msg is valid.
// 1. Verifies whether the transaction itself is valid, see MsgTx#verifyTransaction.
// 2. Verifies all the proofs are valid.
// 3. Verifies for each TxIn, there's a corresponding proof.
func (msg *MsgTxWithProofs) VerifyTxWithProof(ledgerMerkleRoot *merkle.MerkleHash) error {
if err := msg.Tx.VerifyTransaction(); err != nil {
return err
}
if len(msg.Tx.TxIn) != len(msg.Proofs) {
return errors.New("length of txin is different from length of proofs")
}
// Verify that each txin matches its corresponding proof.
for index, txIn := range msg.Tx.TxIn {
hash := merkle.ComputeMerkleHash(
txIn.PreviousOutPoint.ToUnspentOutState().ToBytesArray())
// Verify the proof of this out is valid
proof := msg.Proofs[index]
if *hash != *proof.GetLeaf() {
return fmt.Errorf("wrong proof for %d th txin", index)
}
if err := proof.Verify(ledgerMerkleRoot); err != nil {
return fmt.Errorf("invalid proof, err msg: %s", err)
}
}
return nil
}
|
package globals
import (
"errors"
"github.com/kr/pretty"
"googlemaps.github.io/maps"
)
var mapsClient *maps.Client
//InitMapClient initializes the maps client
func InitMapClient(apiKey string) error {
mapsCandidate, err := maps.NewClient(maps.WithAPIKey(apiKey))
if err != nil {
pretty.Printf("fatal error: %s \n", err)
return err
}
mapsClient = mapsCandidate
return nil
}
//GetMapClient returns the maps client, or an error if it has not been initialized
func GetMapClient() (*maps.Client, error) {
if mapsClient == nil {
return nil, errors.New("client not initialized")
}
return mapsClient, nil
}
|
package user
import (
"fmt"
"net/mail"
"github.com/jrapoport/gothic/api/grpc/rpc/admin"
"github.com/jrapoport/gothic/cmd/cli/root"
"github.com/jrapoport/gothic/core/context"
"github.com/spf13/cobra"
)
var hard bool
var deleteCmd = &cobra.Command{
Use: "delete [ID or EMAIL]",
RunE: deleteUserRunE,
Args: cobra.ExactArgs(1),
}
func init() {
fs := deleteCmd.Flags()
fs.BoolVarP(&hard, "hard", "h", false, "hard delete user")
}
func deleteUserRunE(_ *cobra.Command, args []string) error {
client, err := root.NewAdminClient()
if err != nil {
return err
}
defer func() {
client.Close()
}()
userID := args[0]
yes := root.ConfirmAction("Delete user %s", userID)
if !yes {
return nil
}
req := &admin.DeleteUserRequest{
User: &admin.DeleteUserRequest_UserId{UserId: userID},
Hard: hard,
}
addr, err := mail.ParseAddress(userID)
if err == nil {
req.User = &admin.DeleteUserRequest_Email{Email: addr.Address}
}
res, err := client.DeleteUser(context.Background(), req)
if err != nil {
return err
}
fmt.Printf("deleted user: %s\n", res.GetUserId())
return nil
}
|
/*
Copyright 2020 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manifest
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/util"
)
const (
manifestsStagingFolder = "manifest_tmp"
renderedManifestsStagingFile = "rendered_manifest.yaml"
gcsPrefix = "gs://"
)
var ManifestTmpDir = filepath.Join(os.TempDir(), manifestsStagingFolder)
// Write writes manifests to a file, a writer or a GCS bucket.
func Write(manifests string, output string, manifestOut io.Writer) error {
switch {
case output == "":
_, err := fmt.Fprintln(manifestOut, manifests)
return err
case strings.HasPrefix(output, gcsPrefix):
tempDir, err := os.MkdirTemp("", manifestsStagingFolder)
if err != nil {
return writeErr(fmt.Errorf("failed to create the tmp directory: %w", err))
}
defer os.RemoveAll(tempDir)
tempFile := filepath.Join(tempDir, renderedManifestsStagingFile)
if err := dumpToFile(manifests, tempFile); err != nil {
return err
}
gcs := util.Gsutil{}
if err := gcs.Copy(context.Background(), tempFile, output, false); err != nil {
return writeErr(fmt.Errorf("failed to copy rendered manifests to GCS: %w", err))
}
return nil
default:
return dumpToFile(manifests, output)
}
}
func dumpToFile(manifests string, filepath string) error {
f, err := os.Create(filepath)
if err != nil {
return fmt.Errorf("opening file for writing manifests: %w", err)
}
defer f.Close()
_, err = f.WriteString(manifests + "\n")
return err
}
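// writeToStdout is an illustrative sketch and not part of the original file:
// it selects the writer branch of Write above by passing an empty output
// path, which prints the rendered manifests to standard output.
func writeToStdout(manifests string) error {
	return Write(manifests, "", os.Stdout)
}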
|
/*
* Created on Sat Dec 15 2018 19:37:44
* Author: WuLC
* EMail: liangchaowu5@gmail.com
*/
// isAlienSorted reports whether words are sorted lexicographically according
// to the given alien alphabet order, comparing each pair of adjacent words
// with two pointers.
func isAlienSorted(words []string, order string) bool {
orderNum := make(map[byte]int)
for i, v := range order {
orderNum[byte(v)] = i
}
for i := 0; i < len(words)-1; i++ {
p1, p2 := 0, 0
for p1 < len(words[i]) && p2 < len(words[i+1]) {
if orderNum[words[i][p1]] < orderNum[words[i+1][p2]] {
break
} else if orderNum[words[i][p1]] > orderNum[words[i+1][p2]] {
return false
}
p1, p2 = p1+1, p2+1
}
// words[i+1] is a proper prefix of words[i], so the pair is out of order.
if p2 == len(words[i+1]) && p1 < len(words[i]) {
return false
}
}
return true
} |
package models
import (
"database/sql"
"fmt"
"log"
"os"
_ "github.com/mattn/go-sqlite3" // Register SQLite3 driver
"github.com/mitchellh/go-homedir"
)
// List represents a to-do list
type List struct {
id int
active bool
name string
}
// Todo represents a to-do item that belongs to a list
type Todo struct {
id int
done bool
name string
list int
}
// Notebook represents real notebook
type Notebook struct {
id int
pages int
title string
}
// ------------------------------------------------------------------------
// Init will create the database tables necessary for Mago
func Init() {
// Get the home directory for current user
homeDir, err := homedir.Dir()
if err != nil {
log.Fatal("Could not find home directory for current user: ", err)
}
// Conditionally create the ~/.mago directory, warning the user if any files will be lost
if exist := os.IsExist(os.Mkdir(homeDir+"/.mago", 0744)); exist {
fmt.Print("If you continue, current Mago dirs and files will be lost and recreated.\nContinue? (y/n): ")
var choice string
_, err = fmt.Scanln(&choice)
if err != nil {
log.Fatal("Could not retrieve user input.")
}
if choice == "y" || choice == "Y" || choice == "yes" || choice == "YES" || choice == "Yes" {
// When running `mago init` for the nth time, just empty the ~/.mago directory
os.Remove(homeDir + "/.mago/mago.db")
} else {
fmt.Println("EARLY RETURN")
return
}
}
db, err := sql.Open("sqlite3", homeDir+"/.mago/mago.db")
if err != nil {
log.Fatal("Could not open/create database file: ", err)
}
defer db.Close()
sqlStmt := `create table lists (
id integer not null primary key,
active integer not null default 0,
name text not null);
create table if not exists todos (
id integer not null primary key,
done integer not null default 0,
name text not null,
list integer references lists);
create table if not exists notebooks (
id integer not null primary key,
pages integer not null default 1,
title text not null);`
_, err = db.Exec(sqlStmt)
if err != nil {
log.Fatal("Could not initialize Mago's database: ", err)
}
}
// NewList creates a new List model in the database
func NewList(name string) (e error) {
e = nil
// Open the database under the user's home directory; the sqlite3 driver does
// not expand a literal "~" in the path.
homeDir, err := homedir.Dir()
if err != nil {
log.Fatal(err)
}
db, err := sql.Open("sqlite3", homeDir+"/.mago/mago.db")
if err != nil {
log.Fatal(err)
}
defer db.Close()
// sqlStmt := ``
return
}
|
package client
import (
"fmt"
"hash/adler32"
"io/ioutil"
"log"
"net"
"os"
"sort"
"time"
errors "github.com/mohamedmahmoud97/Zuper-UDP/errors"
socket "github.com/mohamedmahmoud97/Zuper-UDP/socket"
"github.com/vmihailenco/msgpack"
)
var (
//AckFileCheck is a channel for receiving the ack of the file request
AckFileCheck = make(chan uint32)
lastAck int32 = -1
buffer = make(map[int][]byte)
corruptProb int
fileName string
pckNo uint16
plp float32
flogC *os.File
)
//CreateClientSocket creates a UDP socket on the client side
func CreateClientSocket(localAddr *net.UDPAddr) *net.UDPConn {
conn, err := net.ListenUDP("udp", localAddr)
errors.CheckError(err)
return conn
}
//SendToServer sends the filename of the needed file to the server
func SendToServer(conn *net.UDPConn, servAddr, localAddr *net.UDPAddr, window int, filename string, flogc *os.File) {
flogC = flogc
log.SetOutput(flogC)
log.Printf("client is requesting file %v from server ... \n", filename)
fmt.Printf("client is requesting file %v from server ... \n", filename)
fileName = filename
file := []byte(filename)
noOfBytes := uint16(len(file))
reqPacket := socket.Packet{Data: file, PckNo: 1, Len: noOfBytes, SrcAddr: localAddr, DstAddr: servAddr}
b, err := msgpack.Marshal(&reqPacket)
if err != nil {
panic(err)
}
log.SetOutput(flogC)
log.Println("Encoded the message ...")
fmt.Println("Encoded the message ...")
//send the message to the server
_, err = conn.WriteToUDP(b, servAddr)
errors.CheckError(err)
// conn.Close()
start := time.Now()
quit := make(chan uint32)
//check if the time exceeded or it received the ack
go fileTimer(start, quit)
exists, goSend := resendReq(quit)
if goSend {
SendToServer(conn, servAddr, localAddr, window, filename, flogC)
} else if !goSend {
if exists == 0 {
//file doesn't exist; terminate the program
log.Println("File doesn't exist ...")
fmt.Println("File doesn't exist ...")
os.Exit(1)
} else if exists == 1 {
//file exists; don't do anything
}
quit <- 0
}
}
func sendResponse(conn *net.UDPConn, addr *net.UDPAddr, packet *socket.Packet) {
ack := socket.AckPacket{Seqno: packet.Seqno, SrcAddr: packet.DstAddr, DstAddr: packet.SrcAddr}
b, err := msgpack.Marshal(&ack)
if err != nil {
panic(err)
}
_, err = conn.WriteToUDP(b, addr)
errors.CheckError(err)
}
//ReceiveFromServer handles a data packet received from the server
func ReceiveFromServer(conn *net.UDPConn, buf []byte, addr *net.UDPAddr, algo string) {
var packet socket.Packet
err := msgpack.Unmarshal(buf, &packet)
if err != nil {
panic(err)
}
log.SetOutput(flogC)
log.Printf("Delivered packet with seqno %v \n", packet.Seqno)
fmt.Printf("Delivered packet with seqno %v \n", packet.Seqno)
if packet.Cksum == adler32.Checksum(packet.Data) {
if algo == "sw" {
go sendResponse(conn, addr, &packet)
appendFile(packet.Data, packet.Seqno)
done := CheckOnPck(&packet, algo, buffer, fileName)
if done == 1 {
os.Exit(0)
}
} else if algo == "gbn" {
if int32(packet.Seqno) == lastAck+1 {
lastAck = int32(packet.Seqno)
fmt.Printf("last ack packet is %v\n", lastAck)
appendFile(packet.Data, packet.Seqno)
go sendResponse(conn, addr, &packet)
done := CheckOnPck(&packet, algo, buffer, fileName)
if done == 1 {
os.Exit(0)
}
} else if int32(packet.Seqno) > lastAck+1 && lastAck != -1 {
//change seqno of ack packet to last delivered packet
packet.Seqno = uint32(lastAck)
go sendResponse(conn, addr, &packet)
} else if int32(packet.Seqno) > lastAck+1 && lastAck == -1 {
}
} else if algo == "sr" {
appendFile(packet.Data, packet.Seqno)
go sendResponse(conn, addr, &packet)
done := CheckOnPck(&packet, algo, buffer, fileName)
if done == 1 {
os.Exit(0)
}
}
}
}
//ReceiveAckFromServer handles the ack packet for the requested-file message
func ReceiveAckFromServer(buf []byte) {
var packet socket.AckPacket
err := msgpack.Unmarshal(buf, &packet)
if err != nil {
panic(err)
}
log.SetOutput(flogC)
log.Println("Received Ack of requested file packet ...")
fmt.Printf("Received Ack of requested file packet ... \n")
//a channel for sending seqno
AckFileCheck <- packet.Seqno
}
//appendFile appends received data to the buffer so the file can be built later
func appendFile(data []byte, seqno uint32) {
buffer[int(seqno)] = data
}
//buildFile assembles the requested file on the client side
func buildFile(algo string, buffer map[int][]byte, filename string) {
log.SetOutput(flogC)
log.Println("Building File ... ")
fmt.Println("Building File ... ")
recData := make([]byte, 0, pckNo*512) // start empty; received chunks are appended below
// To store the keys in slice in sorted order
var keys []int
for k := range buffer {
keys = append(keys, k)
}
sort.Ints(keys)
// to store sorted buffer
for _, k := range keys {
for i := 0; i < len(buffer[k]); i++ {
recData = append(recData, buffer[k][i])
}
}
err := ioutil.WriteFile(filename, recData, 0644)
errors.CheckError(err)
log.SetOutput(flogC)
log.Println("Finished building file ... ")
fmt.Println("Finished building file ... ")
}
//CheckOnPck checks whether the packet is the last packet
func CheckOnPck(packet *socket.Packet, algo string, buffer map[int][]byte, filename string) int {
if packet.Seqno == 0 {
pckNo = packet.PckNo
return 0
} else if int(packet.Seqno) == int(packet.PckNo)-1 {
time.Sleep(5 * time.Millisecond)
buildFile(algo, buffer, filename)
return 1
} else {
return 0
}
}
|
package models
import (
"errors"
"fmt"
"time"
"github.com/beego/beego/v2/client/orm"
"github.com/mobilemindtec/go-utils/beego/db"
)
type UserRole struct {
Id int64 `form:"-" json:",string,omitempty"`
CreatedAt time.Time `orm:"auto_now_add;type(datetime)" json:"-"`
UpdatedAt time.Time `orm:"auto_now;type(datetime)" json:"-"`
User *User `orm:"rel(fk)"`
Role *Role `orm:"rel(fk)"`
Session *db.Session `orm:"-" inject:""`
}
func (this *UserRole) TableName() string {
return "user_roles"
}
func NewUserRole(session *db.Session) *UserRole {
return &UserRole{Session: session}
}
func (this *UserRole) IsPersisted() bool {
return this.Id > 0
}
func NewUserRoleWithRole(user *User, role *Role) *UserRole {
entity := UserRole{User: user, Role: role}
return &entity
}
func (this *UserRole) FindRoleByUser(user *User) *Role {
entity := new(UserRole)
query, _ := this.Session.Query(entity)
err := query.Filter("User", user).One(entity)
if err == orm.ErrNoRows {
return nil
}
this.Session.GetDb().LoadRelated(entity, "Role")
return entity.Role
}
func (this *UserRole) FindAllRolesByUser(user *User) *[]*Role {
results := new([]*UserRole)
query, _ := this.Session.Query(new(UserRole))
query.Filter("User", user).All(results)
roles := []*Role{}
for _, it := range *results {
this.Session.GetDb().LoadRelated(it, "Role")
roles = append(roles, it.Role)
}
return &roles
}
func (this *UserRole) HasRoles(user *User, roles ...string) bool {
userRoles := this.FindAllRolesByUser(user)
if userRoles != nil {
for _, it := range *userRoles {
for _, role := range roles {
if it.Authority == role {
return true
}
}
}
}
return false
}
func (this *UserRole) FindAllByRole(role *Role) (*[]*UserRole, error) {
results := []*UserRole{}
query, _ := this.Session.Query(new(UserRole))
_, err := query.Filter("Role", role).All(&results)
for _, it := range results {
this.Session.GetDb().LoadRelated(it, "Role")
this.Session.GetDb().LoadRelated(it, "User")
}
return &results, err
}
func (this *UserRole) FindAllByUser(user *User) (*[]*UserRole, error) {
results := []*UserRole{}
query, _ := this.Session.Query(new(UserRole))
_, err := query.Filter("User", user).All(&results)
for _, it := range results {
this.Session.GetDb().LoadRelated(it, "Role")
}
return &results, err
}
func (this *UserRole) FindByUser(user *User) (*UserRole, error) {
entity := new(UserRole)
query, _ := this.Session.Query(entity)
err := query.Filter("User", user).One(entity)
if err == orm.ErrNoRows {
return entity, nil
}
this.Session.GetDb().LoadRelated(entity, "Role")
return entity, err
}
func (this *UserRole) FindByUserAndRole(user *User, role *Role) (*UserRole, error) {
entity := new(UserRole)
query, _ := this.Session.Query(entity)
err := query.Filter("User", user).Filter("Role", role).One(entity)
if err == orm.ErrNoRows {
return entity, nil
}
return entity, err
}
func (this *UserRole) FindByUserAndAuthority(user *User, authority string) (*UserRole, error) {
entity := new(UserRole)
query, _ := this.Session.Query(entity)
err := query.Filter("User", user).Filter("Role__Authority", authority).One(entity)
if err == orm.ErrNoRows {
return entity, nil
}
return entity, err
}
func (this *UserRole) Create(user *User, authority string) error {
search := NewRole(this.Session)
role, err := search.FindByAuthority(authority)
if err != nil {
return err
}
if role == nil || !role.IsPersisted() {
return errors.New(fmt.Sprintf("role %v not found", authority))
}
entity, err := this.FindByUserAndRole(user, role)
if err != nil && err != orm.ErrNoRows {
return err
}
if entity != nil && entity.IsPersisted() {
return nil
}
entity = &UserRole{User: user, Role: role}
return this.Session.Save(entity)
}
|
package hash
import "math"
func isPrime(n int) bool {
nsqrt := math.Sqrt(float64(n))
for i := 2; float64(i) <= nsqrt; i++ {
if n%i == 0 {
return false
}
}
return true
}
func firstNPrimes(n int) []int {
primes := make([]int, n)
for i, x := 0, 2; i < n; x++ {
if isPrime(x) {
primes[i] = x
i++
}
}
return primes
}
// fracBin returns the first n bits of the fractional part of float f.
func fracBin(f float64, n int) uint64 {
f -= math.Floor(f) // get only the fractional part
f *= math.Pow(2, float64(n)) // shift left
return uint64(f) // truncate and return
}
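// roundConstants is an illustrative sketch and not part of the original file:
// it shows how firstNPrimes and fracBin can be combined to derive hash round
// constants from the fractional bits of prime cube roots, in the style of the
// SHA-2 initialization values. The helper name and the choice of cube roots
// are assumptions of this sketch.
func roundConstants(count, bits int) []uint64 {
	primes := firstNPrimes(count)
	constants := make([]uint64, count)
	for i, p := range primes {
		// Take the first `bits` bits of the fractional part of cbrt(p).
		constants[i] = fracBin(math.Cbrt(float64(p)), bits)
	}
	return constants
}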
|
package models
import (
"crypto/md5"
"fmt"
"io"
)
// EncryptPasswd derives an MD5 digest from the name, password, and salts.
func EncryptPasswd(name, pass, salt string) string {
salt1 := "%$@w"
h := md5.New()
io.WriteString(h, salt1)
io.WriteString(h, name)
io.WriteString(h, salt)
io.WriteString(h, pass)
return fmt.Sprintf("%x", h.Sum(nil))
}
func VerifyPasswd(passwd, name, pass, salt string) bool {
return passwd == EncryptPasswd(name, pass, salt)
}
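// verifyPasswdExample is an illustrative sketch and not part of the original
// file: it shows the expected round trip between EncryptPasswd and
// VerifyPasswd. The user name, password, and salt values are made up.
func verifyPasswdExample() (bool, bool) {
	salt := "per-user-salt"
	stored := EncryptPasswd("alice", "s3cret", salt)
	ok := VerifyPasswd(stored, "alice", "s3cret", salt) // true: same inputs
	bad := VerifyPasswd(stored, "alice", "wrong", salt) // false: wrong password
	return ok, bad
}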
|
package main
import (
"database/sql"
"encoding/json"
"fmt"
"goutils"
"log"
"reflect"
"time"
_ "github.com/go-sql-driver/mysql"
)
var (
db *sql.DB
)
type AllDataType struct {
ID int64 `gorm:"column:id;primary_key" json:"id"`
Varchar string `gorm:"column:varchar" json:"varchar"`
Tinyint int `gorm:"column:tinyint" json:"tinyint"`
Text string `gorm:"column:text" json:"text"`
Date time.Time `gorm:"column:date" json:"date"`
Smallint int `gorm:"column:smallint" json:"smallint"`
Mediumint int `gorm:"column:mediumint" json:"mediumint"`
Int int `gorm:"column:int" json:"int"`
Bigint int64 `gorm:"column:bigint" json:"bigint"`
Float float32 `gorm:"column:float" json:"float"`
Double float64 `gorm:"column:double" json:"double"`
Decimal float64 `gorm:"column:decimal" json:"decimal"`
Datetime time.Time `gorm:"column:datetime" json:"datetime"`
Timestamp time.Time `gorm:"column:timestamp" json:"timestamp"`
Time time.Time `gorm:"column:time" json:"time"`
Char string `gorm:"column:char" json:"char"`
Tinyblob []byte `gorm:"column:tinyblob" json:"tinyblob"`
Tinytext string `gorm:"column:tinytext" json:"tinytext"`
Blob []byte `gorm:"column:blob" json:"blob"`
Mediumblob []byte `gorm:"column:mediumblob" json:"mediumblob"`
Mediumtext string `gorm:"column:mediumtext" json:"mediumtext"`
Longblob []byte `gorm:"column:longblob" json:"longblob"`
Longtext string `gorm:"column:longtext" json:"longtext"`
Enum string `gorm:"column:enum" json:"enum"`
Set string `gorm:"column:set" json:"set"`
Bool int `gorm:"column:bool" json:"bool"`
Binary []byte `gorm:"column:binary" json:"binary"`
Varbinary []byte `gorm:"column:varbinary" json:"varbinary"`
}
// TableName sets the insert table name for this struct type
func (a *AllDataType) TableName() string {
return "all_data_types"
}
type Test1 struct {
a int
b string
c float64
d []int
e [8]string
}
func init() {
goutils.RegisterType((*Test1)(nil))
url := fmt.Sprintf("%s:%s@(%s:%v)/%s?charset=utf8&parseTime=True&loc=Local",
"jbex", "jbex", "127.0.0.1", 3306, "jbex_com")
dd, err := sql.Open("mysql", url)
if err != nil {
log.Fatal(err.Error())
}
db = dd
}
func testReflectNew() {
t := goutils.MakeInstance("Test1").(Test1)
tp := goutils.MakeInstancePtr("Test1").(*Test1)
t.a = 1
t.b = "b"
t.c = 3.14
t.d = []int{1, 2, 3}
t.e = [8]string{"a", "b", "c", "d", "e", "f", "g", "h"}
tp.a = 1
tp.b = "b"
tp.c = 3.14
tp.d = []int{1, 2, 3}
tp.e = [8]string{"a", "b", "c", "d", "e", "f", "g", "h"}
if fmt.Sprint(t) != fmt.Sprint(*tp) {
panic("Reflect new test failed")
}
}
func testStructScan() {
rows, err := db.Query("select * from all_data_types")
if err != nil {
panic("query db failed")
}
var v interface{}
for rows.Next() {
v, err = goutils.StructScan(rows, reflect.TypeOf(AllDataType{}))
if err != nil {
panic(err)
}
x := v.(AllDataType)
bb, _ := json.Marshal(x)
fmt.Println(string(bb))
}
}
func main() {
testReflectNew()
testStructScan()
}
|
package gmb
import "strings"
const (
colon = ":"
slash = "/"
left = "{"
right = "}"
breaks = left + right
)
var contractions = map[string]string{
"@uuid@": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}",
"@num@": "[0-9]+",
"@alpha@": "[a-zA-Z]+",
"@alphanum@": "[a-zA-Z0-9]+",
}
// RegisterRegex adds new contraction with name and regex.
// Then you can use it in your routes as @name@
// gmb.RegisterRegex("name", "[A-Z][a-z]+")
// router.GET("/{param:@name@}/ok", handler)
func RegisterRegex(name, regex string) {
contractions["@"+name+"@"] = regex
}
var aliases = map[string]string{}
// RegisterAlias adds a new alias for a REST variable.
// It's useful when you want to replace all similar variables in your routes.
func RegisterAlias(what, to string) {
what = strings.Trim(what, breaks)
to = strings.Trim(to, breaks)
aliases[what] = to
}
// RegisterAliases adds multiple aliases for REST variables.
// For more information please look at RegisterAlias.
func RegisterAliases(whatTo map[string]string) {
for what, to := range whatTo {
RegisterAlias(what, to)
}
}
// c replaces contractions in uri string
func c(uri string) string {
uriParts := strings.Split(uri, slash)
for i := 0; i < len(uriParts); i++ {
part := uriParts[i]
if !strings.HasPrefix(part, left) || !strings.HasSuffix(part, right) {
continue
}
s := strings.Trim(part, breaks)
for what, to := range aliases {
if s == what {
s = to
}
}
if !strings.Contains(s, colon) {
continue
}
paramParts := strings.SplitN(s, colon, 2)
tpl := paramParts[1]
if v, yes := contractions[tpl]; yes {
uriParts[i] = left + strings.Replace(s, tpl, v, 1) + right
}
}
return strings.Join(uriParts, slash)
}
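// exampleExpand is an illustrative sketch and not part of the original file:
// it shows how a registered regex and alias expand inside a route pattern.
// The "year" regex and "{yr}" alias are assumptions of this sketch.
func exampleExpand() string {
	RegisterRegex("year", "[0-9]{4}")
	RegisterAlias("{yr}", "{year:@year@}")
	// c("/{yr}/posts") resolves the alias, then the @year@ contraction,
	// yielding "/{year:[0-9]{4}}/posts".
	return c("/{yr}/posts")
}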
|
package main
// Valid
// The inner scope of the for statement can access the init variables, which can also be shadowed inside.
func f() {
for a := 0; a < 10; a++ {
print(a)
}
} |
package server
import (
"bufio"
"os"
"path/filepath"
"strings"
"log"
)
// ReadGraphQLSchemaFromPath reads all .graphql files under path and returns their concatenated contents
func ReadGraphQLSchemaFromPath(path string) string {
var s strings.Builder
files := []string{}
err := filepath.Walk(path, func(path string, f os.FileInfo, err error) error {
if strings.Contains(path, ".graphql") {
files = append(files, path)
}
return nil
})
if err != nil {
log.Print(err)
return ""
}
for _, f := range files {
file, err := os.Open(f)
if err != nil {
log.Print(err)
return ""
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
_, err = s.Write(scanner.Bytes())
if err != nil {
log.Print(err)
return ""
}
_, err = s.WriteString("\n")
if err != nil {
log.Print(err)
return ""
}
}
}
return s.String()
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"time"
"chromiumos/tast/common/hwsec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/remote/rollback"
"chromiumos/tast/testing"
)
func init() {
testing.AddTest(&testing.Test{
Func: EnterpriseRollbackInPlace,
LacrosStatus: testing.LacrosVariantUnneeded,
Desc: "Check the enterprise rollback data restore mechanism while faking a rollback on one image",
Contacts: []string{
"mpolzer@google.com", // Test author
"crisguerrero@chromium.org",
"chromeos-commercial-remote-management@google.com",
},
Attr: []string{"group:mainline", "informational"},
SoftwareDeps: []string{"reboot", "chrome"},
ServiceDeps: []string{
"tast.cros.hwsec.OwnershipService",
"tast.cros.autoupdate.RollbackService",
},
Timeout: 10 * time.Minute,
})
}
// EnterpriseRollbackInPlace does not expect to use enrollment, so any
// functionality that depends on the enrollment of the device should not be
// added to this test.
func EnterpriseRollbackInPlace(ctx context.Context, s *testing.State) {
cleanupCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 3*time.Minute)
defer cancel()
defer func(ctx context.Context) {
if err := rollback.ClearRollbackAndSystemData(ctx, s.DUT(), s.RPCHint()); err != nil {
s.Error("Failed to clean rollback data after test: ", err)
}
}(cleanupCtx)
if err := rollback.SimulatePowerwash(ctx, s.DUT(), s.RPCHint()); err != nil {
s.Fatal("Failed to simulate powerwash before test: ", err)
}
networksInfo, err := rollback.ConfigureNetworks(ctx, s.DUT(), s.RPCHint())
if err != nil {
s.Fatal("Failed to configure networks: ", err)
}
sensitive, err := rollback.SaveRollbackData(ctx, s.DUT())
if err != nil {
s.Fatal("Failed to save rollback data: ", err)
}
// An ineffective reset is OK here because the device steps through OOBE automatically.
s.Log("Simulating powerwash and rebooting the DUT to fake rollback")
if err := rollback.SimulatePowerwashAndReboot(ctx, s.DUT()); err != nil && !errors.Is(err, hwsec.ErrIneffectiveReset) {
s.Fatal("Failed to simulate powerwash and reboot to fake an enterprise rollback: ", err)
}
if err := rollback.VerifyRollbackData(ctx, s.DUT(), s.RPCHint(), networksInfo, sensitive); err != nil {
s.Fatal("Failed to verify rollback: ", err)
}
}
|
package pkggraph
import (
"sort"
"golang.org/x/tools/go/packages"
)
func allImportsCache(pkgs map[string]*packages.Package) map[string][]string {
cache := map[string][]string{}
var fetch func(p *packages.Package) []string
fetch = func(p *packages.Package) []string {
if n, ok := cache[p.ID]; ok {
return n
}
// prevent cycles
cache[p.ID] = []string{}
var xs []string
for _, child := range p.Imports {
xs = includePackageID(xs, child.ID)
for _, pkg := range fetch(child) {
xs = includePackageID(xs, pkg)
}
}
cache[p.ID] = xs
return xs
}
for _, p := range pkgs {
_ = fetch(p)
}
return cache
}
func includePackageID(xs []string, p string) []string {
if !hasPackageID(xs, p) {
xs = append(xs, p)
sort.Strings(xs)
}
return xs
}
func hasPackageID(xs []string, p string) bool {
for _, x := range xs {
if x == p {
return true
}
}
return false
}
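// loadImportsCache is an illustrative sketch and not part of the original
// file: it loads packages for the given patterns with go/packages and feeds
// them to allImportsCache, keyed by package ID. The load mode shown here is
// an assumption of this sketch.
func loadImportsCache(patterns ...string) (map[string][]string, error) {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps}
	loaded, err := packages.Load(cfg, patterns...)
	if err != nil {
		return nil, err
	}
	pkgs := map[string]*packages.Package{}
	for _, p := range loaded {
		pkgs[p.ID] = p
	}
	return allImportsCache(pkgs), nil
}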
|
package almanack
import (
"context"
"encoding/json"
"fmt"
"html"
"log"
"regexp"
"strconv"
"strings"
"github.com/spotlightpa/almanack/internal/stringutils"
)
func (arcStory *ArcStory) ToArticle(ctx context.Context, svc Service, article *SpotlightPAArticle) (err error) {
var body strings.Builder
if article.Warnings, err = readContentElements(ctx, svc, arcStory.ContentElements, &body); err != nil {
return
}
article.Body = body.String()
if len(article.Warnings) > 0 {
article.ScheduleFor = nil
}
// Don't process anything else if this has been saved before
if !article.LastArcSync.IsZero() {
return
}
// Hacky: Add the of/for XX orgs then remove them
article.Authors = make([]string, len(arcStory.Credits.By))
for i := range arcStory.Credits.By {
article.Authors[i] = authorFrom(&arcStory.Credits.By[i])
}
article.Byline = commaAndJoiner(article.Authors)
for i := range article.Authors {
if pos := strings.Index(article.Authors[i], " of "); pos != -1 {
article.Authors[i] = article.Authors[i][:pos]
} else if pos := strings.Index(article.Authors[i], " for "); pos != -1 {
article.Authors[i] = article.Authors[i][:pos]
}
}
article.ArcID = arcStory.ID
article.InternalID = arcStory.Slug
article.Slug = slugFromURL(arcStory.CanonicalURL)
article.PubDate = arcStory.Planning.Scheduling.PlannedPublishDate
article.Budget = arcStory.Planning.BudgetLine
article.Hed = arcStory.Headlines.Basic
article.Subhead = arcStory.Subheadlines.Basic
article.Summary = arcStory.Description.Basic
article.Blurb = arcStory.Description.Basic
article.LinkTitle = arcStory.Headlines.Web
setArticleImage(article, arcStory.PromoItems)
if strings.HasPrefix(article.ImageURL, "http") {
var imgerr error
article.ImageURL, imgerr = svc.ReplaceImageURL(
ctx, article.ImageURL, article.ImageDescription, article.ImageCredit)
if imgerr != nil {
article.Warnings = append(article.Warnings, imgerr.Error())
}
}
if len(article.Warnings) > 0 {
article.ScheduleFor = nil
}
return
}
// Must keep in sync with Vue's ArcArticle.authors
func authorFrom(by *By) string {
byline := by.AdditionalProperties.Original.Byline
if byline != "" {
return byline
}
byline = by.Name
// Hack for bad names with orgs in them
if strings.Contains(byline, " of ") {
return byline
}
if org := strings.TrimSpace(by.Org); org != "" {
return byline + " of " + org
}
return byline
}
func commaAndJoiner(ss []string) string {
if len(ss) < 3 {
return strings.Join(ss, " and ")
}
commaPart := strings.Join(ss[:len(ss)-1], ", ")
return commaPart + " and " + ss[len(ss)-1]
}
func slugFromURL(s string) string {
stop := strings.LastIndexByte(s, '-')
if stop == -1 {
return s
}
start := strings.LastIndexByte(s[:stop], '/')
if start == -1 {
return s
}
return s[start+1 : stop]
}
func readContentElements(ctx context.Context, svc Service, rawels []*json.RawMessage, body *strings.Builder) (warnings []string, err error) {
for i, raw := range rawels {
var _type string
wrapper := ContentElementType{Type: &_type}
if err := json.Unmarshal(*raw, &wrapper); err != nil {
log.Printf("runtime error: %v", err)
}
var graf string
switch _type {
case "text", "raw_html":
wrapper := ContentElementText{Content: &graf}
if err := json.Unmarshal(*raw, &wrapper); err != nil {
return nil, err
}
case "header":
var v ContentElementHeading
if err := json.Unmarshal(*raw, &v); err != nil {
log.Printf("runtime error: %v", err)
}
graf = strings.Repeat("#", v.Level) + " " + v.Content
case "oembed_response":
var v ContentElementOembed
if err := json.Unmarshal(*raw, &v); err != nil {
return nil, err
}
graf = v.RawOembed.HTML
case "list":
var v ContentElementList
if err := json.Unmarshal(*raw, &v); err != nil {
return nil, err
}
var buf strings.Builder
n := 0
switch v.ListType {
case "unordered":
n = -1
case "ordered":
n = 1
default:
warnings = append(warnings,
fmt.Sprintf("unknown list type: %q", v.ListType))
continue
}
for j, item := range v.Items {
if j != 0 {
buf.WriteString("\n\n")
}
var li string
switch item.Type {
case "text":
li = strings.TrimSpace(item.Content)
default:
warnings = append(warnings,
fmt.Sprintf("unknown list item type: %q", item.Type))
continue
}
if n < 1 {
buf.WriteString("- ")
} else {
buf.WriteString(strconv.Itoa(n))
buf.WriteString(". ")
n++
}
buf.WriteString(li)
}
graf = buf.String()
case "image":
var v ContentElementImage
if err := json.Unmarshal(*raw, &v); err != nil {
return nil, err
}
var credits []string
for _, c := range v.Credits.By {
credits = append(credits, c.Name)
}
credit := fixCredit(strings.Join(credits, " "))
imageURL := v.AdditionalProperties.ResizeURL
if imageURL == "" && strings.Contains(v.URL, "public") {
imageURL = v.URL
}
if imageURL == "" {
warnings = append(warnings,
fmt.Sprintf("could not find public image for %q", v.URL))
continue
}
u, imgerr := svc.ReplaceImageURL(ctx, imageURL, v.Caption, credit)
if imgerr != nil {
warnings = append(warnings, imgerr.Error())
}
u = html.EscapeString(u)
desc := html.EscapeString(v.Caption)
credit = html.EscapeString(credit)
graf = fmt.Sprintf(
`{{<picture src="%s" description="%s" caption="%s" credit="%s">}}`+"\n",
u, desc, desc, credit,
)
graf = strings.ReplaceAll(graf, "\n", " ")
case "gallery", "interstitial_link":
continue
case "divider":
graf = "<hr>"
default:
warnings = append(warnings,
fmt.Sprintf("unknown element type - %q", _type))
continue
}
if i != 0 {
body.WriteString("\n\n")
}
body.WriteString(graf)
}
return
}
func setArticleImage(a *SpotlightPAArticle, p PromoItems) {
a.ImageURL = p.Basic.AdditionalProperties.ResizeURL
if a.ImageURL == "" && strings.Contains(p.Basic.URL, "public") {
a.ImageURL = p.Basic.URL
}
var credits []string
for _, credit := range p.Basic.Credits.By {
credits = append(credits, stringutils.First(credit.Name, credit.Byline))
}
a.ImageCredit = fixCredit(strings.Join(credits, " / "))
a.ImageDescription = p.Basic.Caption
}
var fixcreditre = regexp.MustCompile(`(?i)\b(staff( photographer)?)\b`)
// fixCredit rewrites staff credits to the Philadelphia Inquirer ("the Inky")
func fixCredit(s string) string {
return fixcreditre.ReplaceAllLiteralString(s, "Philadelphia Inquirer")
}
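// fixCreditExample is an illustrative sketch and not part of the original
// file: it shows the staff-credit rewrite performed by fixCredit.
func fixCreditExample() string {
	// "TIM TAI / Staff Photographer" becomes "TIM TAI / Philadelphia Inquirer".
	return fixCredit("TIM TAI / Staff Photographer")
}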
|
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package nodetag
import (
"bytes"
"context"
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"storj.io/common/pb"
"storj.io/common/signing"
)
var (
// SignatureErr means that the signature was wrong.
SignatureErr = errs.Class("invalid signature")
// SerializationErr is returned when the tags are signed, but the payload couldn't be unmarshalled.
SerializationErr = errs.Class("invalid tag serialization")
// WrongSignee is returned when the tags are signed, but the signee field has a different NodeID.
WrongSignee = errs.Class("node id mismatch")
)
// Sign creates a signed tag set from a raw one.
func Sign(ctx context.Context, tagSet *pb.NodeTagSet, signer signing.Signer) (*pb.SignedNodeTagSet, error) {
signed := &pb.SignedNodeTagSet{}
raw, err := proto.Marshal(tagSet)
if err != nil {
return nil, errs.Wrap(err)
}
signature, err := signer.HashAndSign(ctx, raw)
if err != nil {
return nil, errs.Wrap(err)
}
signed.Signature = signature
signed.SignerNodeId = signer.ID().Bytes()
signed.SerializedTag = raw
return signed, nil
}
// Verify checks the signature of a signed tag set.
func Verify(ctx context.Context, tags *pb.SignedNodeTagSet, signee signing.Signee) (*pb.NodeTagSet, error) {
if !bytes.Equal(tags.SignerNodeId, signee.ID().Bytes()) {
return nil, WrongSignee.New("wrong signee to verify")
}
err := signee.HashAndVerifySignature(ctx, tags.SerializedTag, tags.Signature)
if err != nil {
return nil, SignatureErr.Wrap(err)
}
tagset := &pb.NodeTagSet{}
err = proto.Unmarshal(tags.SerializedTag, tagset)
if err != nil {
return nil, SerializationErr.Wrap(err)
}
return tagset, nil
}
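// signAndVerify is an illustrative sketch and not part of the original file:
// it signs a tag set and immediately verifies the result, which is a handy
// self-check in tests. Taking separate Signer and Signee parameters is an
// assumption of this sketch.
func signAndVerify(ctx context.Context, tagSet *pb.NodeTagSet, signer signing.Signer, signee signing.Signee) (*pb.NodeTagSet, error) {
	signed, err := Sign(ctx, tagSet, signer)
	if err != nil {
		return nil, err
	}
	return Verify(ctx, signed, signee)
}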
|
// Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package keys_test
import (
"bytes"
"encoding/hex"
"fmt"
"math"
"strconv"
"strings"
"testing"
"time"
"unicode/utf8"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/bitarray"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/keysutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
func lockTableKey(key roachpb.Key) roachpb.Key {
k, _ := keys.LockTableSingleKey(key, nil)
return k
}
func TestPrettyPrint(t *testing.T) {
tenSysCodec := keys.SystemSQLCodec
ten5Codec := keys.MakeSQLCodec(roachpb.MakeTenantID(5))
tm, _ := time.Parse(time.RFC3339Nano, "2016-03-30T13:40:35.053725008Z")
duration := duration.MakeDuration(1*time.Second.Nanoseconds(), 1, 1)
durationAsc, _ := encoding.EncodeDurationAscending(nil, duration)
durationDesc, _ := encoding.EncodeDurationDescending(nil, duration)
bitArray := bitarray.MakeBitArrayFromInt64(8, 58, 7)
txnID := uuid.MakeV4()
// Support for asserting that the ugly printer supports a key was added after
// most of the tests here were written.
revertSupportUnknown := false
revertMustSupport := true
// The following test cases encode keys with a mixture of ascending and descending direction,
// but always decode keys in the ascending direction. This is why some of the decoded values
// seem bizarre.
testCases := []struct {
key roachpb.Key
exp string
assertRevertSupported bool
}{
// local
{keys.StoreIdentKey(), "/Local/Store/storeIdent", revertSupportUnknown},
{keys.StoreGossipKey(), "/Local/Store/gossipBootstrap", revertSupportUnknown},
{keys.StoreClusterVersionKey(), "/Local/Store/clusterVersion", revertSupportUnknown},
{keys.StoreNodeTombstoneKey(123), "/Local/Store/nodeTombstone/n123", revertSupportUnknown},
{keys.StoreCachedSettingsKey(roachpb.Key("a")), `/Local/Store/cachedSettings/"a"`, revertSupportUnknown},
{keys.AbortSpanKey(roachpb.RangeID(1000001), txnID), fmt.Sprintf(`/Local/RangeID/1000001/r/AbortSpan/%q`, txnID), revertSupportUnknown},
{keys.RangeAppliedStateKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeAppliedState", revertSupportUnknown},
{keys.RaftAppliedIndexLegacyKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RaftAppliedIndex", revertSupportUnknown},
{keys.LeaseAppliedIndexLegacyKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/LeaseAppliedIndex", revertSupportUnknown},
{keys.RaftTruncatedStateLegacyKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RaftTruncatedState", revertSupportUnknown},
{keys.RaftTruncatedStateKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RaftTruncatedState", revertSupportUnknown},
{keys.RangeLeaseKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeLease", revertSupportUnknown},
{keys.RangePriorReadSummaryKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangePriorReadSummary", revertSupportUnknown},
{keys.RangeStatsLegacyKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeStats", revertSupportUnknown},
{keys.RangeLastGCKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeLastGC", revertSupportUnknown},
{keys.RangeVersionKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeVersion", revertSupportUnknown},
{keys.RaftHardStateKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RaftHardState", revertSupportUnknown},
{keys.RangeTombstoneKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RangeTombstone", revertSupportUnknown},
{keys.RaftLogKey(roachpb.RangeID(1000001), uint64(200001)), "/Local/RangeID/1000001/u/RaftLog/logIndex:200001", revertSupportUnknown},
{keys.RangeLastReplicaGCTimestampKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RangeLastReplicaGCTimestamp", revertSupportUnknown},
{keys.MakeRangeKeyPrefix(roachpb.RKey(tenSysCodec.TablePrefix(42))), `/Local/Range/Table/42`, revertSupportUnknown},
{keys.RangeDescriptorKey(roachpb.RKey(tenSysCodec.TablePrefix(42))), `/Local/Range/Table/42/RangeDescriptor`, revertSupportUnknown},
{keys.TransactionKey(tenSysCodec.TablePrefix(42), txnID), fmt.Sprintf(`/Local/Range/Table/42/Transaction/%q`, txnID), revertSupportUnknown},
{keys.QueueLastProcessedKey(roachpb.RKey(tenSysCodec.TablePrefix(42)), "foo"), `/Local/Range/Table/42/QueueLastProcessed/"foo"`, revertSupportUnknown},
{lockTableKey(keys.RangeDescriptorKey(roachpb.RKey(tenSysCodec.TablePrefix(42)))), `/Local/Lock/Intent/Local/Range/Table/42/RangeDescriptor`, revertSupportUnknown},
{lockTableKey(tenSysCodec.TablePrefix(111)), "/Local/Lock/Intent/Table/111", revertSupportUnknown},
{keys.MakeRangeKeyPrefix(roachpb.RKey(ten5Codec.TenantPrefix())), `/Local/Range/Tenant/5`, revertSupportUnknown},
{keys.MakeRangeKeyPrefix(roachpb.RKey(ten5Codec.TablePrefix(42))), `/Local/Range/Tenant/5/Table/42`, revertSupportUnknown},
{keys.RangeDescriptorKey(roachpb.RKey(ten5Codec.TablePrefix(42))), `/Local/Range/Tenant/5/Table/42/RangeDescriptor`, revertSupportUnknown},
{keys.TransactionKey(ten5Codec.TablePrefix(42), txnID), fmt.Sprintf(`/Local/Range/Tenant/5/Table/42/Transaction/%q`, txnID), revertSupportUnknown},
{keys.QueueLastProcessedKey(roachpb.RKey(ten5Codec.TablePrefix(42)), "foo"), `/Local/Range/Tenant/5/Table/42/QueueLastProcessed/"foo"`, revertSupportUnknown},
{lockTableKey(keys.RangeDescriptorKey(roachpb.RKey(ten5Codec.TablePrefix(42)))), `/Local/Lock/Intent/Local/Range/Tenant/5/Table/42/RangeDescriptor`, revertSupportUnknown},
{lockTableKey(ten5Codec.TablePrefix(111)), "/Local/Lock/Intent/Tenant/5/Table/111", revertSupportUnknown},
{keys.LocalMax, `/Meta1/""`, revertSupportUnknown}, // LocalMax == Meta1Prefix
// system
{makeKey(keys.Meta2Prefix, roachpb.Key("foo")), `/Meta2/"foo"`, revertSupportUnknown},
{makeKey(keys.Meta1Prefix, roachpb.Key("foo")), `/Meta1/"foo"`, revertSupportUnknown},
{keys.RangeMetaKey(roachpb.RKey("f")).AsRawKey(), `/Meta2/"f"`, revertSupportUnknown},
{keys.NodeLivenessKey(10033), "/System/NodeLiveness/10033", revertSupportUnknown},
{keys.NodeStatusKey(1111), "/System/StatusNode/1111", revertSupportUnknown},
{keys.SystemMax, "/System/Max", revertSupportUnknown},
// key of key
{keys.RangeMetaKey(roachpb.RKey(keys.MakeRangeKeyPrefix(roachpb.RKey(tenSysCodec.TablePrefix(42))))).AsRawKey(), `/Meta2/Local/Range/Table/42`, revertSupportUnknown},
{keys.RangeMetaKey(roachpb.RKey(makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey("foo")))).AsRawKey(), `/Meta2/Table/42/"foo"`, revertSupportUnknown},
{keys.RangeMetaKey(roachpb.RKey(makeKey(keys.Meta2Prefix, roachpb.Key("foo")))).AsRawKey(), `/Meta1/"foo"`, revertSupportUnknown},
// table
{keys.SystemConfigSpan.Key, "/Table/SystemConfigSpan/Start", revertSupportUnknown},
{keys.UserTableDataMin, "/Table/50", revertMustSupport},
{tenSysCodec.TablePrefix(111), "/Table/111", revertMustSupport},
{makeKey(tenSysCodec.TablePrefix(42), encoding.EncodeUvarintAscending(nil, 1)), `/Table/42/1`, revertMustSupport},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey("foo")), `/Table/42/"foo"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, float64(233.221112)))),
"/Table/42/233.221112", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatDescending(nil, float64(-233.221112)))),
"/Table/42/233.221112", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, math.Inf(1)))),
"/Table/42/+Inf", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, math.NaN()))),
"/Table/42/NaN", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222)),
roachpb.RKey(encoding.EncodeStringAscending(nil, "handsome man"))),
`/Table/42/1222/"handsome man"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222))),
`/Table/42/1222`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintDescending(nil, 1222))),
`/Table/42/-1223`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255}))),
`/Table/42/"\x01\x02\b\xff"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255})),
roachpb.RKey("bar")), `/Table/42/"\x01\x02\b\xff"/"bar"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesDescending(nil, []byte{1, 2, 8, 255})),
roachpb.RKey("bar")), `/Table/42/"\x01\x02\b\xff"/"bar"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNullAscending(nil))), "/Table/42/NULL", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNullDescending(nil))), "/Table/42/NULL", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNotNullAscending(nil))), "/Table/42/!NULL", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNotNullDescending(nil))), "/Table/42/#", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeTimeAscending(nil, tm))),
"/Table/42/2016-03-30T13:40:35.053725008Z", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeTimeDescending(nil, tm))),
"/Table/42/1923-10-04T10:19:23.946274991Z", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeDecimalAscending(nil, apd.New(1234, -2)))),
"/Table/42/12.34", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeDecimalDescending(nil, apd.New(1234, -2)))),
"/Table/42/-12.34", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayAscending(nil, bitArray))),
"/Table/42/B00111010", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayDescending(nil, bitArray))),
"/Table/42/B00111010", revertSupportUnknown},
// Regression test for #31115.
{roachpb.Key(makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayAscending(nil, bitarray.MakeZeroBitArray(64))),
)).PrefixEnd(),
"/Table/42/B0000000000000000000000000000000000000000000000000000000000000000/PrefixEnd", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(durationAsc)),
"/Table/42/1 mon 1 day 00:00:01", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(durationDesc)),
"/Table/42/-2 mons -2 days +743:59:58.999999+999ns", revertSupportUnknown},
// sequence
{tenSysCodec.SequenceKey(55), `/Table/55/1/0/0`, revertSupportUnknown},
// tenant table
{ten5Codec.TenantPrefix(), "/Tenant/5", revertMustSupport},
{ten5Codec.TablePrefix(0), "/Tenant/5/Table/SystemConfigSpan/Start", revertSupportUnknown},
{ten5Codec.TablePrefix(keys.MinUserDescID), "/Tenant/5/Table/50", revertMustSupport},
{ten5Codec.TablePrefix(111), "/Tenant/5/Table/111", revertMustSupport},
{makeKey(ten5Codec.TablePrefix(42), encoding.EncodeUvarintAscending(nil, 1)), `/Tenant/5/Table/42/1`, revertMustSupport},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey("foo")), `/Tenant/5/Table/42/"foo"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, float64(233.221112)))),
"/Tenant/5/Table/42/233.221112", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatDescending(nil, float64(-233.221112)))),
"/Tenant/5/Table/42/233.221112", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, math.Inf(1)))),
"/Tenant/5/Table/42/+Inf", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, math.NaN()))),
"/Tenant/5/Table/42/NaN", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222)),
roachpb.RKey(encoding.EncodeStringAscending(nil, "handsome man"))),
`/Tenant/5/Table/42/1222/"handsome man"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222))),
`/Tenant/5/Table/42/1222`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintDescending(nil, 1222))),
`/Tenant/5/Table/42/-1223`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255}))),
`/Tenant/5/Table/42/"\x01\x02\b\xff"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255})),
roachpb.RKey("bar")), `/Tenant/5/Table/42/"\x01\x02\b\xff"/"bar"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesDescending(nil, []byte{1, 2, 8, 255})),
roachpb.RKey("bar")), `/Tenant/5/Table/42/"\x01\x02\b\xff"/"bar"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNullAscending(nil))), "/Tenant/5/Table/42/NULL", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNullDescending(nil))), "/Tenant/5/Table/42/NULL", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNotNullAscending(nil))), "/Tenant/5/Table/42/!NULL", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNotNullDescending(nil))), "/Tenant/5/Table/42/#", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeTimeAscending(nil, tm))),
"/Tenant/5/Table/42/2016-03-30T13:40:35.053725008Z", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeTimeDescending(nil, tm))),
"/Tenant/5/Table/42/1923-10-04T10:19:23.946274991Z", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeDecimalAscending(nil, apd.New(1234, -2)))),
"/Tenant/5/Table/42/12.34", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeDecimalDescending(nil, apd.New(1234, -2)))),
"/Tenant/5/Table/42/-12.34", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayAscending(nil, bitArray))),
"/Tenant/5/Table/42/B00111010", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayDescending(nil, bitArray))),
"/Tenant/5/Table/42/B00111010", revertSupportUnknown},
// Regression test for #31115.
{roachpb.Key(makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayAscending(nil, bitarray.MakeZeroBitArray(64))),
)).PrefixEnd(),
"/Tenant/5/Table/42/B0000000000000000000000000000000000000000000000000000000000000000/PrefixEnd", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(durationAsc)),
"/Tenant/5/Table/42/1 mon 1 day 00:00:01", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(durationDesc)),
"/Tenant/5/Table/42/-2 mons -2 days +743:59:58.999999+999ns", revertSupportUnknown},
// sequence
{ten5Codec.SequenceKey(55), `/Tenant/5/Table/55/1/0/0`, revertSupportUnknown},
// others
{makeKey([]byte("")), "/Min", revertSupportUnknown},
{keys.Meta1KeyMax, "/Meta1/Max", revertSupportUnknown},
{keys.Meta2KeyMax, "/Meta2/Max", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey([]byte{0xf6})), `/Table/42/109/PrefixEnd`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey([]byte{0xf7})), `/Table/42/255/PrefixEnd`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey([]byte{0x12, 'a', 0x00, 0x02})), `/Table/42/"a"/PrefixEnd`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey([]byte{0x12, 'a', 0x00, 0x03})), `/Table/42/???`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey([]byte{0xf6})), `/Tenant/5/Table/42/109/PrefixEnd`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey([]byte{0xf7})), `/Tenant/5/Table/42/255/PrefixEnd`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey([]byte{0x12, 'a', 0x00, 0x02})), `/Tenant/5/Table/42/"a"/PrefixEnd`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey([]byte{0x12, 'a', 0x00, 0x03})), `/Tenant/5/Table/42/???`, revertSupportUnknown},
// Special characters.
{makeKey(tenSysCodec.TablePrefix(61),
encoding.EncodeBytesAscending(nil, []byte("☃⚠"))),
`/Table/61/"☃⚠"`, revertSupportUnknown,
},
// Invalid utf-8 sequence.
{makeKey(tenSysCodec.TablePrefix(61),
encoding.EncodeBytesAscending(nil, []byte{0xff, 0xff})),
`/Table/61/"\xff\xff"`, revertSupportUnknown,
},
}
for i, test := range testCases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
keyPP := keys.PrettyPrint(nil /* valDirs */, test.key)
keyInfo := massagePrettyPrintedSpanForTest(keyPP, nil)
exp := massagePrettyPrintedSpanForTest(test.exp, nil)
t.Logf(`---- test case #%d:
input: %q
output: %s
exp: %s
`, i+1, []byte(test.key), keyInfo, exp)
if exp != keyInfo {
t.Errorf("%d: expected:\n%+v\ngot:\n%+v", i, []byte(exp), []byte(keyInfo))
}
if exp != massagePrettyPrintedSpanForTest(test.key.String(), nil) {
t.Errorf("%d: from string expected %s, got %s", i, exp, test.key.String())
}
scanner := keysutil.MakePrettyScanner(nil /* tableParser */)
parsed, err := scanner.Scan(keyInfo)
if err != nil {
if !errors.HasType(err, (*keys.ErrUglifyUnsupported)(nil)) {
t.Errorf("%d: %s: %s", i, keyInfo, err)
} else if !test.assertRevertSupported {
t.Logf("%d: skipping parsing of %s; key is unsupported: %v", i, keyInfo, err)
} else {
t.Errorf("%d: ugly print expected unexpectedly unsupported (%s)", i, test.exp)
}
} else if exp, act := test.key, parsed; !bytes.Equal(exp, act) {
t.Errorf("%d: ugly print expected '%q', got '%q'", i, exp, act)
}
if t.Failed() {
return
}
})
}
}
// massagePrettyPrintedSpanForTest does some transformations on pretty-printed spans and keys:
// - if dirs is not nil, replace all ints with their ones' complement for
// descendingly-encoded columns.
// - strips line numbers from error messages.
func massagePrettyPrintedSpanForTest(span string, dirs []encoding.Direction) string {
var r strings.Builder
colIdx := -1
for i := 0; i < len(span); i++ {
if dirs != nil {
var d int
if _, err := fmt.Sscanf(span[i:], "%d", &d); err == nil {
// We've managed to consume an int.
dir := dirs[colIdx]
i += len(strconv.Itoa(d)) - 1
x := d
if dir == encoding.Descending {
x = ^x
}
r.WriteString(strconv.Itoa(x))
continue
}
}
switch {
case span[i] == '/':
colIdx++
r.WriteByte(span[i])
case span[i] == '-' || span[i] == ' ':
// We're switching from the start constraints to the end constraints,
// or starting another span.
colIdx = -1
r.WriteByte(span[i])
case span[i] < ' ':
fmt.Fprintf(&r, "\\x%02x", span[i])
case span[i] <= utf8.RuneSelf:
r.WriteByte(span[i])
default:
c, width := utf8.DecodeRuneInString(span[i:])
if c == utf8.RuneError {
fmt.Fprintf(&r, "\\x%02x", span[i])
} else {
r.WriteRune(c)
}
i += width - 1
}
}
return r.String()
}
func TestPrettyPrintRange(t *testing.T) {
tenSysCodec := keys.SystemSQLCodec
ten5Codec := keys.MakeSQLCodec(roachpb.MakeTenantID(5))
key := makeKey([]byte("a"))
key2 := makeKey([]byte("z"))
tableKey := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeVarintAscending(nil, 4))
tableKey2 := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeVarintAscending(nil, 500))
tenTableKey := makeKey(ten5Codec.TablePrefix(61), encoding.EncodeVarintAscending(nil, 999))
specialBytesKeyA := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeBytesAscending(nil, []byte("☃️")))
specialBytesKeyB := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeBytesAscending(nil, []byte("☃️⚠")))
specialBytesKeyC := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeBytesAscending(nil, []byte{0xff, 0x00}))
specialBytesKeyD := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeBytesAscending(nil, []byte{0xff, 0xfe}))
testCases := []struct {
start, end roachpb.Key
maxChars int
expected string
}{
{key, nil, 20, "a"},
{tableKey, nil, 10, "/Table/61…"},
{tableKey, specialBytesKeyB, 20, `/Table/61/{4-"\xe2…}`},
{tableKey, specialBytesKeyB, 30, `/Table/61/{4-"☃️…}`},
{tableKey, specialBytesKeyB, 50, `/Table/61/{4-"☃️⚠"}`},
{specialBytesKeyA, specialBytesKeyB, 20, `/Table/61/"☃️…`},
{specialBytesKeyA, specialBytesKeyB, 25, `/Table/61/"☃️{"-\xe2…}`},
{specialBytesKeyA, specialBytesKeyB, 30, `/Table/61/"☃️{"-⚠"}`},
// Note: the PrettyPrintRange() algorithm operates on the result
// of PrettyPrint(), which already turns special characters into
// hex sequences. Therefore, it can merge and truncate the hex
// codes. To improve this would require making PrettyPrint() take
// a bool flag to return un-escaped bytes, and let
// PrettyPrintRange() escape the output adequately.
//
// Since all of this is best-effort, we'll accept the status quo
// for now.
{specialBytesKeyC, specialBytesKeyD, 20, `/Table/61/"\xff\x…`},
{specialBytesKeyC, specialBytesKeyD, 30, `/Table/61/"\xff\x{00"-fe"}`},
{specialBytesKeyB, specialBytesKeyD, 20, `/Table/61/"{\xe2\x98…-\x…}`},
{specialBytesKeyB, specialBytesKeyD, 30, `/Table/61/"{☃️\xe2…-\xff\xf…}`},
{specialBytesKeyB, specialBytesKeyD, 50, `/Table/61/"{☃️⚠"-\xff\xfe"}`},
{tenTableKey, nil, 20, "/Tenant/5/Table/61/…"},
{key, key2, 20, "{a-z}"},
{keys.MinKey, tableKey, 8, "/{M…-T…}"},
{keys.MinKey, tableKey, 15, "/{Min-Tabl…}"},
{keys.MinKey, tableKey, 20, "/{Min-Table/6…}"},
{keys.MinKey, tableKey, 25, "/{Min-Table/61/4}"},
{keys.MinKey, tenTableKey, 8, "/{M…-T…}"},
{keys.MinKey, tenTableKey, 15, "/{Min-Tena…}"},
{keys.MinKey, tenTableKey, 20, "/{Min-Tenant/…}"},
{keys.MinKey, tenTableKey, 25, "/{Min-Tenant/5/…}"},
{keys.MinKey, tenTableKey, 30, "/{Min-Tenant/5/Tab…}"},
{tableKey, tableKey2, 8, "/Table/…"},
{tableKey, tableKey2, 15, "/Table/61/…"},
{tableKey, tableKey2, 20, "/Table/61/{4-500}"},
{tableKey, keys.MaxKey, 10, "/{Ta…-Max}"},
{tableKey, keys.MaxKey, 20, "/{Table/6…-Max}"},
{tableKey, keys.MaxKey, 25, "/{Table/61/4-Max}"},
{tenTableKey, keys.MaxKey, 10, "/{Te…-Max}"},
{tenTableKey, keys.MaxKey, 20, "/{Tenant/…-Max}"},
{tenTableKey, keys.MaxKey, 25, "/{Tenant/5/…-Max}"},
{tenTableKey, keys.MaxKey, 30, "/{Tenant/5/Tab…-Max}"},
}
for i, tc := range testCases {
str := keys.PrettyPrintRange(tc.start, tc.end, tc.maxChars)
if str != tc.expected {
t.Errorf("%d: expected:\n%s\ngot:\n%s", i, tc.expected, str)
}
}
}
func TestFormatHexKey(t *testing.T) {
// Verify that we properly handling the 'x' formatting verb in
// roachpb.Key.Format.
key := keys.StoreIdentKey()
decoded, err := hex.DecodeString(fmt.Sprintf("%x", key))
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(key, decoded) {
t.Fatalf("expected %s, but found %s", key, decoded)
}
}
func makeKey(keys ...[]byte) []byte {
return bytes.Join(keys, nil)
}
|
package sham
import (
"fmt"
log "github.com/sirupsen/logrus"
"testing"
"time"
)
func TestNoSchedulerNoop(t *testing.T) {
shamOS := NewOS()
shamOS.Boot()
}
func TestFCFSScheduler(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.ReadyProcs = []*Process{&Noop, &Noop}
log.WithField("OS.ReadyProcs", shamOS.ReadyProcs).Debug("before CreateProcess")
shamOS.CreateProcess("processFoo", 10, 1, func(contextual *Contextual) int {
for i := 0; i < 3; i++ {
fmt.Printf("%d From processFoo\n", i)
}
// test: use memory
log.WithField("OS.Mem", shamOS.Mem).Debug("before using mem")
mem := &contextual.Process.Memory[0]
if mem.Content == nil {
mem.Content = map[string]string{"hello": "world"}
}
log.WithField("OS.Mem", shamOS.Mem).Debug("after using mem")
// test: create a new process
log.WithField("OS.ReadyProcs", shamOS.ReadyProcs).Debug("before CreateProcess")
// A system call!
contextual.OS.CreateProcess("ProcessBar", 10, 0, func(contextual *Contextual) int {
fmt.Println("From ProcessBar, a Process dynamic created by processFoo")
return StatusDone
})
log.WithField("OS.ReadyProcs", shamOS.ReadyProcs).Debug("after CreateProcess")
return StatusDone
})
log.WithField("OS.ReadyProcs", shamOS.ReadyProcs).Debug("after CreateProcess")
shamOS.Boot()
}
func TestCommit(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.CreateProcess("processFoo", 10, 1, func(contextual *Contextual) int {
mem := &contextual.Process.Memory[0]
if mem.Content == nil {
mem.Content = map[string]int{"power": 1}
}
logger := log.WithField("mem", mem)
// costs 3 clock ticks: PC 0, 1, 2
for i := 0; i < 3; i++ {
logger.Debug("[processFoo]")
mem.Content.(map[string]int)["power"] <<= 1
contextual.Commit()
}
// part_3:
logger.Debug("part_3")
fmt.Println("processFoo PC (3 expected):", contextual.PC)
logger.Debug("exit: StatusDone")
return StatusDone
})
shamOS.Boot()
}
func TestReturnStatus(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.CreateProcess("processFoo", 10, 1, func(contextual *Contextual) int {
mem := &contextual.Process.Memory[0]
switch contextual.PC {
case 0:
if mem.Content == nil {
mem.Content = map[string]uint{"PC": contextual.PC}
}
case 3:
fmt.Println("processFoo: PC == 3, exit")
return StatusDone
default:
mem.Content.(map[string]uint)["PC"] += 1
}
fooPC := contextual.PC
contextual.OS.CreateProcess("ProcessBar", 10, 0, func(contextual *Contextual) int {
fmt.Println("From ProcessBar, a Process dynamic created by processFoo. Parent PC:", fooPC)
return StatusDone
})
return StatusReady
})
shamOS.Boot()
}
func TestSeq(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
// This is a standard sequentially executed process.
shamOS.CreateProcess("processSeq", 10, 1, func(contextual *Contextual) int {
mem := &contextual.Process.Memory[0]
switch contextual.PC {
case 0:
if mem.Content == nil {
mem.Content = map[string]uint{"count": 0}
}
log.Debug("Line 0")
return StatusRunning
case 1:
log.Debug("Line 1")
mem.Content.(map[string]uint)["count"] += 1
return StatusRunning
case 2:
log.Debug("Line 2")
mem.Content.(map[string]uint)["count"] += 1
return StatusRunning
case 3:
if mem.Content.(map[string]uint)["count"] == 2 {
fmt.Println("count == 2, exit")
return StatusDone
}
}
return StatusDone
})
shamOS.Boot()
}
func TestCancel(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
go func() {
time.Sleep(2 * time.Second)
shamOS.CPU.Cancel(StatusReady) // if StatusBlocked: all blocked, run noops
}()
shamOS.CreateProcess("processSeq", 10, 1, func(contextual *Contextual) int {
mem := &contextual.Process.Memory[0]
switch contextual.PC {
case 0:
if mem.Content == nil {
mem.Content = map[string]uint{"count": 0}
}
log.Debug("Line 0")
return StatusRunning
case 1:
log.Debug("Line 1")
mem.Content.(map[string]uint)["count"] += 1
return StatusRunning
case 2:
log.Debug("Line 2")
mem.Content.(map[string]uint)["count"] += 1
return StatusRunning
case 3:
if mem.Content.(map[string]uint)["count"] == 2 {
fmt.Println("count == 2, exit")
return StatusDone
}
}
return StatusDone
})
shamOS.Boot()
}
func TestClockInterrupt(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.CreateProcess("processSeq", 10, 1, func(contextual *Contextual) int {
switch {
case contextual.PC < 30:
contextual.OS.CreateProcess(fmt.Sprintf("subprocess%d", contextual.PC), 10, 0, func(contextual *Contextual) int {
fmt.Println(contextual.Process.Id)
return StatusDone
})
log.WithField("PC", contextual.PC).Debug("processSeq continue")
return StatusRunning
case contextual.PC == 30:
log.WithField("PC", contextual.PC).Debug("processSeq exit")
}
return StatusDone
})
shamOS.Boot()
}
func TestStdOut(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.CreateProcess("processSeq", 10, 1, func(contextual *Contextual) int {
mem := &contextual.Process.Memory[0]
chanOutput := make(chan interface{}, 10)
switch contextual.PC {
case 0:
if mem.Content == nil {
mem.Content = map[string]uint{"count": 0}
}
log.Debug("Line 0")
chanOutput <- mem.Content.(map[string]uint)["count"]
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chanOutput)
return StatusRunning
case 1:
log.Debug("Line 1")
mem.Content.(map[string]uint)["count"] += 1
chanOutput <- mem.Content.(map[string]uint)["count"]
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chanOutput)
return StatusRunning
case 2:
log.Debug("Line 2")
mem.Content.(map[string]uint)["count"] += 1
chanOutput <- mem.Content.(map[string]uint)["count"]
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chanOutput)
return StatusRunning
case 3:
if mem.Content.(map[string]uint)["count"] == 2 {
fmt.Println("By fmt.Println: count == 2, exit")
chanOutput <- "By StdOut: count == 2, exit"
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chanOutput)
return StatusDone
}
}
return StatusDone
})
shamOS.Boot()
}
func TestHelloWorld(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.ReadyProcs = []*Process{} // No Noop
shamOS.CreateProcess("processSeq", 10, 1, func(contextual *Contextual) int {
ch := make(chan interface{}, 1)
ch <- "Hello, world!"
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, ch)
return StatusDone
})
shamOS.Boot()
}
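// printLine is a hedged helper sketch (not part of the original tests): it captures the
// StdOut pattern used above, sending one value on a buffered channel and raising a
// StdOutInterrupt for the current thread so the OS performs the actual output.
func printLine(contextual *Contextual, v interface{}) {
ch := make(chan interface{}, 1)
ch <- v
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, ch)
}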
func TestStdIn(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.ReadyProcs = []*Process{} // No Noop
shamOS.CreateProcess("processSeq_0", 10, 1, func(contextual *Contextual) int {
mem := &contextual.Process.Memory[0]
switch contextual.PC {
case 0:
in := make(chan interface{}, 1)
// in is used across multiple cycles, so it must be stored in memory
mem.Content = map[string]chan interface{}{"in": in}
contextual.OS.InterruptRequest(contextual.Process.Thread, StdInInterrupt, in)
return StatusRunning
case 1:
in := mem.Content.(map[string]chan interface{})["in"]
log.Debug("to recv")
content := <-in
log.WithField("content", content).Debug("got content")
out := make(chan interface{}, 1)
out <- content
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, out)
return StatusDone
}
return StatusDone
})
shamOS.CreateProcess("processSeq_1", 10, 1, func(contextual *Contextual) int {
mem := &contextual.Process.Memory[0]
switch contextual.PC {
case 0:
in := make(chan interface{}, 2)
// in is used across multiple cycles, so it must be stored in memory
mem.Content = map[string]chan interface{}{"in": in}
// request multiple inputs
contextual.OS.InterruptRequest(contextual.Process.Thread, StdInInterrupt, in)
contextual.OS.InterruptRequest(contextual.Process.Thread, StdInInterrupt, in)
return StatusRunning
case 1:
in := mem.Content.(map[string]chan interface{})["in"]
log.Debug("to recv")
content := (<-in).(string) + (<-in).(string)
log.WithField("content", content).Debug("got content")
out := make(chan interface{}, 1)
out <- content
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, out)
return StatusDone
}
return StatusDone
})
//shamOS.ReadyProcs = shamOS.ReadyProcs[:1]
shamOS.Boot()
}
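// Note (hedged sketch, not part of the original tests): values that must survive across
// scheduling cycles, such as the stdin channel above, cannot live in local variables,
// because the process body is re-entered from the top on every cycle. The pattern used
// throughout these tests is to stash them in the process's Memory slot; storeChan is a
// hypothetical helper that captures that pattern.
func storeChan(contextual *Contextual, key string, ch chan interface{}) {
mem := &contextual.Process.Memory[0]
if mem.Content == nil {
mem.Content = map[string]chan interface{}{}
}
mem.Content.(map[string]chan interface{})[key] = ch
}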
func TestClockInterruptMeetIO(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.CreateProcess("processMixItr", 10, 1, func(contextual *Contextual) int {
chanOutput := make(chan interface{}, 10)
switch {
case contextual.PC <= 9:
log.WithField("PC", contextual.PC).Debug("waiting...")
return StatusRunning
case contextual.PC == 10:
chanOutput <- "output something just before clock interrupt"
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chanOutput)
return StatusRunning
case contextual.PC == 11:
chanOutput <- "output something just after clock interrupt"
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chanOutput)
return StatusDone
}
return StatusDone
})
shamOS.Boot()
}
func TestPipe(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.ReadyProcs = []*Process{} // No Noop
shamOS.CreateProcess("processSend", 10, 1, func(contextual *Contextual) int {
mem := &contextual.Process.Memory[0]
switch contextual.PC {
case 0:
chanNewPipeArg := make(chan interface{}, 2)
chanNewPipeArg <- "pipe_test" // pipeId
chanNewPipeArg <- 3 // pipeBufferSize
contextual.OS.InterruptRequest(contextual.Process.Thread, NewPipeInterrupt, chanNewPipeArg)
return StatusRunning
case 1:
pipe, ok := contextual.Process.Devices["pipe_test"]
if !ok {
log.Error("got no pipe!")
return StatusDone
}
log.WithFields(log.Fields{
"pipe": pipe.GetId(),
}).Debug("processSend: pipe created successfully")
if mem.Content == nil {
mem.Content = map[string]uint{"bpc": 0} // bpc 是下面的 default case 的独立程序计数器
}
return StatusRunning
default:
bpc := mem.Content.(map[string]uint)["bpc"]
pipe := interface{}(contextual.Process.Devices["pipe_test"]).(*Pipe)
switch bpc {
case 0:
if pipe.Inputable() {
pipe.Input() <- "Hello"
mem.Content.(map[string]uint)["bpc"] += 1
log.Debug("processSend sent 1/2")
}
return StatusRunning
case 1:
if pipe.Inputable() {
pipe.Input() <- "World"
mem.Content.(map[string]uint)["bpc"] += 1
log.Debug("processSend sent 2/2")
}
return StatusRunning
}
log.Debug("processSend finish")
return StatusDone
}
})
shamOS.CreateProcess("processRecv", 10, 1, func(contextual *Contextual) int {
mem := &contextual.Process.Memory[0]
switch contextual.PC {
case 0:
chanGetPipeArg := make(chan interface{}, 2)
chanGetPipeArg <- "pipe_test" // pipeId
contextual.OS.InterruptRequest(contextual.Process.Thread, GetPipeInterrupt, chanGetPipeArg)
return StatusRunning
case 1:
if pipe, ok := contextual.Process.Devices["pipe_test"]; ok {
log.WithFields(log.Fields{
"pipe": pipe.GetId(),
}).Debug("processRecv: got pipe successfully")
return StatusRunning
} else {
log.Error("got no pipe!")
return StatusDone
}
default:
if mem.Content == nil {
mem.Content = map[string]interface{}{"bpc": 0} // bpc 这个 default case 的独立程序计数器
}
bpc := mem.Content.(map[string]interface{})["bpc"].(int)
pipe := interface{}(contextual.Process.Devices["pipe_test"]).(*Pipe)
switch bpc {
case 0:
if pipe.Outputable() {
mem.Content.(map[string]interface{})["content0"] = <-pipe.Output()
mem.Content.(map[string]interface{})["bpc"] = bpc + 1
log.Debug("processSend recv 1/2")
}
return StatusRunning
case 1:
if pipe.Outputable() {
mem.Content.(map[string]interface{})["content1"] = <-pipe.Output()
mem.Content.(map[string]interface{})["bpc"] = bpc + 1
log.Debug("processSend recv 2/2")
}
return StatusRunning
}
content0 := mem.Content.(map[string]interface{})["content0"]
content1 := mem.Content.(map[string]interface{})["content1"]
log.WithFields(log.Fields{
"content0": content0,
"content1": content1,
}).Debug("processRecv: get content")
chOutput := make(chan interface{}, 2)
chOutput <- content0
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chOutput)
chOutput <- content1
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chOutput)
}
return StatusDone
})
shamOS.Boot()
}
func TestVarPool(t *testing.T) {
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.ReadyProcs = []*Process{} // No Noop
shamOS.CreateProcess("processVarPool", 10, 1, func(contextual *Contextual) int {
switch {
case contextual.PC == 0:
contextual.InitVarPool()
contextual.SetVar("chOutput", make(chan interface{}, 1))
log.Debug("VarPool Setup")
return StatusRunning
case contextual.PC <= 3:
contextual.SetVar("num", contextual.PC*contextual.PC)
chOut := contextual.GetVar("chOutput").(chan interface{})
chOut <- fmt.Sprintln(contextual.GetVar("num"), chOut)
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chOut)
return StatusRunning
}
return StatusDone
})
shamOS.Boot()
}
func TestProducerConsumer(t *testing.T) {
// log.SetLevel(log.ErrorLevel) // only show standard output
shamOS := NewOS()
shamOS.Scheduler = FCFSScheduler{}
shamOS.ReadyProcs = []*Process{} // No Noop
const PipeProduct = "pipe_product"
shamOS.CreateProcess("producer", 10, 100, func(contextual *Contextual) int {
switch contextual.PC {
case 0:
log.Debug("producer (PC 0): VarPool Setup")
contextual.InitVarPool()
contextual.SetVar("chOutput", make(chan interface{}, 1))
return StatusRunning
case 1:
log.Debug("producer (PC 1): make the product pipe")
pipeArgs := make(chan interface{}, 2)
pipeArgs <- PipeProduct // pipeId
pipeArgs <- 3 // pipeBufferSize
contextual.OS.InterruptRequest(contextual.Process.Thread, NewPipeInterrupt, pipeArgs)
return StatusRunning
default:
if contextual.PC > 30 {
chOut := contextual.GetVar("chOutput").(chan interface{})
chOut <- "producer contextual.PC > 30, stop and exit"
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chOut)
return StatusDone
}
_, ok := contextual.TryGetVar("dpc")
if !ok {
contextual.SetVar("dpc", 0)
}
dpc := contextual.GetVar("dpc").(int)
switch dpc {
case 0:
product := contextual.PC
contextual.SetVar("product", product)
log.WithFields(log.Fields{
"product": product,
}).Debug("producer produce")
contextual.SetVar("dpc", 1)
return StatusRunning
default:
pipe := interface{}(contextual.Process.Devices[PipeProduct]).(*Pipe)
if pipe.Inputable() {
product := contextual.GetVar("product")
log.WithField("product", product).Debug("producer put product into PipeProduct")
pipe.Input() <- product
contextual.SetVar("dpc", 0)
} else {
log.WithFields(log.Fields{
"PC": contextual.PC,
}).Debug("producer waiting for consuming")
return StatusReady // yield
}
}
return StatusRunning
}
})
shamOS.CreateProcess("consumer", 10, 100, func(contextual *Contextual) int {
switch contextual.PC {
case 0:
contextual.InitVarPool()
contextual.SetVar("chOutput", make(chan interface{}, 1))
log.Debug("consumer (PC 0): VarPool Setup")
return StatusRunning
case 1:
log.Debug("consumer (PC 1): get the product pipe")
pipeArgs := make(chan interface{}, 2)
pipeArgs <- PipeProduct // pipeId
contextual.OS.InterruptRequest(contextual.Process.Thread, GetPipeInterrupt, pipeArgs)
return StatusRunning
default:
if contextual.PC > 30 {
log.Debug("consumer exit")
chOut := contextual.GetVar("chOutput").(chan interface{})
chOut <- "consumer contextual.PC > 30, stop and exit"
return StatusDone
}
_, ok := contextual.TryGetVar("dpc")
if !ok {
contextual.SetVar("dpc", 0)
}
dpc := contextual.GetVar("dpc").(int)
switch dpc {
case 0:
pipe := interface{}(contextual.Process.Devices[PipeProduct]).(*Pipe)
if pipe.Outputable() {
contextual.SetVar("product", <-pipe.Output())
contextual.SetVar("dpc", 1)
log.Debug("consumer get product")
} else {
log.WithFields(log.Fields{
"PC": contextual.PC,
}).Debug("consumer waiting for product")
return StatusReady // yield
}
default:
product := contextual.GetVar("product")
log.WithField("product", product).Debug("consumer consume product")
chOut := contextual.GetVar("chOutput").(chan interface{})
chOut <- fmt.Sprintln("consumer consume product:", product)
contextual.OS.InterruptRequest(contextual.Process.Thread, StdOutInterrupt, chOut)
contextual.SetVar("dpc", 0)
}
return StatusRunning
}
})
shamOS.Boot()
}
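// Hedged helper sketch (not part of the original tests): both producer and consumer above
// keep a second program counter, "dpc", in the VarPool so that the long-running default
// case can track its own step independently of the OS-driven PC. nextSubPC is a
// hypothetical helper that reads it, initializing it to 0 on first use; it assumes
// InitVarPool has already been called on this Contextual.
func nextSubPC(contextual *Contextual) int {
if _, ok := contextual.TryGetVar("dpc"); !ok {
contextual.SetVar("dpc", 0)
}
return contextual.GetVar("dpc").(int)
}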
|
// SPDX-License-Identifier: MIT
package ast
import (
"reflect"
"unicode"
"github.com/caixw/apidoc/v7/core"
"github.com/caixw/apidoc/v7/internal/node"
)
var searcherType = reflect.TypeOf((*core.Searcher)(nil)).Elem()
// Search looks up and returns the object matching the given criteria.
//
// It finds the smallest object in doc that covers the position pos and that
// implements the type t. If no such object exists, nil is returned. t must be an interface type.
func (doc *APIDoc) Search(uri core.URI, pos core.Position, t reflect.Type) (r core.Searcher) {
r = search(reflect.ValueOf(doc), uri, pos, t)
if r == nil { // the apidoc URI may differ from the api URIs
for _, api := range doc.APIs {
if rr := search(reflect.ValueOf(api), uri, pos, t); rr != nil {
return rr
}
}
}
return r
}
func search(v reflect.Value, uri core.URI, pos core.Position, t reflect.Type) (r core.Searcher) {
if v.IsZero() {
return
}
v = node.RealValue(v)
if v.CanInterface() && v.Type().Implements(searcherType) && (t == nil || v.Type().Implements(t)) {
if rr := v.Interface().(core.Searcher); rr.Contains(uri, pos) {
r = rr
}
} else if v.CanAddr() {
if pv := v.Addr(); pv.CanInterface() && pv.Type().Implements(searcherType) && (t == nil || pv.Type().Implements(t)) {
if rr := pv.Interface().(core.Searcher); rr.Contains(uri, pos) {
r = rr
}
}
}
if r == nil && t == nil { // 不匹配当前元素,也不需要搜查子元素是否实现 t,则直接返回 nil。
return nil
}
if v.Kind() == reflect.Struct {
for vt, i := v.Type(), 0; i < vt.NumField(); i++ {
ft := vt.Field(i)
if ft.Anonymous || unicode.IsLower(rune(ft.Name[0])) {
continue
}
fv := v.Field(i)
if fv.Kind() == reflect.Array || fv.Kind() == reflect.Slice {
for j := 0; j < fv.Len(); j++ {
if rr := search(fv.Index(j), uri, pos, t); rr != nil {
return rr
}
}
continue
} else if rr := search(fv, uri, pos, t); rr != nil {
return rr
}
}
}
return r
}
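// searchAt is a hedged usage sketch (not part of the original source): given a parsed
// document, a file URI, and a cursor position, it returns the smallest element that
// contains the position, with no constraint on its concrete type (t == nil).
func searchAt(doc *APIDoc, uri core.URI, pos core.Position) core.Searcher {
return doc.Search(uri, pos, nil)
}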
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package glbench manipulates the test flow of running glbench test binaries.
package glbench
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"chromiumos/tast/common/perf"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/cpu"
"chromiumos/tast/local/faillog"
"chromiumos/tast/local/graphics"
"chromiumos/tast/local/sysutil"
"chromiumos/tast/testing"
)
var (
// glbench installation folder.
glbenchDir = "/usr/local/glbench/"
// referenceImageFile contains good images.
referenceImageFile = filepath.Join(glbenchDir, "files/glbench_reference_images.txt")
// knownBadImagesFile contains images that are bad but for which the bug has not been fixed yet.
knownBadImagesFile = filepath.Join(glbenchDir, "files/glbench_knownbad_images.txt")
// fixedBadImagesFile contains images that are bad and for which a fix has been submitted.
fixedBadImagesFile = filepath.Join(glbenchDir, "files/glbench_fixedbad_images.txt")
// resultRE is a regex to parse a test result line. It matches a line like
// "@RESULT: swap_swap = 214.77 us [swap_swap.pixmd5-20dbc406b95e214a799a6a7f9c700d2f.png]".
resultRE = regexp.MustCompile(`^@RESULT: (\S+)\s*=\s*(\S+) (\S+)\s*\[(.+)\]`)
)
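// parseResultLine is a hedged helper sketch (not part of the original file) showing how a
// single "@RESULT:" line decomposes via resultRE into a test name, score, unit, and
// result image file name.
func parseResultLine(line string) (name, score, unit, image string, ok bool) {
m := resultRE.FindStringSubmatch(strings.TrimSpace(line))
if m == nil {
return "", "", "", "", false
}
return m[1], m[2], m[3], m[4], true
}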
const (
hangCheckTimer = 10 * time.Second
)
// Config is the interface that setup/runs/teardown the glbench running environment.
type Config interface {
SetUp(ctx context.Context) error
Run(ctx context.Context, preValue interface{}, outDir string) (string, error)
TearDown(ctx context.Context) error
IsHasty() bool
}
// Run runs the glbench binary. outDir specifies the directory in which to store the results. preValue is the structure provided by the precondition/fixture for the test to access the container/environment.
func Run(ctx context.Context, outDir string, preValue interface{}, config Config) (resultErr error) {
// Set host hangcheck timer to allow longer GL calls.
if originalTimer, err := graphics.GetHangCheckTimer(); err != nil {
testing.ContextLog(ctx, "Can't get the hangcheck timer, it is normal for kernels older than 5.4: ", err)
} else {
// Only try to set the hangcheck timer if we successfully read the current value.
if er := graphics.SetHangCheckTimer(ctx, hangCheckTimer); er != nil {
return errors.Wrapf(er, "failed to set hangcheck timer to %v", hangCheckTimer)
}
defer graphics.SetHangCheckTimer(ctx, originalTimer)
}
// appendErr appends err, wrapped with msg, to resultErr.
var appendErr = func(err error, msg string, args ...interface{}) error {
resultErr = errors.Wrap(resultErr, errors.Wrapf(err, msg, args...).Error())
return resultErr
}
pv := perf.NewValues()
defer func() {
if err := pv.Save(outDir); err != nil {
appendErr(err, "failed to save perf data")
}
}()
// Leave a bit of time to clean up.
cleanUpCtx := ctx
cleanUpTime := 10 * time.Second
ctx, cancel := ctxutil.Shorten(cleanUpCtx, cleanUpTime)
defer cancel()
if err := config.SetUp(ctx); err != nil {
return appendErr(err, "failed to setup glbench config")
}
defer config.TearDown(cleanUpCtx)
// Log the initial machine temperature.
if err := ReportTemperature(ctx, pv, "temperature_1_start"); err != nil {
appendErr(err, "failed to report temperature")
}
// Only set up benchmark mode if we are not in hasty mode.
if !config.IsHasty() {
// Make machine behaviour consistent.
if _, err := cpu.WaitUntilCoolDown(ctx, cpu.DefaultCoolDownConfig(cpu.CoolDownPreserveUI)); err != nil {
SaveFailLog(ctx, filepath.Join(outDir, "before_tests1"))
testing.ContextLog(ctx, "Unable get cool machine by default setting: ", err)
if _, err := cpu.WaitUntilCoolDown(ctx, cpu.CoolDownConfig{PollTimeout: 1 * time.Minute, PollInterval: 2 * time.Second, TemperatureThreshold: 60000, CoolDownMode: cpu.CoolDownPreserveUI}); err != nil {
SaveFailLog(ctx, filepath.Join(outDir, "before_tests2"))
appendErr(err, "unable to get cool machine to reach 60C")
}
}
}
// config.Run should run the glbench binary and return its output for analysis.
output, err := config.Run(ctx, preValue, outDir)
if err != nil {
return appendErr(err, "failed to run glbench")
}
// Log the machine temperature after the test.
if err := ReportTemperature(ctx, pv, "temperature_3_after_test"); err != nil {
appendErr(err, "failed to report temperature")
}
failedTests, err := analyzeSummary(output, filepath.Join(outDir, "summary.txt"), config.IsHasty(), pv)
if err != nil {
return appendErr(err, "failed to write summary")
}
if len(failedTests) > 0 {
// Sort the tests to make them easier to read in the test dashboard.
sort.Strings(failedTests)
return appendErr(err, "Some images don't match their references: %q; check summary.txt for details", failedTests)
}
return
}
// analyzeSummary analyzes the output of glbench, writes the result to resultPath, and saves the perf values to pv.
// The function returns the list of failed tests if found.
func analyzeSummary(summary, resultPath string, isHasty bool, pv *perf.Values) ([]string, error) {
// Write a copy of stdout to help debug failures.
f, err := os.OpenFile(resultPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, errors.Wrap(err, "failed to open summary file")
}
defer f.Close()
fmt.Fprintf(f, `# ---------------------------------------------------
#
%s
# -------------------------------------------------
# [glbench.go postprocessing]
`, summary)
// Analyze the output. Sample:
// # board_id: NVIDIA Corporation - Quadro FX 380/PCI/SSE2
// # Running: ../glbench -save -outdir=img
// @RESULT: swap_swap = 221.36 us [swap_swap.pixmd5-20dbc...f9c700d2f.png]
results := strings.Split(summary, "\n")
if len(results) == 0 {
return nil, errors.New("no output from test")
}
readFile := func(f string) (string, error) {
b, err := ioutil.ReadFile(f)
if err != nil {
return "", errors.Wrap(err, "failed to read files")
}
return string(b), nil
}
// The good images, the silenced failures, and the zombie/recurring failures.
referenceImageNames, err := readFile(referenceImageFile)
if err != nil {
return nil, errors.Wrap(err, "failed ot read referenceImageFile")
}
knownBadImageNames, err := readFile(knownBadImagesFile)
if err != nil {
return nil, errors.Wrap(err, "failed ot read knownBadImagesFile")
}
fixedBadImageNames, err := readFile(fixedBadImagesFile)
if err != nil {
return nil, errors.Wrap(err, "failed ot read fixedBadImagesFile")
}
// Check if we saw GLBench end as expected (without crashing).
testEndedNormal := false
for _, line := range results {
if strings.HasPrefix(strings.TrimSpace(line), "@TEST_END") {
testEndedNormal = true
}
}
if !testEndedNormal {
return nil, errors.Wrap(err, "no end marker(presume crash/missing images)")
}
// Analyze individual test results in summary.
var failedTests []string
for _, line := range results {
line := strings.TrimSpace(line)
if !strings.HasPrefix(line, "@RESULT: ") {
continue
}
m := resultRE.FindStringSubmatch(line)
if m == nil {
return nil, errors.Errorf("%q unexpectedly didn't match %q", line, resultRE.String())
}
testName, score, unit, imageFile := m[1], m[2], m[3], m[4]
testRating, err := strconv.ParseFloat(score, 32)
if err != nil {
return nil, errors.Wrap(err, "failed to parse score")
}
// Prepend unit to test name to maintain backwards compatibility with existing data.
perfValueName := fmt.Sprintf("%s_%s", unit, testName)
pv.Set(perf.Metric{
Name: perfValueName,
Variant: perfValueName,
Unit: unit,
Direction: perf.BiggerIsBetter,
}, testRating)
errMsg := ""
// Classify result image.
switch {
case testRating == -1.0:
// Test generated GL Error.
glError := strings.Split(imageFile, "=")[1]
errMsg = fmt.Sprintf("GLError %s during test", glError)
failedTests = append(failedTests, testName)
case testRating == 0.0:
// Tests for which glbench does not generate a meaningful perf score.
errMsg = "no score for test"
case strings.Contains(fixedBadImageNames, imageFile):
// We know the image looked bad at some point in time but we thought
// it was fixed. Report a failure as a reminder.
errMsg = fmt.Sprintf("fixedbad [%s]", imageFile)
failedTests = append(failedTests, testName)
case isHasty && strings.Contains(knownBadImageNames, testName):
// If the failure is triaged for the test, mark it as knownbad in hasty mode.
// Don't report a failure; just note that there is a problem.
errMsg = fmt.Sprintf("knownbad [%s]", imageFile)
// This failure is allowed so don't add to failedTests.
case strings.Contains(knownBadImageNames, imageFile):
// We have triaged the failure and have filed a tracking bug.
// Don't report a failure; just note that there is a problem.
errMsg = fmt.Sprintf("knownbad [%s]", imageFile)
// This failure is allowed so don't add to failedTests.
case strings.Contains(referenceImageNames, imageFile):
// Known good reference images (default).
case imageFile == "none":
// Tests that do not write images can't fail because of them.
case noChecksumTest(testName):
// TODO(ihf): these really should not write any images
default:
// Completely unknown images. Report a failure.
errMsg = fmt.Sprintf("unknown [%s]", imageFile)
failedTests = append(failedTests, testName)
}
if errMsg != "" {
fmt.Fprintf(f, "# %s: %s\n", testName, errMsg)
}
}
return failedTests, nil
}
// ReportTemperature sets the current temperature in pv. If there is a problem reading the value, it records -1000 as the temperature.
func ReportTemperature(ctx context.Context, pv *perf.Values, name string) error {
temp, err := sysutil.TemperatureInputMax()
if err != nil {
temp = -1000.0
testing.ContextLog(ctx, "Can't read maximum temperature: ", err)
}
pv.Set(perf.Metric{
Name: name,
Unit: "Celsius",
Direction: perf.SmallerIsBetter,
}, temp)
return nil
}
// noChecksumTests are tests that do not draw anything.
// They can only be used to check performance.
var noChecksumTests = []string{
"compositing_no_fill",
"pixel_read",
"texture_rebind_rgba_teximage2d",
"texture_reuse_luminance_teximage2d",
"texture_reuse_luminance_texsubimage2d",
"texture_reuse_rgba_teximage2d",
"texture_reuse_rgba_texsubimage2d",
"context_glsimple",
"swap_glsimple",
}
// noChecksumTest reports whether the given test requires no screenshot checksum.
func noChecksumTest(name string) bool {
for _, x := range noChecksumTests {
if strings.HasPrefix(name, x) {
return true
}
}
return false
}
// SaveFailLog calls faillog.SaveToDir to save information for future debugging.
func SaveFailLog(ctx context.Context, dir string) {
// Create the directory if it does not exist.
if _, err := os.Stat(dir); os.IsNotExist(err) {
os.Mkdir(dir, 0755)
}
faillog.SaveToDir(ctx, dir)
}
|
package fast
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestClient_GetUploadSpeed(t *testing.T) {
c := NewClient()
_, err := c.GetUploadSpeed()
assert.ErrorIs(t, err, ErrMethodNotProvided)
}
// Not an ideal test because it depends on external network access
func TestClient_GetDownloadSpeed(t *testing.T) {
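// Hedged addition (not in the original test): skip this network-dependent test when
// running with -short, so offline runs do not fail. testing.Short is part of the
// standard library testing package.
if testing.Short() {
t.Skip("skipping: requires external network access")
}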
c := NewClient()
res, err := c.GetDownloadSpeed()
require.NoError(t, err)
assert.NotEmpty(t, res)
}
|
package main
import (
"io"
"os"
"github.com/yuyamada/atcoder/lib"
)
func main() {
solve(os.Stdin, os.Stdout)
}
func solve(stdin io.Reader, stdout io.Writer) {
io := lib.NewIo(stdin, stdout)
defer io.Flush()
n := io.NextInt()
a := io.NextInts(n)
ans := solver(a)
io.Println(ans)
}
// solver counts inversions in a, i.e. pairs (i, j) with i < j and a[i] > a[j],
// using a Binary Indexed Tree over the value range (assuming a is a permutation of 1..n).
func solver(a []int) (ans int) {
bit := lib.NewBinaryIndexedTree(len(a))
for i := range a {
// i elements were inserted before a[i]; subtracting those that are <= a[i]
// (assuming bit.Sum is an inclusive prefix sum) leaves the count of larger earlier elements.
ans += i - bit.Sum(a[i])
bit.Add(a[i], 1)
}
return ans
}
|
package storage
import (
"fmt"
"testing"
"gin-msgboard/config"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
type Msgboard struct {
Id uint `gorm:"primary_key"`
Name string
Msg string
Mtime string
}
const (
MSGBOARD_TAB = "msgboard"
)
func TestInitDB(t *testing.T) {
configer, _ := config.LoadConfigFile("../conf/config.json")
config.GlobalDefaultConfig = *configer
conf := config.GetDatabase()
server := conf.DbServers[0]
args := fmt.Sprintf(
"%s:%s@tcp(%s:%d)/%s?charset=utf8&parseTime=True&loc=Local",
server.User,
server.Password,
server.Host,
server.Port,
server.Schema,
)
t.Log(args)
var err error
db, err = gorm.Open(conf.Type, args)
if err != nil {
t.Fatalf("failed to open database: %v", err)
}
for i := 0; i < 20; i++ {
msg := Msgboard{Name: "Jinzhu", Msg: "tt"}
res := db.Table(MSGBOARD_TAB).Save(&msg)
t.Log(res.Error)
}
}
|
package cmd
import (
"testing"
"os"
"github.com/spf13/viper"
)
func TestExecute(t *testing.T) {
t.Run("Flag defaults", func(t *testing.T) {
RootCmd.Execute()
if cfgFile != "" {
t.Errorf("expected config file to be blank by default; got %s", cfgFile)
}
if outputDir != "" {
t.Errorf("expected output directory to be blank by default; got %s", outputDir)
}
})
t.Run("Reads SACRED_TOKEN from ENV", func(t *testing.T) {
want := "hello"
os.Setenv("SACRED_TOKEN", want)
RootCmd.Execute()
got := viper.Get("token")
if want != got {
t.Errorf("expected to read SACRED_TOKEN from ENV vars")
}
})
t.Run("Reads SACRED_DOMAIN from ENV", func(t *testing.T) {
want := "example.com"
os.Setenv("SACRED_DOMAIN", want)
RootCmd.Execute()
got := viper.Get("domain")
if want != got {
t.Errorf("expected to read SACRED_TOKEN from ENV vars")
}
})
}
func TestMergeCredentials(t *testing.T) {
t.Run("Overrides auth token when the SACRED_TOKEN is set", func(t *testing.T) {
wantToken := "env-token"
os.Setenv("SACRED_TOKEN", wantToken)
MergeCredentials(&cfg)
gotToken := cfg.Auth.Token
if wantToken != gotToken {
t.Errorf("expected %s got %s", wantToken, gotToken)
}
})
t.Run("Overrides auth domain when the SACRED_DOMAIN is set", func(t *testing.T) {
wantDomain := "env-domain"
os.Setenv("SACRED_DOMAIN", wantDomain)
RootCmd.Execute()
MergeCredentials(&cfg)
gotDomain := cfg.Auth.Domain
if wantDomain != gotDomain {
t.Errorf("expected %s got %s", wantDomain, gotDomain)
}
})
}
|