text stringlengths 11 4.05M |
|---|
package main
import (
"errors"
"net/http"
"github.com/SUSE/stratos-ui/components/app-core/backend/repository/interfaces"
"github.com/labstack/echo"
)
// Endpoint - This represents the CNSI endpoint
type Endpoint struct {
	GUID     string                    `json:"guid"`    // Unique identifier of the registered endpoint
	Name     string                    `json:"name"`    // Display name of the endpoint
	Version  string                    `json:"version"` // Endpoint version string
	User     *interfaces.ConnectedUser `json:"user"`    // User connected to this endpoint, nil when not connected
	CNSIType string                    `json:"type"`    // Endpoint type; used as the grouping key in Info.Endpoints
}
// Info - this represents user specific info
type Info struct {
	Versions     *Versions                       `json:"version"`                 // Backend version data from getVersionsData
	User         *interfaces.ConnectedUser       `json:"user"`                    // The session (UAA) user
	Endpoints    map[string]map[string]*Endpoint `json:"endpoints"`               // Registered endpoints, keyed by type then GUID
	CloudFoundry *interfaces.CFInfo              `json:"cloud-foundry,omitempty"` // Cloud Foundry info from config, may be nil
	PluginConfig map[string]string               `json:"plugin-config,omitempty"` // Plugin configuration from config, may be nil
}
// info handles the info HTTP request: it assembles the user/endpoint
// summary and writes it as JSON, or responds 403 when it cannot.
func (p *portalProxy) info(c echo.Context) error {
	infoResponse, infoErr := p.getInfo(c)
	if infoErr == nil {
		return c.JSON(http.StatusOK, infoResponse)
	}
	return echo.NewHTTPError(http.StatusForbidden, infoErr.Error())
}
// getInfo collects the data for the info endpoint: version data, the
// session user, and the registered CNSI endpoints grouped by endpoint
// type, including any per-endpoint connected user.
func (p *portalProxy) getInfo(c echo.Context) (*Info, error) {
	// get the version
	versions, err := p.getVersionsData()
	if err != nil {
		return nil, errors.New("Could not find database version")
	}
	// get the user
	userGUID, err := p.GetSessionStringValue(c, "user_id")
	if err != nil {
		return nil, errors.New("Could not find session user_id")
	}
	uaaUser, err := p.getUAAUser(userGUID)
	if err != nil {
		return nil, errors.New("Could not load session user data")
	}
	// create initial info struct
	s := &Info{
		Versions:     versions,
		User:         uaaUser,
		Endpoints:    make(map[string]map[string]*Endpoint),
		CloudFoundry: p.Config.CloudFoundryInfo,
		PluginConfig: p.Config.PluginConfig,
	}
	// initialize the Endpoints maps for each plugin that exposes an endpoint type
	for _, plugin := range p.Plugins {
		endpointPlugin, err := plugin.GetEndpointPlugin()
		if err != nil {
			// Plugin doesn't implement an Endpoint Plugin interface, skip
			continue
		}
		s.Endpoints[endpointPlugin.GetType()] = make(map[string]*Endpoint)
	}
	// get the CNSI Endpoints
	cnsiList, _ := p.buildCNSIList(c)
	for _, cnsi := range cnsiList {
		endpoint := &Endpoint{
			GUID: cnsi.GUID,
			Name: cnsi.Name,
		}
		// try to get the user info for this cnsi for the user
		cnsiUser, ok := p.GetCNSIUser(cnsi.GUID, userGUID)
		if ok {
			endpoint.User = cnsiUser
		}
		cnsiType := cnsi.CNSIType
		// A stored CNSI may reference a type no loaded plugin registered.
		// Assigning into the missing inner map would panic ("assignment to
		// entry in nil map"), so create the bucket on demand.
		if _, present := s.Endpoints[cnsiType]; !present {
			s.Endpoints[cnsiType] = make(map[string]*Endpoint)
		}
		s.Endpoints[cnsiType][cnsi.GUID] = endpoint
	}
	return s, nil
}
|
package azure
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
)
// NewClient is the default constructor for creating a vision client using the api key.
// Both the API base URL and the subscription key must be non-empty.
func NewClient(emotionAPI, emotionKey string) (*Client, error) {
	switch {
	case emotionKey == "":
		return nil, errors.New("missing emotion_key")
	case emotionAPI == "":
		return nil, errors.New("missing emotion_host")
	}
	visionClient := &Client{
		API:    emotionAPI,
		Key:    emotionKey,
		client: *http.DefaultClient,
	}
	return visionClient, nil
}
// FaceAnalysis runs sentiment analysis on an image by POSTing it to the
// Azure Face detect endpoint with emotion attributes requested. It
// returns the emotion data of the first detected face, or a zero value
// when throttled (HTTP 429) or when no face was detected. All other
// failures panic via check.
func (c *Client) FaceAnalysis(body io.Reader) EmotionData {
	result := []EmotionData{}
	contentType := "application/octet-stream"
	var URL *url.URL
	URL, err := url.Parse(c.API)
	if err != nil {
		check("url parsing", err)
	}
	URL.Path += "/face/v1.0/detect"
	parameters := url.Values{}
	parameters.Add("returnFaceAttributes", "emotion")
	URL.RawQuery = parameters.Encode()
	b, err := ioutil.ReadAll(body)
	if err != nil {
		check("reading file", err)
	}
	req, err := http.NewRequest(http.MethodPost, URL.String(), bytes.NewReader(b))
	if err != nil {
		check("create new request", err)
	}
	req.Header.Add("Ocp-Apim-Subscription-Key", c.Key)
	req.Header.Add("Content-Type", contentType)
	req.Header.Add("Content-Length", strconv.Itoa(len(b)))
	resp, err := c.client.Do(req)
	if err != nil {
		check("request", err)
	}
	defer resp.Body.Close()
	var errResp struct {
		Code    string `json:"code"`
		Message string `json:"message"`
	}
	var resBody io.Reader
	peeker := bufio.NewReader(resp.Body)
	// when emotion API succeeds, a JSON array is returned instead of an object, so we need to peek
	if head, err := peeker.Peek(1); err != nil {
		check("peeking", err)
	} else if head[0] == '{' {
		// Object body: decode the error envelope while teeing the bytes so
		// resBody can still be re-read below.
		var body bytes.Buffer
		err = json.NewDecoder(io.TeeReader(peeker, &body)).Decode(&errResp)
		if err != nil {
			check("decoding error response", err)
		}
		resBody = &body
	} else {
		resBody = peeker
	}
	err = fmt.Errorf("status: %s:%s", errResp.Code, errResp.Message)
	switch resp.StatusCode {
	case http.StatusOK:
	case http.StatusTooManyRequests:
		// Throttled: degrade gracefully with a zero value.
		return EmotionData{}
	case http.StatusUnauthorized:
		check("unauthorized", err)
	case http.StatusBadRequest:
		check("bad request", err)
	default:
		check("unrecognized", err)
	}
	// The original ignored this decode error; surface it instead.
	if err := json.NewDecoder(resBody).Decode(&result); err != nil {
		check("decoding response", err)
	}
	// No faces detected yields an empty array; indexing result[0] would
	// panic, so return the zero value instead.
	if len(result) == 0 {
		return EmotionData{}
	}
	return result[0]
}
// check panics with a contextualized error when e is non-nil; it is a
// no-op for a nil error.
func check(msg string, e error) {
	if e == nil {
		return
	}
	panic(fmt.Errorf("%s: %s", msg, e.Error()))
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package comdoc
import (
"unicode/utf16"
)
// SecID is a sector identifier; the negative values below are special markers.
type SecID int32

// DirType identifies the kind of a directory entry.
type DirType uint8

// Color is the red-black tree color of a directory entry.
type Color uint8

// Special sector IDs used in sector chains and allocation tables.
const (
	SecIDFree       SecID = -1 // unallocated sector
	SecIDEndOfChain SecID = -2 // terminates a sector chain
	SecIDSAT        SecID = -3 // sector belongs to the sector allocation table
	SecIDMSAT       SecID = -4 // sector belongs to the master SAT
)

// Directory entry types.
const (
	DirEmpty   DirType = 0
	DirStorage DirType = 1
	DirStream  DirType = 2
	DirRoot    DirType = 5
)

// Red-black tree colors of directory entries.
const (
	Red   Color = 0
	Black Color = 1
)

// byteOrderMarker is the byte-order marker value stored in Header.ByteOrder.
const byteOrderMarker uint16 = 0xfffe

// msatInHeader is the number of MSAT entries held directly in the header.
const msatInHeader = 109

// fileMagic is the signature expected at the start of a CDF file.
var fileMagic = []byte{0xd0, 0xcf, 0x11, 0xe0, 0xa1, 0xb1, 0x1a, 0xe1}

// Raw CDF file header
type Header struct {
	Magic            [8]byte // expected to equal fileMagic
	UID              [16]byte
	Revision         uint16
	Version          uint16
	ByteOrder        uint16 // expected to hold byteOrderMarker
	SectorSize       uint16 // power of 2
	ShortSectorSize  uint16 // power of 2
	Reserved1        [6]byte
	DirSectorCount   uint32 // undocumented?
	SATSectors       uint32
	DirNextSector    SecID
	Reserved2        uint32
	MinStdStreamSize uint32 // streams smaller than this live in the short-sector area — TODO confirm
	SSATNextSector   SecID
	SSATSectorCount  uint32
	MSATNextSector   SecID
	MSATSectorCount  uint32
	MSAT             [msatInHeader]SecID
}
// Raw CDF directory entry
type RawDirEnt struct {
	NameRunes   [32]uint16 // entry name as UTF-16 code units
	NameLength  uint16     // name length in bytes; the /2-1 in Name() implies it includes a terminator — TODO confirm
	Type        DirType
	Color       Color
	LeftChild   int32
	RightChild  int32
	StorageRoot int32
	UID         [16]byte
	UserFlags   uint32
	CreateTime  uint64
	ModifyTime  uint64
	NextSector  SecID
	StreamSize  uint32
	_           uint32 // padding
}
// Name returns the UTF-8 name of this directory entry. Empty entries,
// and entries whose recorded length exceeds the fixed-size name field,
// yield the empty string.
func (e RawDirEnt) Name() string {
	// NameLength is in bytes; halve it and drop one trailing code unit.
	runeCount := e.NameLength/2 - 1
	if e.Type == DirEmpty || runeCount > 32 {
		return ""
	}
	decoded := utf16.Decode(e.NameRunes[:runeCount])
	return string(decoded)
}
// Parsed CDF directory entry
type DirEnt struct {
	RawDirEnt
	// Index into the directory stream holding this entry
	Index int
	// name is the decoded UTF-8 entry name, filled in by the parser.
	name string
}

// Return the UTF8 name of this entry
func (e DirEnt) Name() string {
	return e.name
}
|
package raft
import "sync"
import "sync/atomic"
import "../labrpc"
import "time"
import "math/rand"
import "fmt"
import "bytes"
import "../labgob"
// ApplyMsg is sent on the apply channel when a log entry becomes
// committed and should be applied by the service using Raft.
type ApplyMsg struct {
	CommandValid bool        // true when Command carries a committed entry
	Command      interface{} // the client command stored in the log
	CommandIndex int         // log index of the command
}
// Entry is a single Raft log entry: a client command plus the term in
// which the leader received it.
type Entry struct {
	Command interface{}
	Term    int
}
//
// A Go object implementing a single Raft peer.
//
type Raft struct {
	mu        sync.Mutex          // Lock to protect shared access to this peer's state
	peers     []*labrpc.ClientEnd // RPC end points of all peers
	persister *Persister          // Object to hold this peer's persisted state
	me        int                 // this peer's index into peers[]
	dead      int32               // set by Kill()
	ch        chan ApplyMsg       // channel on which committed entries are delivered
	//(2A)
	currentTerm int       // latest term this peer has seen (persisted)
	state       int       //0 follower, 1 leader, 2 candidate
	voteFor     int       // candidate voted for in currentTerm, -1 if none (persisted)
	startTime   time.Time // last time the election timeout was reset
	voteNum     int
	serverNum   int     // number of peers in the cluster
	entries     []Entry // the log; index 0 holds a dummy entry (persisted)
	commitIndex int     // highest log index known to be committed
	lastApplied int
	nextIndex   []int // leader only: next log index to send to each peer
	matchIndex  []int // leader only: highest index known replicated on each peer
}
// GetState returns this peer's current term and whether it believes it
// is the leader. currentTerm and state are mutated concurrently by RPC
// handlers and timer goroutines, so the original lock-free reads were a
// data race; take rf.mu for the snapshot.
func (rf *Raft) GetState() (int, bool) {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	term := rf.currentTerm
	isleader := rf.state == 1 // 1 == leader
	return term, isleader
}
// persist saves the durable Raft state (currentTerm, voteFor, and the
// log entries encoded one by one) through the persister. Callers hold
// rf.mu; readPersist decodes in the same order.
func (rf *Raft) persist() {
	w := new(bytes.Buffer)
	e := labgob.NewEncoder(w)
	e.Encode(rf.currentTerm)
	e.Encode(rf.voteFor)
	// Entries are encoded individually; readPersist reads until EOF.
	for i := range rf.entries {
		e.Encode(rf.entries[i])
	}
	data := w.Bytes()
	rf.persister.SaveRaftState(data)
}
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
r := bytes.NewBuffer(data)
d := labgob.NewDecoder(r)
var currentTerm int
var voteFor int
var entry Entry
entries := []Entry{}
if d.Decode(¤tTerm) != nil ||
d.Decode(&voteFor) != nil {
fmt.Println("Error")
} else {
for {
if d.Decode(&entry) != nil {
break
} else {
entries = append(entries, entry)
}
}
rf.currentTerm = currentTerm
rf.voteFor = voteFor
rf.entries = entries
}
}
// RequestVoteArgs is the argument struct for the RequestVote RPC.
type RequestVoteArgs struct {
	// 2A
	Term         int // candidate's term
	CandidateId  int // candidate requesting the vote
	LastLogIndex int // index of the candidate's last log entry
	LastLogTerm  int // term of the candidate's last log entry
}
// RequestVoteReply is the result struct for the RequestVote RPC.
type RequestVoteReply struct {
	// Your data here (2A).
	Term        int  // the replying peer's currentTerm, for the candidate to update itself
	VoteGranted bool // true means this peer granted its vote
}
// RequestVote is the RPC handler invoked by candidates gathering votes.
// A vote is granted when the candidate's term matches ours, we have not
// already voted for someone else this term, and the candidate's log is
// at least as up to date as ours (strictly higher last term, or equal
// last term with an index at least as large). Granting a vote resets
// the election timeout.
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	if rf.killed() {
		return
	}
	// A newer term always demotes this peer to follower before voting.
	if args.Term > rf.currentTerm {
		rf.turnToFollower(args.Term)
	}
	if args.Term == rf.currentTerm && (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && ((args.LastLogTerm > rf.entries[len(rf.entries)-1].Term) || (args.LastLogTerm == rf.entries[len(rf.entries)-1].Term && args.LastLogIndex >= (len(rf.entries)-1))) {
		rf.voteFor = args.CandidateId
		reply.VoteGranted = true
		// Reset the election timeout after granting a vote.
		rf.startTime = time.Now()
	} else {
		reply.VoteGranted = false
	}
	reply.Term = rf.currentTerm
	// Persist any term/vote change before the reply leaves this peer.
	rf.persist()
}
// Start proposes a new command for the log. It returns the index the
// command would occupy if committed, the current term, and whether this
// peer is the leader. Non-leaders do not append anything.
func (rf *Raft) Start(command interface{}) (int, int, bool) {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	nextIdx := len(rf.entries)
	currentTerm := rf.currentTerm
	if rf.state != 1 {
		// Not the leader: report position info but append nothing.
		return nextIdx, currentTerm, false
	}
	rf.entries = append(rf.entries, Entry{Command: command, Term: currentTerm})
	rf.persist()
	return nextIdx, currentTerm, true
}
// Kill marks this peer as dead; long-running goroutines observe the
// flag via killed() and exit.
func (rf *Raft) Kill() {
	const deadFlag int32 = 1
	atomic.StoreInt32(&rf.dead, deadFlag)
	// Your code here, if desired.
}
// killed reports whether Kill has been called on this peer.
func (rf *Raft) killed() bool {
	return atomic.LoadInt32(&rf.dead) == 1
}
// Make creates a Raft peer. peers holds the RPC endpoints of all
// servers (this peer is peers[me]), persister stores durable state, and
// applyCh receives committed entries. Persisted state, if any, is
// restored and the election timer goroutine is started.
func Make(peers []*labrpc.ClientEnd, me int,
	persister *Persister, applyCh chan ApplyMsg) *Raft {
	rf := &Raft{}
	rf.peers = peers
	rf.persister = persister
	rf.me = me
	rf.ch = applyCh
	// Your initialization code here (2A, 2B, 2C).
	rf.serverNum = len(rf.peers)
	rf.commitIndex = 0
	// Index 0 holds a dummy entry so log indices start at 1.
	rf.entries = []Entry{Entry{Command: 1, Term: 0}}
	rf.state = 0 // start as follower
	rf.currentTerm = 0
	rf.startTime = time.Now()
	rf.voteFor = -1
	rf.voteNum = 0
	rf.lastApplied = 0
	// initialize from state persisted before a crash
	rf.readPersist(persister.ReadRaftState())
	//go rf.candidateCheck()
	go rf.runElectionTimer()
	return rf
}
// runElectionTimer polls every 10ms and starts an election when no
// heartbeat or vote grant has reset startTime within a randomized
// 800-1600ms window. The goroutine retires itself when this peer
// becomes leader or the term moves past the one it was started in
// (a newer timer goroutine owns the timeout then).
func (rf *Raft) runElectionTimer() {
	// Randomized timeout so peers do not all start elections in lockstep.
	timeoutDuration := rand.Int63n(800) + 800
	rf.mu.Lock()
	termStarted := rf.currentTerm
	rf.mu.Unlock()
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		<-ticker.C
		rf.mu.Lock()
		if rf.state != 2 && rf.state != 0 {
			// Leader: heartbeats replace the election timeout.
			rf.mu.Unlock()
			return
		}
		if termStarted != rf.currentTerm {
			// Term changed; this timer is stale.
			rf.mu.Unlock()
			return
		}
		if elapsed := time.Since(rf.startTime); elapsed.Milliseconds() >= timeoutDuration {
			// Timed out. startElection runs with rf.mu held.
			rf.startElection()
			rf.mu.Unlock()
			return
		}
		rf.mu.Unlock()
	}
}
// startElection turns this peer into a candidate for a new term and
// requests votes from every peer. Callers hold rf.mu (see
// runElectionTimer). On gathering a majority the peer becomes leader
// and starts heartbeats; a reply with a newer term demotes it.
func (rf *Raft) startElection() {
	rf.state = 2 // candidate
	rf.currentTerm += 1
	savedCurrentTerm := rf.currentTerm
	rf.startTime = time.Now()
	rf.voteFor = rf.me
	votesReceived := 0
	// Snapshot the log tail while the caller still holds rf.mu. The
	// original read rf.entries inside each goroutine without the lock,
	// racing with concurrent appends.
	lastLogIndex := len(rf.entries) - 1
	lastLogTerm := rf.entries[lastLogIndex].Term
	for peerId := range rf.peers {
		go func(peerId int) {
			args := RequestVoteArgs{
				Term:         savedCurrentTerm,
				CandidateId:  rf.me,
				LastLogIndex: lastLogIndex,
				LastLogTerm:  lastLogTerm,
			}
			var reply RequestVoteReply
			if ok := rf.peers[peerId].Call("Raft.RequestVote", &args, &reply); ok {
				rf.mu.Lock()
				defer rf.mu.Unlock()
				if rf.state != 2 {
					// No longer a candidate: election already decided.
					return
				}
				if reply.Term > savedCurrentTerm {
					rf.turnToFollower(reply.Term)
					return
				} else if reply.Term == savedCurrentTerm {
					if reply.VoteGranted {
						votesReceived += 1
						if votesReceived > len(rf.peers)/2 {
							// Majority: become leader and start heartbeats.
							rf.state = 1
							go rf.heartbeat()
							return
						}
					}
				}
			}
		}(peerId)
	}
	// Run another election timer, in case this election is not successful.
	go rf.runElectionTimer()
}
// HeartbeatArgs is the argument struct for the AppendEntries/heartbeat RPC.
type HeartbeatArgs struct {
	Term         int     // leader's term
	LeaderId     int     // so followers know who the leader is
	PrevLogIndex int     // index of the log entry immediately preceding Entries
	PrevLogTerm  int     // term of the entry at PrevLogIndex
	Entries      []Entry // entries to replicate (empty for a pure heartbeat)
	LeaderCommit int     // leader's commitIndex
}
// HeartbeatReply is the result struct for the AppendEntries/heartbeat RPC.
type HeartbeatReply struct {
	Term    int  // follower's currentTerm, for the leader to update itself
	Success bool // true when the follower's log matched at PrevLogIndex/PrevLogTerm
}
// heartbeat initializes the leader's replication state and starts a
// goroutine that sends AppendEntries to every other peer on a 100ms
// ticker until leadership is lost or the peer is killed.
func (rf *Raft) heartbeat() {
	//heartbeatTimeout := 100
	// heartbeat runs in its own goroutine (spawned via go rf.heartbeat()),
	// so rf.mu is NOT held here. The original initialized nextIndex,
	// matchIndex and read rf.entries without the lock, racing with
	// concurrent appends in Start(); take the lock for the setup.
	rf.mu.Lock()
	rf.nextIndex = make([]int, rf.serverNum)
	rf.matchIndex = make([]int, rf.serverNum)
	for i := 0; i < rf.serverNum; i++ {
		rf.nextIndex[i] = len(rf.entries) - 1
		rf.matchIndex[i] = 0
	}
	rf.mu.Unlock()
	go func() {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		for {
			// Fan a heartbeat out to every other peer.
			for i := range rf.peers {
				if i != rf.me {
					args := HeartbeatArgs{}
					reply := HeartbeatReply{}
					go rf.sendHeartbeat(i, &args, &reply)
				}
			}
			<-ticker.C
			rf.mu.Lock()
			if rf.state != 1 || rf.killed() {
				// Lost leadership or shutting down: stop heartbeating.
				rf.mu.Unlock()
				return
			}
			rf.mu.Unlock()
		}
	}()
}
// sendHeartbeat sends one AppendEntries RPC to server carrying all log
// entries after nextIndex[server] and processes the reply: stepping
// down on a newer term, backing nextIndex off on a log mismatch, or
// advancing match/commit indices and applying newly committed entries
// on success.
func (rf *Raft) sendHeartbeat(server int, args *HeartbeatArgs, reply *HeartbeatReply) bool {
	rf.mu.Lock()
	// Only a leader sends heartbeats. The original checked rf.state after
	// releasing the lock, which was a data race; check it here instead.
	if rf.state != 1 {
		rf.mu.Unlock()
		return false
	}
	savedCurrentTerm := rf.currentTerm
	args.Term = rf.currentTerm
	args.LeaderId = rf.me
	ni := rf.nextIndex[server]
	args.PrevLogIndex = ni
	args.LeaderCommit = rf.commitIndex
	if args.PrevLogIndex == -1 {
		args.PrevLogTerm = -1
	} else {
		args.PrevLogTerm = rf.entries[ni].Term
	}
	entries := rf.entries[ni+1:]
	args.Entries = entries
	rf.mu.Unlock()
	ok := rf.peers[server].Call("Raft.HeartbeatHandler", args, reply)
	if !ok {
		return ok
	}
	rf.mu.Lock()
	defer rf.mu.Unlock()
	if reply.Term > rf.currentTerm {
		// The follower is in a newer term: step down.
		rf.turnToFollower(reply.Term)
		return ok
	}
	if rf.state != 1 || savedCurrentTerm != reply.Term {
		// Lost leadership, or the reply belongs to an older exchange.
		return ok
	}
	if !reply.Success {
		// Log mismatch at PrevLogIndex: back nextIndex off and retry later.
		rf.nextIndex[server] = ni - 1
	} else {
		rf.nextIndex[server] = ni + len(entries)
		rf.matchIndex[server] = rf.nextIndex[server]
		savedCommitIndex := rf.commitIndex
		// Advance commitIndex to the highest current-term entry that a
		// majority of peers have replicated.
		for i := rf.commitIndex + 1; i < len(rf.entries); i++ {
			if rf.entries[i].Term == rf.currentTerm {
				matchCount := 1
				for j := range rf.peers {
					if rf.matchIndex[j] >= i {
						matchCount++
					}
				}
				if matchCount > rf.serverNum/2 {
					rf.commitIndex = i
				}
			}
		}
		// Apply everything that just became committed.
		for i := savedCommitIndex + 1; i <= rf.commitIndex; i++ {
			rf.ch <- ApplyMsg{CommandValid: true, Command: rf.entries[i].Command, CommandIndex: i}
		}
	}
	return ok
}
// HeartbeatHandler is the AppendEntries RPC handler. It rejects stale
// terms, resets the election timeout for a current-term leader, checks
// log consistency at PrevLogIndex/PrevLogTerm, reconciles conflicting
// entries, and applies entries the leader reports as committed.
func (rf *Raft) HeartbeatHandler(args *HeartbeatArgs, reply *HeartbeatReply) {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	if rf.killed() {
		return
	}
	if args.Term > rf.currentTerm {
		rf.turnToFollower(args.Term)
	}
	reply.Success = false
	if args.Term == rf.currentTerm {
		if rf.state != 0 {
			// A current-term AppendEntries proves a legitimate leader
			// exists; candidates/leaders step down to follower.
			rf.turnToFollower(args.Term)
		}
		rf.startTime = time.Now()
		if args.PrevLogIndex < len(rf.entries) && rf.entries[args.PrevLogIndex].Term == args.PrevLogTerm {
			reply.Success = true
			// Skip the prefix of Entries that already matches our log.
			logInsertIndex := args.PrevLogIndex + 1
			newEntriesIndex := 0
			for {
				if logInsertIndex >= len(rf.entries) || newEntriesIndex >= len(args.Entries) {
					break
				}
				if rf.entries[logInsertIndex].Term != args.Entries[newEntriesIndex].Term {
					break
				}
				logInsertIndex++
				newEntriesIndex++
			}
			// Truncate at the first conflict and append the remainder.
			if newEntriesIndex < len(args.Entries) {
				rf.entries = append(rf.entries[:logInsertIndex], args.Entries[newEntriesIndex:]...)
			}
			if args.LeaderCommit > rf.commitIndex {
				// Apply newly committed entries.
				// NOTE(review): sends on rf.ch while holding rf.mu — this can
				// block if the service drains the channel slowly; confirm the
				// channel consumer never calls back into Raft.
				for i := rf.commitIndex + 1; i <= args.LeaderCommit; i++ {
					rf.ch <- ApplyMsg{CommandValid: true, Command: rf.entries[i].Command, CommandIndex: i}
				}
				rf.commitIndex = args.LeaderCommit
			}
		}
	}
	reply.Term = rf.currentTerm
	// Persist term/vote/log changes before replying.
	rf.persist()
}
// turnToFollower demotes this peer to follower for the given term: the
// vote and vote count are cleared, the election timeout restarts, and a
// fresh election timer goroutine is launched. Callers hold rf.mu.
func (rf *Raft) turnToFollower(term int) {
	rf.currentTerm = term
	rf.state = 0 // 0 == follower
	rf.voteFor = -1
	rf.voteNum = 0
	rf.startTime = time.Now()
	go rf.runElectionTimer()
}
|
package models
import (
"strings"
"time"
"gopkg.in/mgo.v2/bson"
"models/status"
"models/visibility"
)
// NewArticle is the persistence model for a newly created article.
type NewArticle struct {
	Title         string   `bson:"title"`
	Content       string   `bson:"content"`
	Excerpt       string   // no explicit bson tag
	Categories    []uint32 `bson:"categories"`
	Taxonomies    []uint32 `bson:"tags"` // note: stored under the "tags" key
	FeaturedImage *NewPicture // optional; see HasFeaturedImage
	Status        status.Status `bson:"status"`
	ScheduleAt    time.Time     `bson:"scheduleat"` // when to publish — presumably; verify against caller
	Author        *UserPrincipal
}
// HasFeaturedImage reports whether a featured image was supplied for
// the article.
func (art *NewArticle) HasFeaturedImage() bool {
	if art.FeaturedImage == nil {
		return false
	}
	return true
}
// NewArticleRequest is the incoming payload for creating an article;
// see Validate for the accepted field constraints.
type NewArticleRequest struct {
	Title      string
	Content    string
	Categories []bson.ObjectId
	Tags       []bson.ObjectId `json:",omitempty"`
	Metadata   Metadata        `json:",omitempty"`
	Status     status.Status
	Visibility visibility.Visibility
	ScheduleAt time.Time `json:",omitempty"`
}
// Validate checks the request and returns (true, nil) when acceptable,
// or (false, errors) otherwise. Because the checks form a single
// switch, at most one error is reported per call — the first failing
// check wins.
func (a NewArticleRequest) Validate() (bool, *Errs) {
	var errors Errs
	title := strings.TrimSpace(a.Title)
	content := strings.TrimSpace(a.Content)
	switch {
	case IsEmpty(title):
		errors.AddEmptyError("Title")
	case LengthNotBetween(title, 2, 255):
		// NOTE(review): a too-short title also lands here, yet it is
		// reported as a max-length violation — confirm this is intended.
		errors.AddMaxLengthError(255, "Title", a.Title)
	case ContainsNotAllowed(a.Title, PatternAlphaNumericPun):
		errors.AddNotAllowedError("Title", a.Title, PatternAlphaNumericPun)
	case IsEmpty(content):
		errors.AddEmptyError("Content")
	case LengthExceedsMax(content, 2048):
		errors.AddMaxLengthError(2048, "Content", a.Content)
	case len(a.Categories) == 0:
		errors.AddEmptyError("Categories")
	case status.IsNotValid(a.Status):
		errors.AddInvalidError("Status", a.Status.String())
	case visibility.IsNotValid(a.Visibility):
		errors.AddInvalidError("Visibility", a.Visibility.String())
	}
	if len(errors) > 0 {
		return false, &errors
	}
	return true, nil
}
|
/*
Copyright 2021 The DbunderFS Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bazil.org/fuse"
"github.com/spf13/cobra"
)
// unmountCommand builds the cobra sub-command that detaches a mount
// point from the filesystem. It requires at least one positional
// argument, the mount point path, and delegates to runUnmount.
func unmountCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "unmount POINT",
		Short: "Unmounts mount point from the filesystem",
		Args:  cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			point := args[0]
			return runUnmount(point)
		},
	}
}
// runUnmount asks FUSE to detach the filesystem mounted at point.
func runUnmount(point string) error {
	return fuse.Unmount(point)
}
|
//go:build !darwin && !linux && (!windows || !cgo)
package ingress
import (
"context"
"fmt"
"net/netip"
"runtime"
"time"
"github.com/rs/zerolog"
"github.com/cloudflare/cloudflared/packet"
)
// errICMPProxyNotImplemented is returned by every operation of the stub
// proxy on platforms without an ICMP proxy implementation.
var errICMPProxyNotImplemented = fmt.Errorf("ICMP proxy is not implemented on %s %s", runtime.GOOS, runtime.GOARCH)

// icmpProxy is the stub implementation used on unsupported platforms.
type icmpProxy struct{}

// Request always fails with errICMPProxyNotImplemented.
// NOTE(review): value receiver here but pointer receiver on Serve —
// inconsistent; left unchanged since altering it changes the method set.
func (ip icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *packetResponder) error {
	return errICMPProxyNotImplemented
}

// Serve always fails with errICMPProxyNotImplemented.
func (ip *icmpProxy) Serve(ctx context.Context) error {
	return errICMPProxyNotImplemented
}

// newICMPProxy always fails on this platform.
func newICMPProxy(listenIP netip.Addr, zone string, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) {
	return nil, errICMPProxyNotImplemented
}
|
// Package bundledeployment deploys bundles, monitors them and cleans up.
package bundledeployment
import (
"context"
"errors"
"fmt"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/sirupsen/logrus"
"github.com/rancher/fleet/internal/cmd/agent/deployer"
"github.com/rancher/fleet/internal/cmd/agent/trigger"
"github.com/rancher/fleet/internal/helmdeployer"
fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancher/fleet/pkg/durations"
fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1"
"github.com/rancher/wrangler/pkg/condition"
"github.com/rancher/wrangler/pkg/merr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
)
// nsResource identifies the core v1 namespaces resource for the dynamic client.
var nsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}

// handler reconciles bundledeployments on the agent: it deploys
// bundles, monitors the deployed resources, and cleans up releases for
// deleted bundledeployments.
type handler struct {
	cleanupOnce   sync.Once       // guards the one-time start of garbageCollect
	ctx           context.Context // lifetime context for background work and API calls
	trigger       *trigger.Trigger
	deployManager *deployer.Manager
	bdController  fleetcontrollers.BundleDeploymentController
	restMapper    meta.RESTMapper
	dynamic       dynamic.Interface // used for namespace updates and agent-resource cleanup
}
// Register wires the bundledeployment handler into the controller:
// status handlers for deploying ("Deployed") and monitoring
// ("Monitored") bundles, plus OnChange handlers for resource triggers
// and release cleanup.
func Register(ctx context.Context,
	trigger *trigger.Trigger,
	restMapper meta.RESTMapper,
	dynamic dynamic.Interface,
	deployManager *deployer.Manager,
	bdController fleetcontrollers.BundleDeploymentController) {
	h := &handler{
		ctx:           ctx,
		trigger:       trigger,
		deployManager: deployManager,
		bdController:  bdController,
		restMapper:    restMapper,
		dynamic:       dynamic,
	}
	fleetcontrollers.RegisterBundleDeploymentStatusHandler(ctx,
		bdController,
		"Deployed",
		"bundle-deploy",
		h.DeployBundle)
	fleetcontrollers.RegisterBundleDeploymentStatusHandler(ctx,
		bdController,
		"Monitored",
		"bundle-monitor",
		h.MonitorBundle)
	bdController.OnChange(ctx, "bundle-trigger", h.Trigger)
	bdController.OnChange(ctx, "bundle-cleanup", h.Cleanup)
}
// garbageCollect periodically removes orphaned releases until the
// handler's context is cancelled. Failures are logged and the loop
// keeps running.
func (h *handler) garbageCollect() {
	for {
		if cleanupErr := h.deployManager.Cleanup(); cleanupErr != nil {
			logrus.Errorf("failed to cleanup orphaned releases: %v", cleanupErr)
		}
		select {
		case <-time.After(wait.Jitter(durations.GarbageCollect, 1.0)):
			// next round
		case <-h.ctx.Done():
			return
		}
	}
}
// Cleanup handles bundledeployment change events: it lazily starts the
// background garbage collector on the first event, and deletes the
// deployed release when the bundledeployment itself has been removed.
func (h *handler) Cleanup(key string, bd *fleet.BundleDeployment) (*fleet.BundleDeployment, error) {
	// Start the garbage collector exactly once per handler lifetime.
	h.cleanupOnce.Do(func() { go h.garbageCollect() })
	if bd == nil {
		// The bundledeployment is gone: remove its release.
		return nil, h.deployManager.Delete(key)
	}
	return bd, nil
}
// DeployBundle deploys the bundledeployment's content and records the
// resulting release and applied deployment ID on the returned status.
// Paused bundles and bundles with unready dependencies are skipped.
// Known-unrecoverable deploy errors are folded into the status instead
// of being returned, to stop the controller from requeueing forever.
func (h *handler) DeployBundle(bd *fleet.BundleDeployment, status fleet.BundleDeploymentStatus) (fleet.BundleDeploymentStatus, error) {
	if bd.Spec.Paused {
		// nothing to do
		logrus.Debugf("Bundle %s/%s is paused", bd.Namespace, bd.Name)
		return status, nil
	}
	if err := h.checkDependency(bd); err != nil {
		logrus.Debugf("Bundle %s/%s has a dependency that is not ready: %v", bd.Namespace, bd.Name, err)
		return status, err
	}
	logrus.Infof("Deploying bundle %s/%s", bd.Namespace, bd.Name)
	release, err := h.deployManager.Deploy(bd)
	if err != nil {
		// When an error from DeployBundle is returned it causes DeployBundle
		// to requeue and keep trying to deploy on a loop. If there is something
		// wrong with the deployed manifests this will be a loop that re-deploying
		// cannot fix. Here we catch those errors and update the status to note
		// the problem while skipping the constant requeuing.
		if do, newStatus := deployErrToStatus(err, status); do {
			// Setting the release to an empty string remove the previous
			// release name. When a deployment fails the release name is not
			// returned. Keeping the old release name can lead to other functions
			// looking up old data in the history and presenting the wrong status.
			// For example, the h.deployManager.Deploy function will find the old
			// release and not return an error. It will set everything as if the
			// current one is running properly.
			newStatus.Release = ""
			newStatus.AppliedDeploymentID = bd.Spec.DeploymentID
			return newStatus, nil
		}
		return status, err
	}
	status.Release = release
	status.AppliedDeploymentID = bd.Spec.DeploymentID
	if err := h.setNamespaceLabelsAndAnnotations(bd, release); err != nil {
		return fleet.BundleDeploymentStatus{}, err
	}
	// Setting the error to nil clears any existing error
	condition.Cond(fleet.BundleDeploymentConditionInstalled).SetError(&status, "", nil)
	return status, nil
}
// setNamespaceLabelsAndAnnotations updates the namespace for the release, applying all labels and annotations to that namespace as configured in the bundle spec.
func (h *handler) setNamespaceLabelsAndAnnotations(bd *fleet.BundleDeployment, releaseID string) error {
	if bd.Spec.Options.NamespaceLabels == nil && bd.Spec.Options.NamespaceAnnotations == nil {
		return nil
	}
	ns, err := h.fetchNamespace(releaseID)
	if err != nil {
		return err
	}
	// Nothing to do when the namespace already matches the options.
	if reflect.DeepEqual(bd.Spec.Options.NamespaceLabels, ns.Labels) && reflect.DeepEqual(bd.Spec.Options.NamespaceAnnotations, ns.Annotations) {
		return nil
	}
	if bd.Spec.Options.NamespaceLabels != nil {
		// Guard against a nil map: addLabelsFromOptions writes into
		// ns.Labels and assigning to a nil map panics. The annotations
		// branch below already had this guard; the labels branch did not.
		if ns.Labels == nil {
			ns.Labels = map[string]string{}
		}
		addLabelsFromOptions(ns.Labels, *bd.Spec.Options.NamespaceLabels)
	}
	if bd.Spec.Options.NamespaceAnnotations != nil {
		if ns.Annotations == nil {
			ns.Annotations = map[string]string{}
		}
		addAnnotationsFromOptions(ns.Annotations, *bd.Spec.Options.NamespaceAnnotations)
	}
	return h.updateNamespace(ns)
}
// updateNamespace pushes the given namespace object back to the cluster
// through the dynamic client.
func (h *handler) updateNamespace(ns *corev1.Namespace) error {
	obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ns)
	if err != nil {
		return err
	}
	_, err = h.dynamic.Resource(nsResource).Update(h.ctx, &unstructured.Unstructured{Object: obj}, metav1.UpdateOptions{})
	return err
}
// fetchNamespace gets the namespace matching the release ID, looked up
// by its "name" label. It returns an error when no namespace is found.
func (h *handler) fetchNamespace(releaseID string) (*corev1.Namespace, error) {
	// releaseID is composed of release.Namespace/release.Name/release.Version
	name := strings.Split(releaseID, "/")[0]
	list, err := h.dynamic.Resource(nsResource).List(h.ctx, metav1.ListOptions{
		LabelSelector: "name=" + name,
	})
	if err != nil {
		return nil, err
	}
	if len(list.Items) == 0 {
		return nil, fmt.Errorf("namespace %s not found", name)
	}
	ns := &corev1.Namespace{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(list.Items[0].Object, ns); err != nil {
		return nil, err
	}
	return ns, nil
}
// addLabelsFromOptions rewrites nsLabels in place so that afterwards it
// contains exactly the labels in optLabels, plus the special "name"
// label added by Helm when creating the namespace.
func addLabelsFromOptions(nsLabels map[string]string, optLabels map[string]string) {
	// Apply every label from the options.
	for key, value := range optLabels {
		nsLabels[key] = value
	}
	// Drop anything not present in the options, preserving only "name".
	for key := range nsLabels {
		if key == "name" {
			continue
		}
		if _, defined := optLabels[key]; !defined {
			delete(nsLabels, key)
		}
	}
}
// addAnnotationsFromOptions rewrites nsAnnotations in place so that it
// ends up containing exactly the annotations in optAnnotations.
func addAnnotationsFromOptions(nsAnnotations map[string]string, optAnnotations map[string]string) {
	// Apply every annotation from the options.
	for key, value := range optAnnotations {
		nsAnnotations[key] = value
	}
	// Remove anything the options do not define.
	for key := range nsAnnotations {
		if _, defined := optAnnotations[key]; !defined {
			delete(nsAnnotations, key)
		}
	}
}
// deployErrToStatus converts an error into a status update.
// It returns true plus the updated status when the error matches one of
// the known-unrecoverable Helm/validation failures, so the caller can
// stop requeueing; otherwise it returns false and the status unchanged.
func deployErrToStatus(err error, status fleet.BundleDeploymentStatus) (bool, fleet.BundleDeploymentStatus) {
	if err == nil {
		return false, status
	}
	msg := err.Error()
	// The following error conditions are turned into a status
	// Note: these error strings are returned by the Helm SDK and its dependencies
	re := regexp.MustCompile(
		"(timed out waiting for the condition)|" + // a Helm wait occurs and it times out
			"(error validating data)|" + // manifests fail to pass validation
			"(chart requires kubeVersion)|" + // kubeVersion mismatch
			"(annotation validation error)|" + // annotations fail to pass validation
			"(failed, and has been rolled back due to atomic being set)|" + // atomic is set and a rollback occurs
			"(YAML parse error)|" + // YAML is broken in source files
			"(Forbidden: updates to [0-9A-Za-z]+ spec for fields other than [0-9A-Za-z ']+ are forbidden)|" + // trying to update fields that cannot be updated
			"(Forbidden: spec is immutable after creation)|" + // trying to modify immutable spec
			"(chart requires kubeVersion: [0-9A-Za-z\\.\\-<>=]+ which is incompatible with Kubernetes)", // trying to deploy to incompatible Kubernetes
	)
	if re.MatchString(msg) {
		status.Ready = false
		status.NonModified = true
		// The ready status is displayed throughout the UI. Setting this as well
		// as installed enables the status to be displayed when looking at the
		// CRD or a UI build on that.
		readyError := fmt.Errorf("not ready: %s", msg)
		condition.Cond(fleet.BundleDeploymentConditionReady).SetError(&status, "", readyError)
		// Deployed and Monitored conditions are automated. They are true if their
		// handlers return no error and false if an error is returned. When an
		// error is returned they are requeued. To capture the state of an error
		// that is not returned a new condition is being captured. Ready is the
		// condition that displays for status in general and it is used for
		// the readiness of resources. Only when we cannot capture the status of
		// resources, like here, can use use it for a message like the above.
		// The Installed condition lets us have a condition to capture the error
		// that can be bubbled up for Bundles and Gitrepos to consume.
		installError := fmt.Errorf("not installed: %s", msg)
		condition.Cond(fleet.BundleDeploymentConditionInstalled).SetError(&status, "", installError)
		return true, status
	}
	// The case that the bundle is already in an error state. A previous
	// condition with the error should already be applied.
	if err == helmdeployer.ErrNoResourceID {
		return true, status
	}
	return false, status
}
// checkDependency returns an error unless every bundledeployment this
// bundle depends on (selected via DependsOn name or label selector)
// exists and has a true "Ready" condition. The error lists the bundles
// that are not ready yet, so callers can retry later.
func (h *handler) checkDependency(bd *fleet.BundleDeployment) error {
	var depBundleList []string
	bundleNamespace := bd.Labels[fleet.BundleNamespaceLabel]
	for _, depend := range bd.Spec.DependsOn {
		// skip empty BundleRef definitions. Possible if there is a typo in the yaml
		if depend.Name != "" || depend.Selector != nil {
			ls := &metav1.LabelSelector{}
			if depend.Selector != nil {
				ls = depend.Selector
			}
			// depend.Name is just a shortcut for matchLabels: {bundle-name: name}
			if depend.Name != "" {
				ls = metav1.AddLabelToSelector(ls, fleet.BundleLabel, depend.Name)
				ls = metav1.AddLabelToSelector(ls, fleet.BundleNamespaceLabel, bundleNamespace)
			}
			selector, err := metav1.LabelSelectorAsSelector(ls)
			if err != nil {
				return err
			}
			bds, err := h.bdController.Cache().List(bd.Namespace, selector)
			if err != nil {
				return err
			}
			if len(bds) == 0 {
				return fmt.Errorf("list bundledeployments: no bundles matching labels %s in namespace %s", selector.String(), bundleNamespace)
			}
			// Collect every matching dependency that is not Ready yet.
			for _, depBundle := range bds {
				c := condition.Cond("Ready")
				if c.IsTrue(depBundle) {
					continue
				} else {
					depBundleList = append(depBundleList, depBundle.Name)
				}
			}
		}
	}
	if len(depBundleList) != 0 {
		return fmt.Errorf("dependent bundle(s) are not ready: %v", depBundleList)
	}
	return nil
}
// Trigger registers change-detection for all resources deployed by the
// bundledeployment, enqueueing it whenever one of them changes. Deleted
// or paused bundledeployments have their trigger registration cleared.
func (h *handler) Trigger(key string, bd *fleet.BundleDeployment) (*fleet.BundleDeployment, error) {
	if bd == nil || bd.Spec.Paused {
		// Nothing to watch: drop any existing registration for this key.
		return bd, h.trigger.Clear(key)
	}
	logrus.Debugf("Triggering for bundledeployment '%s'", key)
	resources, err := h.deployManager.AllResources(bd)
	switch {
	case err != nil:
		return bd, err
	case resources == nil:
		return bd, nil
	}
	logrus.Debugf("Adding OnChange for bundledeployment's '%s' resource list", key)
	onResourceChange := func() {
		// enqueue bundledeployment if any resource changes
		h.bdController.EnqueueAfter(bd.Namespace, bd.Name, 0)
	}
	return bd, h.trigger.OnChange(key, resources.DefaultNamespace, onResourceChange, resources.Objects...)
}
// isAgent reports whether the bundledeployment is the fleet agent
// itself, identified by its name prefix.
func isAgent(bd *fleet.BundleDeployment) bool {
	const agentPrefix = "fleet-agent"
	return strings.HasPrefix(bd.Name, agentPrefix)
}
// shouldRedeploy reports whether a modified bundledeployment must be
// re-applied: agent bundles always are; other bundles only when a
// positive ForceSyncGeneration has not yet been recorded on the status.
func shouldRedeploy(bd *fleet.BundleDeployment) bool {
	switch {
	case isAgent(bd):
		return true
	case bd.Spec.Options.ForceSyncGeneration <= 0:
		return false
	case bd.Status.SyncGeneration == nil:
		return true
	default:
		return *bd.Status.SyncGeneration != bd.Spec.Options.ForceSyncGeneration
	}
}
// cleanupOldAgent deletes every resource flagged for deletion in the
// modified statuses; it is used when redeploying the fleet agent
// itself. Individual failures are collected and returned merged rather
// than aborting the loop.
func (h *handler) cleanupOldAgent(modifiedStatuses []fleet.ModifiedStatus) error {
	var errs []error
	for _, modified := range modifiedStatuses {
		if modified.Delete {
			gvk := schema.FromAPIVersionAndKind(modified.APIVersion, modified.Kind)
			// Resolve the GVK to a resource so the dynamic client can delete it.
			mapping, err := h.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
			if err != nil {
				errs = append(errs, fmt.Errorf("mapping resource for %s for agent cleanup: %w", gvk, err))
				continue
			}
			logrus.Infof("Removing old agent resource %s/%s, %s", modified.Namespace, modified.Name, gvk)
			err = h.dynamic.Resource(mapping.Resource).Namespace(modified.Namespace).Delete(h.ctx, modified.Name, metav1.DeleteOptions{})
			if err != nil {
				errs = append(errs, fmt.Errorf("deleting %s/%s for %s for agent cleanup: %w", modified.Namespace, modified.Name, gvk, err))
				continue
			}
		}
	}
	return merr.NewErrors(errs...)
}
// removePrivateFields removes fields from the status, which won't be marshalled to JSON.
// They would however trigger a status update in apply
func removePrivateFields(s1 *fleet.BundleDeploymentStatus) {
	for i := range s1.NonReadyStatus {
		summary := &s1.NonReadyStatus[i].Summary
		summary.Relationships = nil
		summary.Attributes = nil
	}
}
// MonitorBundle refreshes the deployment status of a bundledeployment
// by comparing deployed resources against the desired state. Bundles
// whose deployment was never applied, failed to install, or is paused
// are left untouched. Modified resources cause a delayed requeue and,
// for force-sync/agent bundles, a redeploy.
func (h *handler) MonitorBundle(bd *fleet.BundleDeployment, status fleet.BundleDeploymentStatus) (fleet.BundleDeploymentStatus, error) {
	if bd.Spec.DeploymentID != status.AppliedDeploymentID {
		return status, nil
	}
	// If the bundle failed to install the status should not be updated. Updating
	// here would remove the condition message that was previously set on it.
	if condition.Cond(fleet.BundleDeploymentConditionInstalled).IsFalse(bd) {
		return status, nil
	}
	// Same considerations in case the bundle is paused
	if bd.Spec.Paused {
		return status, nil
	}
	err := h.deployManager.UpdateBundleDeploymentStatus(h.restMapper, bd)
	if err != nil {
		// Returning an error will cause MonitorBundle to requeue in a loop.
		// When there is no resourceID the error should be on the status. Without
		// the ID we do not have the information to lookup the resources to
		// compute the plan and discover the state of resources.
		if err == helmdeployer.ErrNoResourceID {
			return status, nil
		}
		return status, err
	}
	status = bd.Status
	readyError := readyError(status)
	condition.Cond(fleet.BundleDeploymentConditionReady).SetError(&status, "", readyError)
	if len(status.ModifiedStatus) > 0 {
		// Out of sync: check again after a delay.
		h.bdController.EnqueueAfter(bd.Namespace, bd.Name, durations.MonitorBundleDelay)
		if shouldRedeploy(bd) {
			logrus.Infof("Redeploying %s", bd.Name)
			// Clearing the applied ID forces DeployBundle to run again.
			status.AppliedDeploymentID = ""
			if isAgent(bd) {
				if err := h.cleanupOldAgent(status.ModifiedStatus); err != nil {
					return status, fmt.Errorf("failed to clean up agent: %w", err)
				}
			}
		}
	}
	status.SyncGeneration = &bd.Spec.Options.ForceSyncGeneration
	if readyError != nil {
		logrus.Errorf("bundle %s: %v", bd.Name, readyError)
	}
	removePrivateFields(&status)
	return status, nil
}
// readyError converts a non-ready or modified deployment status into an
// error describing the first offending resource. It returns nil only when
// the deployment is both ready and unmodified.
func readyError(status fleet.BundleDeploymentStatus) error {
	if status.Ready && status.NonModified {
		return nil
	}
	msg := "not ready"
	if status.Ready {
		// Ready but modified (the only other way to reach this point).
		msg = "out of sync"
		if len(status.ModifiedStatus) > 0 {
			msg = status.ModifiedStatus[0].String()
		}
	} else if len(status.NonReadyStatus) > 0 {
		msg = status.NonReadyStatus[0].String()
	}
	return errors.New(msg)
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package peertls
// Many cryptography standards use ASN.1 to define their data structures,
// and Distinguished Encoding Rules (DER) to serialize those structures.
// Because DER produces binary output, it can be challenging to transmit
// the resulting files through systems, like electronic mail, that only
// support ASCII. The PEM format solves this problem by encoding the
// binary data using base64.
// (see https://en.wikipedia.org/wiki/Privacy-enhanced_Electronic_Mail)
import (
"crypto"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"math/big"
"github.com/zeebo/errs"
"storj.io/common/pkcrypto"
)
// NonTemporaryError is an error with a `Temporary` method which always returns false.
// It is intended for use with grpc, which treats non-temporary dial errors
// as fatal rather than retryable.
//
// (see https://godoc.org/google.golang.org/grpc#WithDialer
// and https://godoc.org/google.golang.org/grpc#FailOnNonTempDialError).
type NonTemporaryError struct{ error }
// NewNonTemporaryError wraps err into a new non-temporary error for use with
// grpc. (The previous comment said "temporary error", which contradicted the
// type's Temporary() == false contract.)
func NewNonTemporaryError(err error) NonTemporaryError {
	return NonTemporaryError{
		error: errs.Wrap(err),
	}
}
// DoubleSHA256PublicKey returns the hash of the hash of (double-hash, SHA226)
// the binary format of the given public key.
func DoubleSHA256PublicKey(k crypto.PublicKey) ([sha256.Size]byte, error) {
kb, err := x509.MarshalPKIXPublicKey(k)
if err != nil {
return [sha256.Size]byte{}, err
}
mid := sha256.Sum256(kb)
end := sha256.Sum256(mid[:])
return end, nil
}
// Temporary returns false to indicate that it is a non-temporary error.
func (nte NonTemporaryError) Temporary() bool {
	return false
}
// Err returns the underlying wrapped error.
func (nte NonTemporaryError) Err() error {
	return nte.error
}
// verifyChainSignatures checks that each certificate in certs is signed by
// its successor in the slice; the final certificate must be self-signed.
func verifyChainSignatures(certs []*x509.Certificate) error {
	last := len(certs) - 1
	for i, cert := range certs {
		// The last cert has no successor, so it must sign itself.
		signer := cert
		if i < last {
			signer = certs[i+1]
		}
		if err := verifyCertSignature(signer, cert); err != nil {
			return ErrVerifyCertificateChain.Wrap(err)
		}
	}
	return nil
}
// verifyCertSignature checks that childCert's signature over its TBS
// (to-be-signed) bytes verifies against parentCert's public key.
func verifyCertSignature(parentCert, childCert *x509.Certificate) error {
	return pkcrypto.HashAndVerifySignature(parentCert.PublicKey, childCert.RawTBSCertificate, childCert.Signature)
}
// newSerialNumber returns a random certificate serial number uniformly
// drawn from [0, 2^128).
func newSerialNumber() (*big.Int, error) {
	// Serial numbers are limited to 20 octets; 128 bits fits comfortably.
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		// The old message read "failed to generateServerTls serial number",
		// a leftover from copy/pasting out of a TLS-setup helper.
		return nil, errs.New("failed to generate serial number: %s", err.Error())
	}
	return serialNumber, nil
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kernel
import (
"math"
"gvisor.dev/gvisor/pkg/bitmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
)
// descriptorBucket is one fixed-size page of FD slots; each slot holds an
// atomically updatable descriptor pointer.
type descriptorBucket [fdsPerBucket]descriptorAtomicPtr

// descriptorBucketSlice is the first level of the table: an atomically
// swappable list of bucket pointers.
type descriptorBucketSlice []descriptorBucketAtomicPtr

// descriptorTable is a two level table. The first level is a slice of
// *descriptorBucket where each bucket is a slice of *descriptor.
//
// All objects are updated atomically.
type descriptorTable struct {
	// Changes to the slice itself requiring holding FDTable.mu.
	// The state tag flattens the table to a plain map for save/restore.
	slice descriptorBucketSliceAtomicPtr `state:".(map[int32]*descriptor)"`
}
// initNoLeakCheck initializes the table without enabling leak checking.
//
// This is used when loading an FDTable after S/R, during which the ref count
// object itself will enable leak checking if necessary.
func (f *FDTable) initNoLeakCheck() {
	var slice descriptorBucketSlice // Empty slice.
	// Publish the empty first-level slice; readers load it atomically.
	f.slice.Store(&slice)
}
// init initializes the table with leak checking.
func (f *FDTable) init() {
	f.initNoLeakCheck()
	f.InitRefs()
	// Track allocated FDs in a bitmap sized to the maximum FD number.
	f.fdBitmap = bitmap.New(uint32(math.MaxUint16))
}
const (
	// fdsPerBucketShift is chosen in such a way that the size of bucket is
	// equal to one page.
	fdsPerBucketShift = 9
	// fdsPerBucket is the number of FD slots per bucket (1 << 9 == 512).
	fdsPerBucket = 1 << fdsPerBucketShift
	// fdsPerBucketMask extracts an FD's slot index within its bucket.
	fdsPerBucketMask = fdsPerBucket - 1
)
// get gets a file entry.
//
// The boolean indicates whether this was in range.
//
// This is a lock-free read path: both table levels are read with atomic
// loads, so it is safe to race with set (which publishes atomically).
//
//go:nosplit
func (f *FDTable) get(fd int32) (*vfs.FileDescription, FDFlags, bool) {
	slice := *f.slice.Load()
	bucketN := fd >> fdsPerBucketShift
	if bucketN >= int32(len(slice)) {
		// fd beyond the table's current extent.
		return nil, FDFlags{}, false
	}
	bucket := slice[bucketN].Load()
	if bucket == nil {
		// Bucket never allocated; fd is in range but unset.
		return nil, FDFlags{}, false
	}
	d := bucket[fd&fdsPerBucketMask].Load()
	if d == nil {
		// In range but no descriptor installed: note the `true` third result.
		return nil, FDFlags{}, true
	}
	return d.file, d.flags, true
}
// CurrentMaxFDs returns the number of file descriptors that may be stored in f
// without reallocation.
func (f *FDTable) CurrentMaxFDs() int {
	buckets := *f.slice.Load()
	return fdsPerBucket * len(buckets)
}
// set sets the file description referred to by fd to file. If
// file is non-nil, it takes a reference on them. If setAll replaces
// an existing file description, it returns it with the FDTable's reference
// transferred to the caller, which must call f.drop on the returned
// file after unlocking f.mu.
//
// Precondition: mu must be held.
func (f *FDTable) set(fd int32, file *vfs.FileDescription, flags FDFlags) *vfs.FileDescription {
	slicePtr := f.slice.Load()
	bucketN := fd >> fdsPerBucketShift
	// Grow the table as required.
	if length := len(*slicePtr); int(bucketN) >= length {
		newLen := int(bucketN) + 1
		if newLen < 2*length {
			// Ensure the table at least doubles in size without going over the limit.
			newLen = 2 * length
			if newLen > int(MaxFdLimit) {
				newLen = int(MaxFdLimit)
			}
		}
		// Build the grown slice first, then publish it atomically so
		// concurrent lock-free readers (get) never see a partial state.
		newSlice := append(*slicePtr, make([]descriptorBucketAtomicPtr, newLen-length)...)
		slicePtr = &newSlice
		f.slice.Store(slicePtr)
	}
	slice := *slicePtr
	bucket := slice[bucketN].Load()
	if bucket == nil {
		// Lazily allocate the bucket; publish via atomic store.
		bucket = &descriptorBucket{}
		slice[bucketN].Store(bucket)
	}
	var desc *descriptor
	if file != nil {
		desc = &descriptor{
			file:  file,
			flags: flags,
		}
	}
	// Update the single element.
	// NOTE(review): uses fd%fdsPerBucket where get uses fd&fdsPerBucketMask —
	// equivalent for non-negative fd, but worth unifying; confirm fd >= 0 here.
	orig := bucket[fd%fdsPerBucket].Swap(desc)
	// Acquire a table reference.
	if desc != nil && desc.file != nil {
		// Only take a new ref when the installed file actually changed.
		if orig == nil || desc.file != orig.file {
			desc.file.IncRef()
		}
	}
	if orig != nil && orig.file != nil {
		// Transfer the table's reference on the replaced file to the caller.
		if desc == nil || desc.file != orig.file {
			return orig.file
		}
	}
	return nil
}
|
package main
import (
"fmt"
"log"
"github.com/stephen-fox/radareutil"
)
func main() {
cliApi, err := radareutil.NewCliApi(&radareutil.Radare2Config{
// The executable path does not need to be fully qualified.
ExecutablePath: "radare2",
})
if err != nil {
log.Fatalf("failed to create a CLI API - %s", err.Error())
}
err = cliApi.Start()
if err != nil {
log.Fatalf("failed to start CLI API - %s", err.Error())
}
defer cliApi.Kill()
output, err := cliApi.Execute("?")
if err != nil {
log.Fatalf("failed to execute API command - %s", err.Error())
}
fmt.Println(output)
} |
package LeetCode
import (
"bytes"
"fmt"
)
// Code206 builds the list 1->2->3->4->5 and prints the result of reversing
// it (LeetCode problem 206 driver).
func Code206() {
	l1 := InitSingleList([]int{1, 2, 3, 4, 5})
	fmt.Println(reverseList(l1))
}
/**
Reverse a singly linked list.
Example:
Input:  1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
*/
/**
 * Definition for singly-linked list.
 * type ListNode struct {
 *     Val int
 *     Next *ListNode
 * }
 */
// reverseList reverses the list in place and returns the new head.
//
// NOTE(review): the first half of this function is unrelated scratch code
// experimenting with slice append/aliasing semantics; it prints on every
// call and should be moved out of this function. It is left untouched here
// because removing it would also orphan the file-level "bytes" import.
func reverseList(head *ListNode) *ListNode {
	// --- begin unrelated slice-semantics demo ---
	path := []byte("AAAA/BBBB")
	sepIndex := bytes.IndexByte(path, '/')
	dir1 := path[:sepIndex]
	dir2 := path[sepIndex+1:]
	fmt.Println(string(dir1), dir2)
	// Appending may or may not reallocate dir1's backing array depending on
	// capacity, which is what this demo appears to probe.
	dir1 = append(dir1, "suffex"...)
	path = bytes.Join([][]byte{dir1, dir2}, []byte{'/'})
	fmt.Println(111, string(dir1), string(dir2))
	fmt.Println(222, string(path))
	fmt.Println(dir1)
	a := []int{1, 2}
	b := []int{3, 4}
	check := a
	a = b
	fmt.Println(a, b, check)
	// --- end unrelated demo ---
	if head == nil {
		return nil
	}
	// Standard iterative reversal: walk the list, re-pointing each node's
	// Next at the already-reversed prefix. O(n) time, O(1) space.
	var pre *ListNode
	p := head
	for p != nil {
		pre, p, p.Next = p, p.Next, pre
	}
	return pre
}
|
package ThreadPool
import "fmt"
// Task couples a numeric job id with the function that performs the job.
type Task struct {
	taskId int                 // identifier passed to f and used in log output
	f      func(id int) error  // the work to perform
}

// NewTask is Task's constructor: it builds a Task that runs f1 with id.
func NewTask(id int, f1 func(id int) error) *Task {
	return &Task{
		taskId: id,
		f:      f1,
	}
}

// execute runs the task's function. The previous version silently discarded
// the error returned by t.f; failures are now reported.
func (t *Task) execute() {
	fmt.Println("taskId:", t.taskId, "is execute")
	if err := t.f(t.taskId); err != nil {
		fmt.Println("taskId:", t.taskId, "failed:", err)
	}
}
|
package prop
import (
"fmt"
"github.com/LILILIhuahuahua/ustc_tencent_game/configs"
"testing"
)
// TestNewProps verifies that a freshly created prop manager assigns unique
// ids and places every prop strictly inside the map bounds.
func TestNewProps(t *testing.T) {
	seen := make(map[int32]bool)
	//coods := make(map[info.CoordinateXYInfo]bool)
	m := New()
	fmt.Println(m)
	for idx, p := range m.props {
		fmt.Printf("index: %d, id: %d, status: %v, coords: %v\n", idx, p.Id, p.Status, p.Pos)
		if seen[p.Id] {
			t.Errorf("id duplicate: %v", p.Id)
		} else {
			seen[p.Id] = true
		}
		xInside := p.Pos.X > configs.MapMinX && p.Pos.X < configs.MapMaxX
		yInside := p.Pos.Y > configs.MapMinY && p.Pos.Y < configs.MapMaxY
		if !xInside || !yInside {
			t.Errorf("index: %d, id: %d, coordinates out of range, x: %f, y: %f", idx, p.Id, p.Pos.X, p.Pos.Y)
		}
		//if _, ok := coods[v.pos]; ok {
		//	t.Errorf("duplicate coordinates: %v", v.pos)
		//} else {
		//	coods[v.pos] = true
		//}
	}
	t.Logf("total props: %d", len(m.props))
}
|
package persistence
import (
"context"
"github.com/go-redis/redis/v8"
)
// NewRedisClient returns a redis client.
// NewRedisClient parses url, connects, and verifies the connection with a
// PING before returning the client.
func NewRedisClient(url string) (*redis.Client, error) {
	opts, err := redis.ParseURL(url)
	if err != nil {
		return nil, err
	}
	c := redis.NewClient(opts)
	// Fail fast if the server is unreachable.
	if _, err := c.Ping(context.Background()).Result(); err != nil {
		return nil, err
	}
	return c, nil
}
|
package models
// Error is a simple error payload model carrying a human-readable message.
type Error struct {
	Message string // description of what went wrong
}
|
// Copyright 2020 Red Hat, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package misc
import (
"strings"
"github.com/coreos/mantle/kola/cluster"
"github.com/coreos/mantle/kola/register"
"github.com/coreos/mantle/platform"
"github.com/coreos/mantle/platform/machine/unprivqemu"
)
// init registers the multipath test with the kola test registry. ClusterSize
// is 0 because runMultipath provisions its own machine with multipath disks.
func init() {
	register.RegisterTest(&register.Test{
		Name:        "multipath",
		Run:         runMultipath,
		ClusterSize: 0,
		Platforms:   []string{"qemu-unpriv"},
	})
}
// runMultipath boots a QEMU machine with a multipathed disk, switches the
// root to the device-mapper multipath target via kernel args, reboots, and
// asserts that /sysroot and /boot are mounted from multipath devices.
func runMultipath(c cluster.TestCluster) {
	var m platform.Machine
	var err error
	options := platform.QemuMachineOptions{
		MultiPathDisk: true,
	}
	switch pc := c.Cluster.(type) {
	// These cases have to be separated because when put together to the same case statement
	// the golang compiler no longer checks that the individual types in the case have the
	// NewMachineWithQemuOptions function, but rather whether platform.Cluster
	// does which fails
	case *unprivqemu.Cluster:
		m, err = pc.NewMachineWithQemuOptions(nil, options)
	default:
		panic("unreachable")
	}
	if err != nil {
		c.Fatal(err)
	}
	// Route the next boot through the multipath root device.
	c.MustSSH(m, "sudo rpm-ostree kargs --append rd.multipath=default --append root=/dev/disk/by-label/dm-mpath-root")
	err = m.Reboot()
	if err != nil {
		c.Fatalf("Failed to reboot the machine: %v", err)
	}
	// Both mounts must now be backed by device-mapper multipath devices.
	for _, mnt := range []string{"/sysroot", "/boot"} {
		srcdev := string(c.MustSSHf(m, "findmnt -nvr %s -o SOURCE", mnt))
		if !strings.HasPrefix(srcdev, "/dev/mapper/mpath") {
			c.Fatalf("mount %s has non-multipath source %s", mnt, srcdev)
		}
	}
}
|
package pkg
import (
"fmt"
"github.com/mrahbar/kubernetes-inspector/ssh"
"github.com/mrahbar/kubernetes-inspector/types"
"github.com/mrahbar/kubernetes-inspector/util"
"os"
"path"
"path/filepath"
"strings"
"time"
)
const (
	// Kubernetes names for the test namespace and workloads.
	netperfNamespace = "netperf"
	orchestratorName = "netperf-orchestrator"
	workerName       = "netperf-w"

	// Modes passed to the netperf image via --mode.
	orchestratorMode = "orchestrator"
	workerMode       = "worker"

	// Markers delimiting the CSV section in the orchestrator's log output.
	csvDataMarker    = "GENERATING CSV OUTPUT"
	csvEndDataMarker = "END CSV DATA"

	// Paths inside the orchestrator pod where results are written.
	outputCaptureFile = "/tmp/output.txt"
	resultCaptureFile = "/tmp/result.csv"

	netperfImage = "endianogino/netperf:1.1"

	workerCount = 3

	// Well-known ports used by the orchestrator and the workers.
	orchestratorPort = 5202
	iperf3Port       = 5201
	netperfPort      = 12865
)

// netperfOpts holds the options of the currently running netperf command.
var netperfOpts *types.NetperfOpts
// Netperf runs the cluster network benchmark: it picks an accessible master
// node, prepares the output directory, deploys the orchestrator/worker pods
// and services, waits for them, fetches the CSV results, and optionally
// cleans up. Precondition failures are reported via printer.PrintCritical.
func Netperf(cmdParams *types.CommandContext) {
	initParams(cmdParams)
	netperfOpts = cmdParams.Opts.(*types.NetperfOpts)
	group := util.FindGroupByName(config.ClusterGroups, types.MASTER_GROUPNAME)
	if group.Nodes == nil || len(group.Nodes) == 0 {
		printer.PrintCritical("No host configured for group [%s]", types.MASTER_GROUPNAME)
	}
	node := ssh.GetFirstAccessibleNode(config.Ssh.LocalOn, cmdExecutor, group.Nodes)
	if !util.IsNodeAddressValid(node) {
		printer.PrintCritical("No master available")
	}
	// Default the output directory to <executable dir>/netperf-results.
	if netperfOpts.OutputDir == "" {
		exPath, err := util.GetExecutablePath()
		if err != nil {
			printer.PrintCritical("Could not get current executable path: %s", err)
		}
		netperfOpts.OutputDir = path.Join(exPath, "netperf-results")
	}
	err := os.MkdirAll(netperfOpts.OutputDir, os.ModePerm)
	if err != nil {
		printer.PrintCritical("Failed to open output file for path %s Error: %v", netperfOpts.OutputDir, err)
	}
	printer.PrintHeader(fmt.Sprintf("Running network test from node %s", util.ToNodeLabel(node)), '=')
	cmdExecutor.SetNode(node)
	// The steps below run in order; each reports its own progress/errors.
	checkingNetperfPreconditions()
	createNetperfNamespace()
	createNetperfServices()
	createNetperfReplicationControllers()
	waitForNetperfServicesToBeRunning()
	displayNetperfPods()
	fetchTestResults()
	if netperfOpts.Cleanup {
		printer.PrintInfo("Cleaning up...")
		removeNetperfServices()
		removeNetperfReplicationControllers()
	}
	printer.PrintOk("DONE")
}
// checkingNetperfPreconditions verifies the cluster has at least two ready
// nodes, which the worker placement logic below requires.
// NOTE(review): this assumes printer.PrintCritical terminates the process
// (as os.Exit(2) does in the other branch) — confirm, otherwise execution
// falls through with an invalid count.
func checkingNetperfPreconditions() {
	count, err := cmdExecutor.GetNumberOfReadyNodes()
	if err != nil {
		printer.PrintCritical("Error checking node count: %s", err)
	} else if count < 2 {
		printer.PrintErr("Insufficient number of nodes for netperf test (need minimum of 2 nodes)")
		os.Exit(2)
	}
}
// createNetperfNamespace creates the dedicated netperf namespace on the
// cluster and reports the outcome.
func createNetperfNamespace() {
	printer.PrintInfo("Creating namespace")
	if err := cmdExecutor.CreateNamespace(netperfNamespace); err != nil {
		printer.PrintCritical("Error creating test namespace: %s", err)
	} else {
		printer.PrintOk("Namespace %s created", netperfNamespace)
	}
	printer.PrintNewLine()
}
// createNetperfServices creates the two services the benchmark needs: the
// orchestrator's control-port service and the netperf-w2 service, whose
// clusterIP fronts worker 2's iperf3 (TCP+UDP) and netperf ports.
func createNetperfServices() {
	printer.PrintInfo("Creating services")
	// Host
	data := types.Service{Name: orchestratorName, Namespace: netperfNamespace, Ports: []types.ServicePort{
		{
			Name:       orchestratorName,
			Port:       orchestratorPort,
			Protocol:   "TCP",
			TargetPort: orchestratorPort,
		},
	}}
	exists, err := cmdExecutor.CreateService(data)
	if exists {
		printer.PrintIgnored("Service: %s already exists.", orchestratorName)
	} else if err != nil {
		printer.PrintCritical("Error adding service %v: %s", orchestratorName, err)
	} else {
		printer.PrintOk("Service %s created", orchestratorName)
	}
	// Create the netperf-w2 service that points a clusterIP at the worker 2 pod
	name := fmt.Sprintf("%s%d", workerName, 2)
	data = types.Service{Name: name, Namespace: netperfNamespace, Ports: []types.ServicePort{
		{
			Name:       name,
			Protocol:   "TCP",
			Port:       iperf3Port,
			TargetPort: iperf3Port,
		},
		{
			Name:       fmt.Sprintf("%s-%s", name, "udp"),
			Protocol:   "UDP",
			Port:       iperf3Port,
			TargetPort: iperf3Port,
		},
		{
			Name:       fmt.Sprintf("%s-%s", name, "netperf"),
			Protocol:   "TCP",
			Port:       netperfPort,
			TargetPort: netperfPort,
		},
	}}
	exists, err = cmdExecutor.CreateService(data)
	if exists {
		printer.PrintIgnored("Service: %s already exists.", name)
	} else if err != nil {
		printer.PrintCritical("Error adding service %v: %s", name, err)
	} else {
		printer.PrintOk("Service %s created", name)
	}
	printer.PrintNewLine()
}
// createNetperfReplicationControllers deploys the orchestrator RC plus one
// RC per worker. Workers 1 and 2 land on the first ready node and worker 3
// on the second, so same-node and cross-node paths are both measured.
//
// Fix: the orchestrator service's clusterIP (hostIP) was looked up and
// validated but never used — the workers' orchestratorPodIP env var was set
// to the service *name* instead of the IP. It now receives hostIP.
func createNetperfReplicationControllers() {
	printer.PrintInfo("Creating ReplicationControllers")
	hostRC := types.ReplicationController{Name: orchestratorName, Namespace: netperfNamespace,
		Image: netperfImage,
		Args: []types.Arg{
			{
				Key:   "--mode",
				Value: orchestratorMode,
			},
		},
		Ports: []types.PodPort{
			{
				Name:     "service-port",
				Protocol: "TCP",
				Port:     orchestratorPort,
			},
		},
	}
	err := cmdExecutor.CreateReplicationController(hostRC)
	if err != nil {
		printer.PrintCritical("Error creating %s replication controller: %s", orchestratorName, err)
	} else {
		printer.PrintOk("Created %s replication-controller", orchestratorName)
	}
	// List ready nodes as CSV lines: "<name>,<status>,...".
	args := []string{"get", "nodes", " | ", "grep", "-w", "\"Ready\"", " | ", "sed", "-e", "\"s/[[:space:]]\\+/,/g\""}
	sshOut, err := cmdExecutor.RunKubectlCommand(args)
	if err != nil {
		printer.PrintCritical("Error getting nodes for worker replication controller: %s", err)
	} else {
		printer.Print("Waiting 5s to give orchestrator pod time to start")
		time.Sleep(5 * time.Second)
		hostIP, err := getServiceIP(orchestratorName)
		if hostIP == "" || err != nil {
			printer.PrintCritical("Error getting clusterIP of service %s: %s", orchestratorName, err)
		}
		lines := strings.SplitN(sshOut.Stdout, "\n", -1)
		firstNode := strings.Split(lines[0], ",")[0]
		secondNode := strings.Split(lines[1], ",")[0]
		for i := 1; i <= workerCount; i++ {
			name := fmt.Sprintf("%s%d", workerName, i)
			kubeNode := firstNode
			if i == 3 {
				// Only the last worker runs on the second node.
				kubeNode = secondNode
			}
			clientRC := types.ReplicationController{Name: name, Namespace: netperfNamespace, Image: netperfImage,
				NodeName: kubeNode,
				Args: []types.Arg{
					{
						Key:   "--mode",
						Value: workerMode,
					},
				},
				Ports: []types.PodPort{
					{
						Name:     "iperf3-port",
						Protocol: "UDP",
						Port:     iperf3Port,
					},
					{
						Name:     "netperf-port",
						Protocol: "TCP",
						Port:     netperfPort,
					},
				},
				Envs: []types.Env{
					{
						Name:  "workerName",
						Value: name,
					},
					{
						Name:       "workerPodIP",
						FieldValue: "status.podIP",
					},
					{
						Name:  "orchestratorPort",
						Value: "5202", // keep in sync with orchestratorPort
					},
					{
						// Was orchestratorName; the looked-up clusterIP is
						// what the workers actually need to reach.
						Name:  "orchestratorPodIP",
						Value: hostIP,
					},
				},
			}
			_, err := cmdExecutor.DeployKubernetesResource(types.REPLICATION_CONTROLLER_TEMPLATE, clientRC)
			if err != nil {
				printer.PrintCritical("Error creating %s replication controller: %s", name, err)
			} else {
				printer.PrintOk("Created %s replication-controller", name)
			}
		}
	}
	printer.PrintNewLine()
}
// waitForNetperfServicesToBeRunning polls pod phases (with exponential
// backoff) until the orchestrator and all workers report "Running".
//
// Fix: on a kubectl error the old code warned but still dereferenced
// sshOut.Stdout, risking a nil/invalid result; it now backs off and retries.
func waitForNetperfServicesToBeRunning() {
	printer.PrintInfo("Waiting for pods to be Running...")
	waitTime := time.Second
	done := false
	for !done {
		tmpl := "\"{..status.phase}\""
		args := []string{"--namespace=" + netperfNamespace, "get", "pods", "-o", "jsonpath=" + tmpl}
		sshOut, err := cmdExecutor.RunKubectlCommand(args)
		if err != nil {
			printer.PrintWarn("Error running kubectl command '%v': %s", args, err)
			time.Sleep(waitTime)
			waitTime *= 2
			continue
		}
		lines := strings.Split(sshOut.Stdout, " ")
		if len(lines) < workerCount+1 {
			printer.Print("Service status output too short. Waiting %v then checking again.", waitTime)
			time.Sleep(waitTime)
			waitTime *= 2
			continue
		}
		allRunning := true
		for _, p := range lines {
			if p != "Running" {
				allRunning = false
				break
			}
		}
		if !allRunning {
			printer.Print("Services not running. Waiting %v then checking again.", waitTime)
			time.Sleep(waitTime)
			waitTime *= 2
		} else {
			done = true
		}
	}
	printer.PrintNewLine()
}
// displayNetperfPods prints the pods currently running in the netperf
// namespace, warning (but not failing) if the listing cannot be obtained.
func displayNetperfPods() {
	pods, err := cmdExecutor.GetPods(netperfNamespace, true)
	if err != nil {
		printer.PrintWarn("Error running kubectl command '%v'", err)
	} else {
		printer.Print("Pods are running\n%s", pods.Stdout)
	}
	printer.PrintNewLine()
}
// fetchTestResults blocks until the orchestrator pod exists, then polls its
// logs until the CSV results appear and have been copied locally.
func fetchTestResults() {
	printer.PrintInfo("Waiting till pods orchestrate themselves. This may take several minutes..")
	podPollInterval := 30 * time.Second
	podName := getPodName(orchestratorName)
	for podName == "" {
		printer.PrintInfo("Waiting %s for orchestrator pod creation", podPollInterval)
		time.Sleep(podPollInterval)
		podName = getPodName(orchestratorName)
	}
	printer.Print("The pods orchestrate themselves, waiting for the results file to show up in the orchestrator pod %s", podName)
	resultPollInterval := 5 * time.Minute
	printer.PrintNewLine()
	for {
		// Monitor the orchestrator pod for the CSV results file
		if getCsvResultsFromPod(podName) == nil {
			printer.PrintSkipped("Scanned orchestrator pod filesystem - no results file found yet...waiting %s for orchestrator to write CSV file...", resultPollInterval)
			time.Sleep(resultPollInterval)
			continue
		}
		printer.PrintInfo("Test concluded - CSV raw data written to %s", netperfOpts.OutputDir)
		if processCsvData(podName) {
			break
		}
	}
}
// getCsvResultsFromPod retrieves the logs for the pod/container and, when
// the CSV start/end markers are both present, returns the data between
// them; otherwise it returns nil.
//
// Fix: the old code read sshOut.Stdout *before* checking err, which can
// dereference an invalid result when the kubectl command fails.
func getCsvResultsFromPod(podName string) *string {
	args := []string{"--namespace=" + netperfNamespace, "logs", podName, "--timestamps=false"}
	sshOut, err := cmdExecutor.RunKubectlCommand(args)
	if err != nil {
		printer.PrintWarn("Error reading logs from pod %s: %s", podName, err)
		return nil
	}
	logData := sshOut.Stdout
	index := strings.Index(logData, csvDataMarker)
	endIndex := strings.Index(logData, csvEndDataMarker)
	if index == -1 || endIndex == -1 {
		return nil
	}
	// Skip the marker itself and the newline that follows it.
	csvData := logData[index+len(csvDataMarker)+1 : endIndex]
	return &csvData
}
// processCsvData : Fetch the CSV datafile
func processCsvData(podName string) bool {
remote := fmt.Sprintf("%s/%s:%s", netperfNamespace, podName, resultCaptureFile)
_, err := cmdExecutor.RunKubectlCommand([]string{"cp", remote, resultCaptureFile})
if err != nil {
printer.PrintErr("Couldn't copy output CSV datafile %s from remote %s: %s",
resultCaptureFile, util.GetNodeAddress(cmdExecutor.GetNode()), err)
return false
}
err = cmdExecutor.DownloadFile(resultCaptureFile, filepath.Join(netperfOpts.OutputDir, "result.csv"))
if err != nil {
printer.PrintErr("Couldn't fetch output CSV datafile %s from remote %s: %s",
resultCaptureFile, util.GetNodeAddress(cmdExecutor.GetNode()), err)
return false
}
remote = fmt.Sprintf("%s/%s:%s", netperfNamespace, podName, outputCaptureFile)
_, err = cmdExecutor.RunKubectlCommand([]string{"cp", remote, outputCaptureFile})
if err != nil {
printer.PrintErr("Couldn't copy output RAW datafile %s from remote %s: %s",
outputCaptureFile, util.GetNodeAddress(cmdExecutor.GetNode()), err)
return false
}
err = cmdExecutor.DownloadFile(outputCaptureFile, filepath.Join(netperfOpts.OutputDir, "output.txt"))
if err != nil {
printer.PrintErr("Couldn't fetch output RAW datafile %s from remote %s: %s",
outputCaptureFile, util.GetNodeAddress(cmdExecutor.GetNode()), err)
return false
}
return true
}
// removeNetperfServices deletes the orchestrator and worker-2 services.
//
// Fix: both warnings passed err without a matching format verb, producing
// "%!(EXTRA ...)" noise in the output; the formats now include ": %s".
func removeNetperfServices() {
	name := "svc/" + orchestratorName
	if err := cmdExecutor.RemoveResource(netperfNamespace, name); err != nil {
		printer.PrintWarn("Error deleting service '%v': %s", name, err)
	}
	name = fmt.Sprintf("svc/%s%d", workerName, 2)
	if err := cmdExecutor.RemoveResource(netperfNamespace, name); err != nil {
		printer.PrintWarn("Error deleting service '%v': %s", name, err)
	}
}
// removeNetperfReplicationControllers deletes the orchestrator and all
// worker replication controllers.
//
// Fix: both warnings passed err without a matching format verb, producing
// "%!(EXTRA ...)" noise; the formats now include ": %s".
// NOTE(review): the orchestrator is removed without the "rc/" prefix the
// workers use — confirm whether RemoveResource needs the prefix here.
func removeNetperfReplicationControllers() {
	if err := cmdExecutor.RemoveResource(netperfNamespace, orchestratorName); err != nil {
		printer.PrintWarn("Error deleting replication-controller '%v': %s", orchestratorName, err)
	}
	for i := 1; i <= workerCount; i++ {
		name := fmt.Sprintf("rc/%s%d", workerName, i)
		if err := cmdExecutor.RemoveResource(netperfNamespace, name); err != nil {
			printer.PrintWarn("Error deleting replication-controller '%v': %s", name, err)
		}
	}
}
// getPodName returns the name of the pod labelled app=<name> in the netperf
// namespace, or the empty string when the lookup fails.
func getPodName(name string) string {
	args := []string{
		"--namespace=" + netperfNamespace,
		"get", "pods",
		"-l", "app=" + name,
		"-o", "jsonpath=" + "\"{..metadata.name}\"",
	}
	sshOut, err := cmdExecutor.RunKubectlCommand(args)
	if err != nil {
		return ""
	}
	return strings.TrimRight(sshOut.Stdout, "\n")
}
// getServiceIP returns the clusterIP of the service labelled app=<name> in
// the netperf namespace.
func getServiceIP(name string) (string, error) {
	args := []string{
		"--namespace=" + netperfNamespace,
		"get", "service",
		"-l", "app=" + name,
		"-o", "jsonpath=" + "\"{..spec.clusterIP}\"",
	}
	sshOut, err := cmdExecutor.RunKubectlCommand(args)
	if err != nil {
		return "", err
	}
	return strings.Trim(sshOut.Stdout, " \n"), nil
}
|
package sequencing
// Sequence abstracts a finite, index-addressable series of values.
type Sequence interface {
	// Val returns the element at the given index.
	Val(int) interface{}
	// Len reports the number of elements in the sequence.
	Len() int
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The shadertool command modifies shader source code.
// For example, it converts GLSL to the desktop dialect.
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"path/filepath"
"sync"
"github.com/google/gapid/core/app"
"github.com/google/gapid/core/app/crash"
"github.com/google/gapid/gapis/shadertools"
)
// Command-line flags controlling output location and conversion behavior.
var (
	out   = flag.String("out", "", "Directory for the converted shaders")
	check = flag.Bool("check", true, "Verify that the output compiles")
	debug = flag.Bool("debug", false, "Make the shader debuggable")
	asm   = flag.Bool("asm", false, "Print disassembled info")
)
// main wires up the app metadata and delegates all work to run.
func main() {
	app.Name = "shadertool"
	app.ShortHelp = "Converts GLSL ES shader to the desktop GLSL dialect"
	app.ShortUsage = "<shader file>"
	app.Run(run)
}
// run converts each shader file named on the command line, concurrently.
// Files are read sequentially (so a read error aborts everything), then each
// conversion runs in its own goroutine; per-shader failures are printed and
// do not stop the other conversions.
func run(ctx context.Context) error {
	args := flag.Args()
	if len(args) == 0 {
		flag.Usage()
		return nil
	}
	// Read input
	var wg sync.WaitGroup
	for _, input := range args {
		// Shadow the loop variable so each goroutine captures its own copy
		// (required before Go 1.22 per-iteration loop variables).
		input := input
		source, err := ioutil.ReadFile(input)
		if err != nil {
			return err
		}
		wg.Add(1)
		crash.Go(func() {
			defer wg.Done()
			// Process the shader
			result, err := convert(string(source), filepath.Ext(input))
			if err != nil {
				fmt.Printf("%v: %v\n", input, err)
				return
			}
			// Write output
			if *out == "" {
				fmt.Print(result)
			} else {
				output := filepath.Join(*out, filepath.Base(input))
				err := ioutil.WriteFile(output, []byte(result), 0666)
				if err != nil {
					fmt.Printf("%v: %v\n", input, err)
					return
				}
			}
		})
	}
	// Block until every conversion goroutine has finished.
	wg.Wait()
	return nil
}
// convert translates GLSL ES shader source to the desktop GLSL dialect.
// shaderType is the file extension (".vert" or ".frag") and selects the
// pipeline stage. When -asm is set, the result is prefixed with disassembly
// and debug-info comment blocks.
func convert(source, shaderType string) (result string, err error) {
	opts := shadertools.ConvertOptions{}
	switch shaderType {
	case ".vert":
		opts.ShaderType = shadertools.TypeVertex
	case ".frag":
		opts.ShaderType = shadertools.TypeFragment
	default:
		return "", fmt.Errorf("File extension must be .vert or .frag (seen %v)", shaderType)
	}
	opts.MakeDebuggable = *debug
	opts.CheckAfterChanges = *check
	opts.Disassemble = *asm
	// source is already a string; the previous string(source) conversion was
	// a no-op left over from passing the raw []byte.
	res, err := shadertools.ConvertGlsl(source, &opts)
	if err != nil {
		return "", err
	}
	if *asm {
		result += "/* Disassembly:\n" + res.DisassemblyString + "\n*/\n"
		result += "/* Debug info:\n" + shadertools.FormatDebugInfo(res.Info, " ") + "\n*/\n"
	}
	result += res.SourceCode
	return result, nil
}
|
package xhtml5_test
import (
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Tables covers XHTML5 rendering of AsciiDoc tables: cell formatting,
// captions and their counters/labels, width/grid/frame/stripes attributes,
// and column specs (relative widths, repeats, alignment) with header and
// footer options. Each case renders an AsciiDoc snippet and compares the
// result against the expected XHTML verbatim.
var _ = Describe("tables", func() {

	// Basic cell rendering: implicit equal column widths.
	It("1-line table with 2 cells", func() {
		source := `|===
| *foo* foo | _bar_
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock"><strong>foo</strong> foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><em>bar</em></p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("1-line table with 3 cells", func() {
		source := `|===
| *foo* foo | _bar_ | baz
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 33.3333%;"/>
<col style="width: 33.3333%;"/>
<col style="width: 33.3334%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock"><strong>foo</strong> foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><em>bar</em></p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">baz</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	// Titles render as numbered <caption> elements; a first blank-line-free
	// row followed by a blank line becomes the header.
	It("table with title, headers and 1 line per cell", func() {
		source := `.table title
|===
|Column header 1 |Column header 2
|Column 1, row 1
|Column 2, row 1
|Column 1, row 2
|Column 2, row 2
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<caption class="title">Table 1. table title</caption>
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">Column header 1</th>
<th class="tableblock halign-left valign-top">Column header 2</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 1, row 1</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 2, row 1</p></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 1, row 2</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 2, row 2</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("table with title, custom caption", func() {
		source := `.table title
[caption="Example I. "]
|===
|Column header 1 |Column header 2
|Column 1, row 1
|Column 2, row 1
|Column 1, row 2
|Column 2, row 2
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<caption class="title">Example I. table title</caption>
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">Column header 1</th>
<th class="tableblock halign-left valign-top">Column header 2</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 1, row 1</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 2, row 1</p></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 1, row 2</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 2, row 2</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("empty table ", func() {
		source := `|===
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	// Caption counter behavior: only captioned tables consume a number.
	It("2 tables with 1 counter", func() {
		source := `|===
| foo | bar
|===
.Title 2
|===
| foo | bar
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">bar</p></td>
</tr>
</tbody>
</table>
<table class="tableblock frame-all grid-all stretch">
<caption class="title">Table 1. Title 2</caption>
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">bar</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("2 tables with no caption label", func() {
		source := `:table-caption!:
.Title 1
|===
| foo | bar
|===
.Title 2
|===
| foo | bar
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<caption class="title">Title 1</caption>
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">bar</p></td>
</tr>
</tbody>
</table>
<table class="tableblock frame-all grid-all stretch">
<caption class="title">Title 2</caption>
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">bar</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("2 tables with custom caption label", func() {
		source := `:table-caption: Chart
.First
|===
| foo | bar
|===
.Second
|===
| foo | bar
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<caption class="title">Chart 1. First</caption>
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">bar</p></td>
</tr>
</tbody>
</table>
<table class="tableblock frame-all grid-all stretch">
<caption class="title">Chart 2. Second</caption>
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">bar</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("2 tables with 2 counters", func() {
		source := `.Title 1
|===
| foo | bar
|===
.Title 2
|===
| foo | bar
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<caption class="title">Table 1. Title 1</caption>
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">bar</p></td>
</tr>
</tbody>
</table>
<table class="tableblock frame-all grid-all stretch">
<caption class="title">Table 2. Title 2</caption>
<colgroup>
<col style="width: 50%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">foo</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">bar</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	// Width/fit attributes: %autowidth, numeric and percent widths, and the
	// clamping of widths above 100% back to "stretch".
	It("autowidth ", func() {
		source := "[%autowidth]\n|===\n|==="
		expected := `<table class="tableblock frame-all grid-all fit-content">
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("fixed width (number)", func() {
		source := "[width=75]\n|===\n|==="
		expected := `<table class="tableblock frame-all grid-all" style="width: 75%;">
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("fixed width (percent)", func() {
		source := "[width=75%]\n|===\n|==="
		expected := `<table class="tableblock frame-all grid-all" style="width: 75%;">
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("fixed width (100 percent)", func() {
		source := "[width=100%]\n|===\n|==="
		expected := `<table class="tableblock frame-all grid-all stretch">
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("fixed width (> 100 percent)", func() {
		source := "[width=205]\n|===\n|==="
		expected := `<table class="tableblock frame-all grid-all stretch">
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("fixed width overrides fit", func() {
		source := "[%autowidth,width=25]\n|===\n|==="
		expected := `<table class="tableblock frame-all grid-all" style="width: 25%;">
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("fixed width overrides fit (> 100 percent)", func() {
		source := "[%autowidth,width=205]\n|===\n|==="
		expected := `<table class="tableblock frame-all grid-all stretch">
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("grid, frames, float, stripes", func() {
		source := "[%autowidth,grid=rows,frame=sides,stripes=hover,float=right]\n|===\n|==="
		expected := `<table class="tableblock frame-sides grid-rows stripes-hover fit-content right">
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	// Column specs: relative widths, repeats ("3*10") and per-column
	// horizontal/vertical alignment ("^", "<", ">", ".^", ".>").
	It("table with cols relative widths", func() {
		source := "[cols=\"3,2,5\"]\n|===\n|one|two|three\n|==="
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 30%;"/>
<col style="width: 20%;"/>
<col style="width: 50%;"/>
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">one</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">two</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">three</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("table with cols relative widths and header", func() {
		source := "[cols=\"3,2,5\"]\n|===\n|h1|h2|h3\n\n|one|two|three\n|==="
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 30%;"/>
<col style="width: 20%;"/>
<col style="width: 50%;"/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">h1</th>
<th class="tableblock halign-left valign-top">h2</th>
<th class="tableblock halign-left valign-top">h3</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">one</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">two</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">three</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("autowidth overrides column widths", func() {
		source := "[%autowidth,cols=\"3,2,5\"]\n|===\n|h1|h2|h3\n\n|one|two|three\n|==="
		expected := `<table class="tableblock frame-all grid-all fit-content">
<colgroup>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">h1</th>
<th class="tableblock halign-left valign-top">h2</th>
<th class="tableblock halign-left valign-top">h3</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">one</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">two</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">three</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("column auto-width", func() {
		source := "[cols=\"30,~,~\"]\n|===\n|h1|h2|h3\n\n|one|two|three\n|==="
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 30%;"/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">h1</th>
<th class="tableblock halign-left valign-top">h2</th>
<th class="tableblock halign-left valign-top">h3</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">one</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">two</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">three</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("columns with repeat", func() {
		source := "[cols=\"3*10,2*~\"]\n|===\n|h1|h2|h3|h4|h5\n\n|one|two|three|four|five\n|==="
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 10%;"/>
<col style="width: 10%;"/>
<col style="width: 10%;"/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">h1</th>
<th class="tableblock halign-left valign-top">h2</th>
<th class="tableblock halign-left valign-top">h3</th>
<th class="tableblock halign-left valign-top">h4</th>
<th class="tableblock halign-left valign-top">h5</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">one</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">two</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">three</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">four</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">five</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("columns with alignment changes", func() {
		source := "[cols=\"2*^.^,<,.>,>\"]\n|===\n|h1|h2|h3|h4|h5\n\n|one|two|three|four|five\n|==="
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 20%;"/>
<col style="width: 20%;"/>
<col style="width: 20%;"/>
<col style="width: 20%;"/>
<col style="width: 20%;"/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-center valign-middle">h1</th>
<th class="tableblock halign-center valign-middle">h2</th>
<th class="tableblock halign-left valign-top">h3</th>
<th class="tableblock halign-left valign-bottom">h4</th>
<th class="tableblock halign-right valign-top">h5</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-center valign-middle"><p class="tableblock">one</p></td>
<td class="tableblock halign-center valign-middle"><p class="tableblock">two</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">three</p></td>
<td class="tableblock halign-left valign-bottom"><p class="tableblock">four</p></td>
<td class="tableblock halign-right valign-top"><p class="tableblock">five</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	// Explicit header/footer options and element id + numbered title.
	It("with header option", func() {
		source := `[cols="3*^",options="header"]
|===
|Dir (X,Y,Z) |Num Cells |Size
|X |10 |0.1
|Y |5 |0.2
|Z |10 |0.1
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 33.3333%;"/>
<col style="width: 33.3333%;"/>
<col style="width: 33.3334%;"/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-center valign-top">Dir (X,Y,Z)</th>
<th class="tableblock halign-center valign-top">Num Cells</th>
<th class="tableblock halign-center valign-top">Size</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">X</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">10</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.1</p></td>
</tr>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">Y</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">5</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.2</p></td>
</tr>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">Z</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">10</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.1</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("with header and footer options", func() {
		source := `[%header%footer,cols="2,2,1"]
|===
|Column 1, header row
|Column 2, header row
|Column 3, header row
|Cell in column 1, row 2
|Cell in column 2, row 2
|Cell in column 3, row 2
|Column 1, footer row
|Column 2, footer row
|Column 3, footer row
|===`
		expected := `<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 40%;"/>
<col style="width: 40%;"/>
<col style="width: 20%;"/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">Column 1, header row</th>
<th class="tableblock halign-left valign-top">Column 2, header row</th>
<th class="tableblock halign-left valign-top">Column 3, header row</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">Cell in column 1, row 2</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Cell in column 2, row 2</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Cell in column 3, row 2</p></td>
</tr>
</tbody>
<tfoot>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 1, footer row</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 2, footer row</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Column 3, footer row</p></td>
</tr>
</tfoot>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("with id and title", func() {
		source := `[#non-uniform-mesh]
.Non-Uniform Mesh Parameters
[cols="3*^",options="header"]
|===
|Dir (X,Y,Z) |Num Cells |Size
|X |10 |0.1
|Y |10 |0.1
|Y |5 |0.2
|Z |10 |0.1
|===`
		expected := `<table id="non-uniform-mesh" class="tableblock frame-all grid-all stretch">
<caption class="title">Table 1. Non-Uniform Mesh Parameters</caption>
<colgroup>
<col style="width: 33.3333%;"/>
<col style="width: 33.3333%;"/>
<col style="width: 33.3334%;"/>
</colgroup>
<thead>
<tr>
<th class="tableblock halign-center valign-top">Dir (X,Y,Z)</th>
<th class="tableblock halign-center valign-top">Num Cells</th>
<th class="tableblock halign-center valign-top">Size</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">X</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">10</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.1</p></td>
</tr>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">Y</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">10</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.1</p></td>
</tr>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">Y</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">5</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.2</p></td>
</tr>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">Z</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">10</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.1</p></td>
</tr>
</tbody>
</table>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})
	// TODO: Verify styles -- it's verified in the parser for now, but we still need to implement styles.
})
|
package cmd
import (
"github.com/spf13/cobra"
)
// testCmd groups sub-commands for testing custom rules. It carries no Run
// function of its own and only serves as a parent command.
var testCmd = &cobra.Command{
	Use:   "test",
	Short: "Test custom rules",
}

// init attaches the test command to the application's root command.
func init() {
	rootCmd.AddCommand(testCmd)
}
|
/*
* Copyright 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package commands
import (
"fmt"
"log"
"os"
"path/filepath"
"omg-cli/config"
"omg-cli/pivnet"
"github.com/alecthomas/kingpin"
)
// CacheTilesCommand downloads every non-built-in tile selected for an
// environment into a local cache directory for later offline installs.
type CacheTilesCommand struct {
	logger         *log.Logger
	envDir         string // directory holding the environment configuration
	tileCacheDir   string // destination directory for cached tile files
	pivnetApiToken string // Pivotal Network API token used for downloads
}

// CacheTilesName is the CLI sub-command name for CacheTilesCommand.
const CacheTilesName = "cache-tiles"

// register wires the cache-tiles sub-command and its flags into the CLI app.
func (cmd *CacheTilesCommand) register(app *kingpin.Application) {
	c := app.Command(CacheTilesName, "Cache tile downloads locally").Action(cmd.run)
	registerEnvConfigFlag(c, &cmd.envDir)
	registerTileCacheFlag(c, &cmd.tileCacheDir)
	registerPivnetApiTokenFlag(c, &cmd.pivnetApiToken)
}
// run downloads every non-built-in tile selected for the environment into
// the tile cache directory, creating the directory on demand.
func (cmd *CacheTilesCommand) run(c *kingpin.ParseContext) error {
	pivnetSdk, err := pivnet.NewSdk(cmd.pivnetApiToken, cmd.logger)
	if err != nil {
		return err
	}
	envCfg, err := config.ConfigFromEnvDirectory(cmd.envDir)
	if err != nil {
		return err
	}
	// MkdirAll (rather than the previous Stat + Mkdir pair) also creates
	// missing parent directories, succeeds when the directory already
	// exists, and avoids the check-then-create race.
	if err := os.MkdirAll(cmd.tileCacheDir, os.ModePerm); err != nil {
		return fmt.Errorf("creating tile cache directory %s: %v", cmd.tileCacheDir, err)
	}
	tileCache := pivnet.TileCache{cmd.tileCacheDir}
	tiles := selectedTiles(cmd.logger, envCfg)
	for _, tile := range tiles {
		if tile.BuiltIn() {
			// Built-in tiles ship with the installer; nothing to download.
			continue
		}
		// NOTE(review): a hard-coded EnvConfig{SmallFootprint: true} is used
		// instead of envCfg — presumably the Pivnet coordinates are identical
		// for both footprints; confirm before relying on this for
		// footprint-specific tiles.
		definition := tile.Definition(&config.EnvConfig{SmallFootprint: true})
		cmd.logger.Printf("caching tile: %s", definition.Product.Name)
		output := filepath.Join(cmd.tileCacheDir, tileCache.FileName(definition.Pivnet))
		file, err := pivnetSdk.DownloadTileToPath(definition.Pivnet, output)
		if err != nil {
			return fmt.Errorf("downloading tile: %v", err)
		}
		if err := file.Close(); err != nil {
			return fmt.Errorf("closing tile: %v", err)
		}
	}
	return nil
}
|
package main
// Records is the JSON envelope returned by the DNS record API.
type Records struct {
	Records    *[]Record `json:"arrRecords"`
	ReturnCode int       `json:"intReturnCode,omitempty"`
	Message    string    `json:"strMessage,omitempty"`
}

// Record is a single DNS record entry.
type Record struct {
	Name       string `json:"name,omitempty"`
	RecordType string `json:"type,omitempty"`
	Content    string `json:"content,omitempty"`
	Ttl        int    `json:"ttl,omitempty"`
	Priority   int    `json:"prio,omitempty"`
}

// equals reports whether two records refer to the same entry; records are
// identified by Name only.
func equals(this *Record, that *Record) bool {
	return this.Name == that.Name
}

// exists reports whether a record sharing this record's name is present.
func exists(records []Record, this Record) bool {
	return find(records, this) != nil
}

// indexOf returns the position of the first record satisfying pred, or -1
// when no record matches.
func indexOf(records []Record, pred func(record Record) bool) int {
	for i := range records {
		if pred(records[i]) {
			return i
		}
	}
	return -1
}

// find returns a pointer to the first record in s with the same name as
// record, or nil when none exists.
func find(s []Record, record Record) *Record {
	i := indexOf(s, func(that Record) bool { return that.Name == record.Name })
	if i < 0 {
		return nil
	}
	return &s[i]
}

// remove deletes the first record in s that shares record's name by swapping
// it with the last element and shrinking the slice by one; element order is
// not preserved and the input's backing array is mutated.
func remove(s []Record, record Record) []Record {
	i := indexOf(s, func(that Record) bool { return that.Name == record.Name })
	if i < 0 {
		return s
	}
	s[i] = s[len(s)-1]
	return s[:len(s)-1]
}
|
// Copyright 2017 The Aiicy Team.
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package base
import (
"crypto/md5"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"fmt"
"html/template"
"math"
"math/big"
"strings"
"time"
"github.com/Aiicy/AiicyDS/modules/setting"
"github.com/Unknwon/com"
"github.com/Unknwon/i18n"
log "gopkg.in/clog.v1"
)
const DOC_URL = "https://github.com/Aiicy/AiicyDS/wiki"
type (
TplName string
)
// EncodeMD5 encodes string to md5 hex value.
func EncodeMD5(str string) string {
	m := md5.New()
	m.Write([]byte(str))
	return hex.EncodeToString(m.Sum(nil))
}

// ShortSha truncates a full SHA-1 hex digest to the conventional
// 10-character short form; shorter inputs are returned unchanged.
// The parameter was renamed from "sha1", which shadowed the imported
// crypto/sha1 package.
func ShortSha(sha string) string {
	if len(sha) > 10 {
		return sha[:10]
	}
	return sha
}
// GetRandomString generates a cryptographically random alphanumeric string
// of length n using crypto/rand.
func GetRandomString(n int) (string, error) {
	const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
	buffer := make([]byte, n)
	max := big.NewInt(int64(len(alphanum)))
	for i := 0; i < n; i++ {
		index, err := randomInt(max)
		if err != nil {
			return "", err
		}
		buffer[i] = alphanum[index]
	}
	return string(buffer), nil
}

// randomInt returns a uniform random int in [0, max) from crypto/rand.
// The result variable was renamed from "rand", which shadowed the package.
func randomInt(max *big.Int) (int, error) {
	v, err := rand.Int(rand.Reader, max)
	if err != nil {
		return 0, err
	}
	return int(v.Int64()), nil
}
// VerifyTimeLimitCode reports whether code is a valid, unexpired time-limit
// code for data. The code embeds its own start time and lifetime, so the
// minutes argument acts only as a fallback when the embedded lifetime does
// not parse as an integer.
func VerifyTimeLimitCode(data string, minutes int, code string) bool {
	if len(code) <= 18 {
		return false
	}
	// split code: first 12 chars are the start timestamp, next 6 the
	// lifetime in minutes; the trailing 40 are the sha1 signature that is
	// re-derived below.
	start := code[:12]
	lives := code[12:18]
	if d, err := com.StrTo(lives).Int(); err == nil {
		minutes = d
	}
	// right active code: recompute the expected code from the embedded
	// parameters — a match proves the code was produced with our secret key.
	retCode := CreateTimeLimitCode(data, minutes, start)
	if retCode == code && minutes > 0 {
		// check time is expired or not
		before, _ := time.ParseInLocation("200601021504", start, time.Local)
		now := time.Now()
		if before.Add(time.Minute*time.Duration(minutes)).Unix() > now.Unix() {
			return true
		}
	}
	return false
}

// TimeLimitCodeLength is the total code length:
// 12-char start time + 6-char minutes + 40-char sha1 hex digest.
const TimeLimitCodeLength = 12 + 6 + 40

// CreateTimeLimitCode creates a time limit code for data.
// code format: 12 length date time string + 6 minutes string + 40 sha1 encoded string
// startInf is either nil (use the current time) or a "200601021504"-formatted
// string giving the start time.
func CreateTimeLimitCode(data string, minutes int, startInf interface{}) string {
	format := "200601021504"
	var start, end time.Time
	var startStr, endStr string
	if startInf == nil {
		// Use now time create code
		start = time.Now()
		startStr = start.Format(format)
	} else {
		// use start string create code
		startStr = startInf.(string)
		start, _ = time.ParseInLocation(format, startStr, time.Local)
		startStr = start.Format(format)
	}
	end = start.Add(time.Minute * time.Duration(minutes))
	endStr = end.Format(format)
	// create sha1 encode string; the secret key binds the code to this server.
	sh := sha1.New()
	sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes)))
	encoded := hex.EncodeToString(sh.Sum(nil))
	code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded)
	return code
}
// HashEmail hashes email address to MD5 string.
// https://en.gravatar.com/site/implement/hash/
func HashEmail(email string) string {
	normalized := strings.ToLower(strings.TrimSpace(email))
	sum := md5.Sum([]byte(normalized))
	return hex.EncodeToString(sum[:])
}
// AvatarLink returns relative avatar link to the site domain by given email,
// which includes app sub-url as prefix. However, it is possible
// to return full URL if user enables Gravatar-like service.
// Resolution order: federated (Libravatar) service, then Gravatar-style
// source, then the bundled default avatar image.
func AvatarLink(email string) (url string) {
	if setting.EnableFederatedAvatar && setting.LibravatarService != nil &&
		strings.Contains(email, "@") {
		var err error
		url, err = setting.LibravatarService.FromEmail(email)
		if err != nil {
			// Log the failure and fall through to the next avatar source.
			log.Error(2, "LibravatarService.FromEmail [%s]: %v", email, err)
		}
	}
	if len(url) == 0 && !setting.DisableGravatar {
		url = setting.GravatarSource + HashEmail(email)
	}
	if len(url) == 0 {
		url = setting.AppSubUrl + "/img/avatar_default.png"
	}
	return url
}
// Seconds-based time units
const (
	Minute = 60
	Hour   = 60 * Minute
	Day    = 24 * Hour
	Week   = 7 * Day
	Month  = 30 * Day  // approximation: 30-day month
	Year   = 12 * Month // approximation: 360-day year
)

// timeSince renders the interval between then and now as a localized,
// human-friendly phrase ("5 minutes ago", "2 days from now", ...).
func timeSince(then time.Time, lang string) string {
	now := time.Now()
	lbl := i18n.Tr(lang, "tool.ago")
	diff := now.Unix() - then.Unix()
	if then.After(now) {
		// Future timestamp: flip the label and measure forwards.
		lbl = i18n.Tr(lang, "tool.from_now")
		diff = then.Unix() - now.Unix()
	}
	// Buckets run smallest to largest; "1 X" phrasings cover up to two
	// units, plural phrasings the rest of each range.
	switch {
	case diff <= 0:
		return i18n.Tr(lang, "tool.now")
	case diff <= 2:
		return i18n.Tr(lang, "tool.1s", lbl)
	case diff < 1*Minute:
		return i18n.Tr(lang, "tool.seconds", diff, lbl)
	case diff < 2*Minute:
		return i18n.Tr(lang, "tool.1m", lbl)
	case diff < 1*Hour:
		return i18n.Tr(lang, "tool.minutes", diff/Minute, lbl)
	case diff < 2*Hour:
		return i18n.Tr(lang, "tool.1h", lbl)
	case diff < 1*Day:
		return i18n.Tr(lang, "tool.hours", diff/Hour, lbl)
	case diff < 2*Day:
		return i18n.Tr(lang, "tool.1d", lbl)
	case diff < 1*Week:
		return i18n.Tr(lang, "tool.days", diff/Day, lbl)
	case diff < 2*Week:
		return i18n.Tr(lang, "tool.1w", lbl)
	case diff < 1*Month:
		return i18n.Tr(lang, "tool.weeks", diff/Week, lbl)
	case diff < 2*Month:
		return i18n.Tr(lang, "tool.1mon", lbl)
	case diff < 1*Year:
		return i18n.Tr(lang, "tool.months", diff/Month, lbl)
	case diff < 2*Year:
		return i18n.Tr(lang, "tool.1y", lbl)
	default:
		return i18n.Tr(lang, "tool.years", diff/Year, lbl)
	}
}

// RawTimeSince is the plain-text form of TimeSince.
func RawTimeSince(t time.Time, lang string) string {
	return timeSince(t, lang)
}

// TimeSince calculates the time interval and generate user-friendly string.
// The phrase is wrapped in a <span> carrying the exact timestamp as a tooltip.
func TimeSince(t time.Time, lang string) template.HTML {
	return template.HTML(fmt.Sprintf(`<span class="time-since" title="%s">%s</span>`, t.Format(setting.TimeFormat), timeSince(t, lang)))
}
// logn returns the logarithm of n in base b.
func logn(n, b float64) float64 {
	return math.Log(n) / math.Log(b)
}

// humanateBytes renders s using the largest suffix from sizes for which the
// scaled value is at least 1, with one decimal place for values below 10.
func humanateBytes(s uint64, base float64, sizes []string) string {
	if s < 10 {
		// Tiny values need no scaling; use the base unit suffix from sizes
		// (previously hard-coded to "B", which ignored the sizes parameter).
		return fmt.Sprintf("%d%s", s, sizes[0])
	}
	// e is the exponent of the largest power of base not exceeding s.
	e := math.Floor(logn(float64(s), base))
	suffix := sizes[int(e)]
	val := float64(s) / math.Pow(base, e) // e is already integral; the second Floor was redundant
	f := "%.0f"
	if val < 10 {
		f = "%.1f"
	}
	return fmt.Sprintf(f+"%s", val, suffix)
}

// FileSize calculates the file size and generate user-friendly string.
func FileSize(s int64) string {
	sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"}
	return humanateBytes(uint64(s), 1024, sizes)
}
// Subtract deals with subtraction of all types of number.
// If both operands are integer kinds the result is an int64; otherwise it is
// a float64. Operands of any other type are treated as zero.
func Subtract(left interface{}, right interface{}) interface{} {
	var rleft, rright int64
	var fleft, fright float64
	isInt := true
	switch v := left.(type) {
	case int:
		rleft = int64(v)
	case int8:
		rleft = int64(v)
	case int16:
		rleft = int64(v)
	case int32:
		rleft = int64(v)
	case int64:
		rleft = v
	case float32:
		fleft = float64(v)
		isInt = false
	case float64:
		fleft = v
		isInt = false
	}
	switch v := right.(type) {
	case int:
		rright = int64(v)
	case int8:
		rright = int64(v)
	case int16:
		rright = int64(v)
	case int32:
		rright = int64(v)
	case int64:
		rright = v
	case float32:
		// BUG FIX: previously read float64(left.(float32)) here, which
		// misread the operand and panicked whenever left was not a float32.
		fright = float64(v)
		isInt = false
	case float64:
		// BUG FIX: previously assigned left's float64 value to fleft here,
		// clobbering the left operand and leaving fright at zero.
		fright = v
		isInt = false
	}
	if isInt {
		return rleft - rright
	}
	return fleft + float64(rleft) - (fright + float64(rright))
}
// EllipsisString returns a truncated short string,
// it appends '...' in the end of the length of string is too large.
// Strings that already fit within length are returned unchanged (previously
// a string of exactly length characters was truncated), and a length too
// small to hold the ellipsis yields "..." alone rather than panicking on a
// negative slice index.
func EllipsisString(str string, length int) string {
	if len(str) <= length {
		return str
	}
	if length <= 3 {
		return "..."
	}
	return str[:length-3] + "..."
}
// TruncateString returns a truncated string with given limit,
// it returns input string if length is not reached limit.
func TruncateString(str string, limit int) string {
	if len(str) >= limit {
		return str[:limit]
	}
	return str
}
|
package main
import (
"fmt"
"io"
"net/http"
"os"
)
// logWritter is an io.Writer that prints each chunk it receives, plus the
// chunk's length, to stdout.
// NOTE(review): "Writter" is a misspelling of "Writer"; renaming would touch
// all uses, so it is left as-is here.
type logWritter struct{}
// main fetches a URL (to demonstrate error handling), then streams a
// textFileReader into a logWritter and prints the number of bytes copied.
func main() {
	resp, err := http.Get("https://vidhya03.labkit.in")
	if err != nil {
		fmt.Println("Error :", err)
		os.Exit(-1)
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	if resp.Body != nil {
		lw := logWritter{}
		rd := textFileReader{}
		// NOTE(review): io.Copy only returns once rd reports io.EOF or an
		// error; confirm textFileReader.Read eventually does so, otherwise
		// this loops forever.
		wtn, err := io.Copy(lw, rd)
		if err != nil {
			fmt.Println("Error :", err)
			os.Exit(-1)
		}
		fmt.Println(wtn)
	}
}
// Write prints the chunk's contents and size to stdout and reports the full
// chunk as consumed; it never fails.
func (logWritter) Write(bs []byte) (int, error) {
	n := len(bs)
	fmt.Printf("%s\nThe total length is  %d\n", string(bs), n)
	return n, nil
}
// textFileReader is a demo io.Reader that produces one fixed line of text.
// BUG FIX: the previous Read always returned (1, nil) without writing any
// data or ever signalling io.EOF, so io.Copy on it would spin forever. It
// now emits a sample message once and terminates the stream.
type textFileReader struct{}

// Read copies a sample line into bs and ends the stream with io.EOF.
func (textFileReader) Read(bs []byte) (int, error) {
	n := copy(bs, "sample text\n")
	return n, io.EOF
}
|
package common
import (
"strconv"
"time"
)
// TimeToString returns the current Unix time in milliseconds, rendered as a
// base-10 string.
func TimeToString() string {
	return strconv.FormatInt(Time(), 10)
}

// Time returns the current Unix time in milliseconds.
func Time() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
}
|
package main
import (
"reflect"
"testing"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/kubereboot/kured/pkg/alerts"
assert "gotest.tools/v3/assert"
papi "github.com/prometheus/client_golang/api"
)
// BlockingChecker is a test double for RebootBlocker whose answer is fixed
// by the blocking field.
type BlockingChecker struct {
	blocking bool
}

// isBlocked reports the configured blocking state.
func (fbc BlockingChecker) isBlocked() bool {
	return fbc.blocking
}

var _ RebootBlocker = BlockingChecker{}       // Verify that Type implements Interface.
var _ RebootBlocker = (*BlockingChecker)(nil) // Verify that *Type implements Interface.
// Test_flagCheck verifies that flagCheck converts a Slack webhook URL
// (optionally wrapped in single or double quotes) into a shoutrrr
// "slack://" notifyURL, and that quotes are stripped from a directly
// supplied notifyURL. It mutates the package globals slackHookURL and
// notifyURL, so the scenarios below must run in this order.
func Test_flagCheck(t *testing.T) {
	var cmd *cobra.Command
	var args []string
	slackHookURL = "https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
	expected := "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}
	// validate that surrounding quotes are stripped
	slackHookURL = "\"https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET\""
	expected = "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}
	slackHookURL = "'https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET'"
	expected = "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}
	// With no slack hook, a quoted notifyURL should only have its quotes stripped.
	slackHookURL = ""
	notifyURL = "\"teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com\""
	expected = "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("notifyURL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}
	notifyURL = "'teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com'"
	expected = "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("notifyURL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}
}
// Test_stripQuotes checks that stripQuotes removes exactly one pair of
// matching surrounding quotes (single or double) and leaves any other
// input — unbalanced quotes, tiny strings, empty strings — unchanged.
func Test_stripQuotes(t *testing.T) {
	cases := []struct {
		name     string
		input    string
		expected string
	}{
		{"string with no surrounding quotes is unchanged", "Hello, world!", "Hello, world!"},
		{"string with surrounding double quotes should strip quotes", "\"Hello, world!\"", "Hello, world!"},
		{"string with surrounding single quotes should strip quotes", "'Hello, world!'", "Hello, world!"},
		{"string with unbalanced surrounding quotes is unchanged", "'Hello, world!\"", "'Hello, world!\""},
		{"string with length of one is unchanged", "'", "'"},
		{"string with length of zero is unchanged", "", ""},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			if got := stripQuotes(c.input); !reflect.DeepEqual(got, c.expected) {
				t.Errorf("stripQuotes() = %v, expected %v", got, c.expected)
			}
		})
	}
}
// Test_rebootBlocked verifies the OR semantics of rebootBlocked: the
// reboot is blocked when any configured RebootBlocker reports blocked,
// and — via a prometheus client pointed at an unreachable address — that
// an error while querying alerts is treated as "blocked" (fail safe).
func Test_rebootBlocked(t *testing.T) {
	noCheckers := []RebootBlocker{}
	nonblockingChecker := BlockingChecker{blocking: false}
	blockingChecker := BlockingChecker{blocking: true}
	// Instantiate a prometheusClient with a broken_url
	promClient, err := alerts.NewPromClient(papi.Config{Address: "broken_url"})
	if err != nil {
		log.Fatal("Can't create prometheusClient: ", err)
	}
	brokenPrometheusClient := PrometheusBlockingChecker{promClient: promClient, filter: nil, firingOnly: false}
	type args struct {
		blockers []RebootBlocker
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "Do not block on no blocker defined",
			args: args{blockers: noCheckers},
			want: false,
		},
		{
			name: "Ensure a blocker blocks",
			args: args{blockers: []RebootBlocker{blockingChecker}},
			want: true,
		},
		{
			name: "Ensure a non-blocker doesn't block",
			args: args{blockers: []RebootBlocker{nonblockingChecker}},
			want: false,
		},
		{
			name: "Ensure one blocker is enough to block",
			args: args{blockers: []RebootBlocker{nonblockingChecker, blockingChecker}},
			want: true,
		},
		{
			name: "Do block on error contacting prometheus API",
			args: args{blockers: []RebootBlocker{brokenPrometheusClient}},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := rebootBlocked(tt.args.blockers...); got != tt.want {
				t.Errorf("rebootBlocked() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_buildHostCommand checks that a command destined for the host is
// wrapped in an nsenter invocation targeting the given PID's mount
// namespace.
func Test_buildHostCommand(t *testing.T) {
	cases := []struct {
		name    string
		pid     int
		command []string
		want    []string
	}{
		{
			name:    "Ensure command will run with nsenter",
			pid:     1,
			command: []string{"ls", "-Fal"},
			want:    []string{"/usr/bin/nsenter", "-m/proc/1/ns/mnt", "--", "ls", "-Fal"},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got := buildHostCommand(c.pid, c.command)
			if !reflect.DeepEqual(got, c.want) {
				t.Errorf("buildHostCommand() = %v, want %v", got, c.want)
			}
		})
	}
}
// Test_buildSentinelCommand verifies how the reboot sentinel is probed:
// with only a sentinel file configured, a shell `test -f` command is
// generated; when an explicit sentinel command is also given, it wins.
func Test_buildSentinelCommand(t *testing.T) {
	type args struct {
		rebootSentinelFile    string
		rebootSentinelCommand string
	}
	tests := []struct {
		name string
		args args
		want []string
	}{
		{
			name: "Ensure a sentinelFile generates a shell 'test' command with the right file",
			args: args{
				rebootSentinelFile:    "/test1",
				rebootSentinelCommand: "",
			},
			want: []string{"test", "-f", "/test1"},
		},
		{
			name: "Ensure a sentinelCommand has priority over a sentinelFile if both are provided (because sentinelFile is always provided)",
			args: args{
				rebootSentinelFile:    "/test1",
				rebootSentinelCommand: "/sbin/reboot-required -r",
			},
			want: []string{"/sbin/reboot-required", "-r"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := buildSentinelCommand(tt.args.rebootSentinelFile, tt.args.rebootSentinelCommand); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("buildSentinelCommand() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_parseRebootCommand checks that a reboot command line is split
// into an argv slice.
func Test_parseRebootCommand(t *testing.T) {
	cases := []struct {
		name          string
		rebootCommand string
		want          []string
	}{
		{
			name:          "Ensure a reboot command is properly parsed",
			rebootCommand: "/sbin/systemctl reboot",
			want:          []string{"/sbin/systemctl", "reboot"},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got := parseRebootCommand(c.rebootCommand)
			if !reflect.DeepEqual(got, c.want) {
				t.Errorf("parseRebootCommand() = %v, want %v", got, c.want)
			}
		})
	}
}
// Test_rebootRequired verifies the sentinel-command contract: exit
// status 0 means a reboot is required, non-zero means it is not.
func Test_rebootRequired(t *testing.T) {
	type args struct {
		sentinelCommand []string
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "Ensure rc = 0 means reboot required",
			args: args{
				sentinelCommand: []string{"true"},
			},
			want: true,
		},
		{
			name: "Ensure rc != 0 means reboot NOT required",
			args: args{
				sentinelCommand: []string{"false"},
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := rebootRequired(tt.args.sentinelCommand); got != tt.want {
				t.Errorf("rebootRequired() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_rebootRequired_fatals checks that rebootRequired calls log.Fatal
// when the sentinel command cannot be run at all (nonexistent binary),
// but not when the command merely runs. The logrus ExitFunc is swapped
// for a flag-setter so the fatal path does not kill the test process;
// the deferred reset restores it for other tests.
func Test_rebootRequired_fatals(t *testing.T) {
	cases := []struct {
		param       []string
		expectFatal bool
	}{
		{
			param:       []string{"true"},
			expectFatal: false,
		},
		{
			param:       []string{"./babar"},
			expectFatal: true,
		},
	}
	defer func() { log.StandardLogger().ExitFunc = nil }()
	var fatal bool
	log.StandardLogger().ExitFunc = func(int) { fatal = true }
	for _, c := range cases {
		// Reset before each case so one fatal doesn't leak into the next.
		fatal = false
		rebootRequired(c.param)
		assert.Equal(t, c.expectFatal, fatal)
	}
}
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package models
// BuildStat is the per-product, per-date aggregate of build results
// persisted in the "build_stat" collection (see TableName).
type BuildStat struct {
	ProductName         string        `bson:"product_name" json:"productName"`
	TotalSuccess        int           `bson:"total_success" json:"totalSuccess"`
	TotalFailure        int           `bson:"total_failure" json:"totalFailure"`
	TotalTimeout        int           `bson:"total_timeout" json:"totalTimeout"`
	TotalDuration       int64         `bson:"total_duration" json:"totalDuration"`
	TotalBuildCount     int           `bson:"total_build_count" json:"totalBuildCount"`
	MaxDuration         int64         `bson:"max_duration" json:"maxDuration"`
	MaxDurationPipeline *PipelineInfo `bson:"max_duration_pipeline" json:"maxDurationPipeline"`
	Date                string        `bson:"date" json:"date"`
	CreateTime          int64         `bson:"create_time" json:"createTime"`
	UpdateTime          int64         `bson:"update_time" json:"updateTime"`
}
// PipelineInfo identifies the pipeline task that produced the maximum
// duration recorded in BuildStat.MaxDurationPipeline.
type PipelineInfo struct {
	TaskID       int64  `bson:"task_id" json:"taskId"`
	PipelineName string `bson:"pipeline_name" json:"pipelineName"`
	Type         string `bson:"type" json:"type"`
	MaxDuration  int64  `bson:"max_duration" json:"maxDuration"`
}
// TableName returns the collection name BuildStat documents are stored in.
func (BuildStat) TableName() string {
	return "build_stat"
}
|
package errors
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
// xx returns a freshly instantiated internal error so the tests can
// inspect how it renders and where it was created.
func xx() error {
	return DefInternalError("code1", "err1").New()
}
// TestPrintDefError verifies the %+v rendering of a defined error:
// it must contain the "code: message" pair and the name of the function
// that created it (stack information), here "xx".
func TestPrintDefError(t *testing.T) {
	result := fmt.Sprintf("%+v\n", xx())
	fmt.Println(result)
	require.Contains(t, result, "code1: err1")
	require.Contains(t, result, "xx")
}
// TestHttpStatus pins the HTTP status code each defined error type maps
// to via HttpStatus (note the non-standard 441 used for timeouts).
func TestHttpStatus(t *testing.T) {
	require.Equal(t, 500, HttpStatus(DefInternalError("code1", "err1").New().GetType()))
	require.Equal(t, 404, HttpStatus(DefNotFound("code1", "err1").New().GetType()))
	require.Equal(t, 400, HttpStatus(DefBadRequest("code1", "err1").New().GetType()))
	require.Equal(t, 403, HttpStatus(DefForbidden("code1", "err1").New().GetType()))
	require.Equal(t, 501, HttpStatus(DefNotImplement("code1", "err1").New().GetType()))
	require.Equal(t, 441, HttpStatus(DefTimeout("code1", "err1").New().GetType()))
	require.Equal(t, 401, HttpStatus(DefUnauthorized("code1", "err1").New().GetType()))
}
|
package solutions
// WordTree is a trie node over the lowercase ASCII alphabet. A non-empty
// Word marks that the path from the root to this node spells a stored word.
type WordTree struct {
	Nodes [26]*WordTree
	Word  string
}

// findWords reports every word in words that can be traced on board by
// stepping between horizontally/vertically adjacent cells, using each
// cell at most once per word (LeetCode 212, "Word Search II"). Words
// and board cells must consist of lowercase ASCII letters.
func findWords(board [][]byte, words []string) []string {
	var found []string
	trie := buildTrie(words)
	for row := 0; row < len(board); row++ {
		for col := 0; col < len(board[0]); col++ {
			findWord(board, trie, row, col, &found)
		}
	}
	return found
}

// buildTrie assembles a trie containing every word in words; the full
// word is stored on its terminal node for O(1) reporting during search.
func buildTrie(words []string) *WordTree {
	root := &WordTree{}
	for _, w := range words {
		node := root
		for _, r := range w {
			idx := r - 'a'
			child := node.Nodes[idx]
			if child == nil {
				child = &WordTree{}
				node.Nodes[idx] = child
			}
			node = child
		}
		node.Word = w
	}
	return root
}

// findWord runs a DFS from cell (i, j), descending the trie in lockstep
// with the board walk. Visited cells are temporarily marked '$' and
// restored on the way back; found words are blanked in the trie so each
// is reported only once.
func findWord(board [][]byte, trie *WordTree, i, j int, result *[]string) {
	if i < 0 || i >= len(board) || j < 0 || j >= len(board[0]) {
		return
	}
	ch := board[i][j]
	if ch == '$' { // already on the current path
		return
	}
	next := trie.Nodes[ch-'a']
	if next == nil { // no stored word continues with this letter
		return
	}
	if next.Word != "" {
		*result = append(*result, next.Word)
		next.Word = "" // report once
	}
	board[i][j] = '$'
	findWord(board, next, i+1, j, result)
	findWord(board, next, i-1, j, result)
	findWord(board, next, i, j+1, result)
	findWord(board, next, i, j-1, result)
	board[i][j] = ch
}
|
package blade
import (
"testing"
)
// Test_RadixTree inserts a fixed key set through a transaction, checks
// the transaction's allocation counters, commits, reads everything back,
// then deletes every key and verifies the tree is empty again.
func Test_RadixTree(t *testing.T) {
	testCases := []struct {
		k string
		v string
	}{
		{"", "empty"},
		{"r", ":radix"},
		{"radix", "radix"},
		{"radixTree", "radixTree"},
		{"radixTxn", "radixTxn"},
		{"t", "test"},
		{"this is a radix tree", "just a radix tree"},
		{"yes, you are right", "yes"},
	}
	tree := NewRadixTree()
	AssertThat(t, tree.Size()).IsEqualTo(0)
	txn := tree.OpenTXN()
	for _, c := range testCases {
		txn.Insert([]byte(c.k), c.v)
	}
	AssertThat(t, txn.hits).IsEqualTo(0)
	AssertThat(t, txn.allocs).IsEqualTo(14)
	tree = txn.Commit()
	AssertThat(t, tree.Size()).IsEqualTo(len(testCases))
	for _, c := range testCases {
		v, ok := tree.Get([]byte(c.k))
		AssertThat(t, ok).IsTrue()
		AssertThat(t, v).IsEqualTo(c.v)
	}
	txn = tree.OpenTXN()
	for _, c := range testCases {
		v := txn.Delete([]byte(c.k))
		// Bug fix: this previously asserted IsEqualTo(v) — comparing the
		// value with itself, which can never fail. Compare against the
		// value that was inserted for this key instead (assumes Delete
		// returns the removed value — confirm against the Txn API).
		AssertThat(t, v).IsEqualTo(c.v)
	}
	tree = txn.Commit()
	AssertThat(t, tree.Size()).IsEqualTo(0)
}
// Test_RadixTree_DeletePrefix verifies that DeletePrefix removes exactly
// the keys sharing the given prefix ("radix", "radixTree", "radixTxn")
// and leaves the remaining five entries intact after commit.
func Test_RadixTree_DeletePrefix(t *testing.T) {
	testCases := []struct {
		k string
		v string
	}{
		{"", "empty"},
		{"r", ":radix"},
		{"radix", "radix"},
		{"radixTree", "radixTree"},
		{"radixTxn", "radixTxn"},
		{"t", "test"},
		{"this is a radix tree", "just a radix tree"},
		{"yes, you are right", "yes"},
	}
	tree := NewRadixTree()
	AssertThat(t, tree.Size()).IsEqualTo(0)
	txn := tree.OpenTXN()
	for _, c := range testCases {
		txn.Insert([]byte(c.k), c.v)
	}
	tree = txn.Commit()
	AssertThat(t, tree.Size()).IsEqualTo(len(testCases))
	txn = tree.OpenTXN()
	count := txn.DeletePrefix([]byte("radix"))
	AssertThat(t, count).IsEqualTo(3)
	tree = txn.Commit()
	AssertThat(t, tree.Size()).IsEqualTo(5)
}
// Test_RadixTree_cache mirrors Test_RadixTree but enables MemOptimized
// on the transaction, asserting that the node cache is hit (hits=9,
// allocs=5) instead of allocating per insert.
func Test_RadixTree_cache(t *testing.T) {
	testCases := []struct {
		k string
		v string
	}{
		{"", "empty"},
		{"r", ":radix"},
		{"radix", "radix"},
		{"radixTree", "radixTree"},
		{"radixTxn", "radixTxn"},
		{"t", "test"},
		{"this is a radix tree", "just a radix tree"},
		{"yes, you are right", "yes"},
	}
	tree := NewRadixTree()
	AssertThat(t, tree.Size()).IsEqualTo(0)
	txn := tree.OpenTXN()
	txn.MemOptimized()
	for _, c := range testCases {
		txn.Insert([]byte(c.k), c.v)
	}
	AssertThat(t, txn.hits).IsEqualTo(9)
	AssertThat(t, txn.allocs).IsEqualTo(5)
	tree = txn.Commit()
	AssertThat(t, tree.Size()).IsEqualTo(len(testCases))
	for _, c := range testCases {
		v, ok := tree.Get([]byte(c.k))
		AssertThat(t, ok).IsTrue()
		AssertThat(t, v).IsEqualTo(c.v)
	}
	txn = tree.OpenTXN()
	txn.MemOptimized()
	for _, c := range testCases {
		v := txn.Delete([]byte(c.k))
		// Bug fix: this previously asserted IsEqualTo(v) — a tautology
		// that can never fail. Compare against the inserted value
		// (assumes Delete returns the removed value — confirm).
		AssertThat(t, v).IsEqualTo(c.v)
	}
	tree = txn.Commit()
	AssertThat(t, tree.Size()).IsEqualTo(0)
}
// Test_RadixTree_walk verifies the three traversal entry points:
// Walk visits every entry, WalkPrefix visits only keys with the given
// prefix (3 for "radix"), and WalkPath visits the entries on the path
// to a key ("", "r", "radix", "radixTree" → 4). The callbacks return
// false so no traversal terminates early.
func Test_RadixTree_walk(t *testing.T) {
	testCases := []struct {
		k string
		v string
	}{
		{"", "empty"},
		{"r", ":radix"},
		{"radix", "radix"},
		{"radixTree", "radixTree"},
		{"radixTxn", "radixTxn"},
		{"t", "test"},
		{"this is a radix tree", "just a radix tree"},
		{"yes, you are right", "yes"},
	}
	tree := NewRadixTree()
	AssertThat(t, tree.Size()).IsEqualTo(0)
	txn := tree.OpenTXN()
	for _, c := range testCases {
		txn.Insert([]byte(c.k), c.v)
	}
	tree = txn.Commit()
	AssertThat(t, tree.Size()).IsEqualTo(len(testCases))
	walkCount := 0
	tree.Walk(func(k []byte, v interface{}) bool {
		walkCount++
		return false
	})
	AssertThat(t, walkCount).IsEqualTo(len(testCases))
	walkPrefixCount := 0
	tree.WalkPrefix([]byte("radix"), func(k []byte, v interface{}) bool {
		walkPrefixCount++
		return false
	})
	AssertThat(t, walkPrefixCount).IsEqualTo(3)
	walkPathCount := 0
	tree.WalkPath([]byte("radixTree"), func(k []byte, v interface{}) bool {
		walkPathCount++
		return false
	})
	AssertThat(t, walkPathCount).IsEqualTo(4)
}
// Benchmark_RadixTree measures per-insert cost on a single transaction,
// cycling through a fixed key set. The final Commit keeps the work
// observable. NOTE(review): the (cheap) setup is inside the timed
// region; a b.ResetTimer() after OpenTXN would make that explicit.
func Benchmark_RadixTree(b *testing.B) {
	testCases := []struct {
		k string
		v string
	}{
		{"", "empty"},
		{"r", ":radix"},
		{"radix", "radix"},
		{"radixTree", "radixTree"},
		{"radixTxn", "radixTxn"},
		{"t", "test"},
		{"this is a radix tree", "just a radix tree"},
		{"yes, you are right", "yes"},
	}
	tree := NewRadixTree()
	txn := tree.OpenTXN()
	for i := 0; i < b.N; i++ {
		c := testCases[i%len(testCases)]
		txn.Insert([]byte(c.k), c.v)
	}
	tree = txn.Commit()
}
// Benchmark_RadixTree_cache is the MemOptimized counterpart of
// Benchmark_RadixTree, so the two can be compared to quantify the
// node-cache's effect on insert cost.
func Benchmark_RadixTree_cache(b *testing.B) {
	testCases := []struct {
		k string
		v string
	}{
		{"", "empty"},
		{"r", ":radix"},
		{"radix", "radix"},
		{"radixTree", "radixTree"},
		{"radixTxn", "radixTxn"},
		{"t", "test"},
		{"this is a radix tree", "just a radix tree"},
		{"yes, you are right", "yes"},
	}
	tree := NewRadixTree()
	txn := tree.OpenTXN()
	txn.MemOptimized()
	for i := 0; i < b.N; i++ {
		c := testCases[i%len(testCases)]
		txn.Insert([]byte(c.k), c.v)
	}
	tree = txn.Commit()
}
|
package cmdgw
import (
"log"
apds "spca/apd/services"
gwif "spca/iif/cmdgw/if"
apdm "spca/apd/models"
// cmdh "spca/apd/cmdorchestrator"
)
// CmdGwHdl maps a service name to its command-gateway handle; values are
// stored as interface{} and asserted to *apds.Service where used.
var CmdGwHdl map[string]interface{}

// srvlist holds the names of the services the gateway connects to.
var srvlist []string
// CmdGwInit initializes the gateway data structures and establishes the
// connection to each known service.
func CmdGwInit() {
	log.Println("Command Gateway Init")
	CmdGwHdl = make(map[string]interface{})
	// Bug fix: make([]string, 1) created a slice that already held one
	// empty name, so the append produced ["", "SERVICE-1"] and the loop
	// below registered a bogus handle under the empty key. Allocate with
	// zero length (capacity 1) instead.
	srvlist = make([]string, 0, 1)
	srvlist = append(srvlist, "SERVICE-1")
	// Get the context for each service.
	for _, s := range srvlist {
		CmdGwHdl[s] = apds.NewServiceCmds(s)
	}
	log.Println("Command Gateway Init ..... Complete")
}
// CmdScheduler constructs the command list for the request (prepending a
// dependent command when CmdType is DEPCMD) and kicks off concurrent
// execution; results arrive asynchronously on opc.
//
// NOTE(review): the type assertion below panics if "SERVICE-1" is absent
// from CmdGwHdl (i.e. CmdGwInit was not called first) — confirm callers
// guarantee initialization, or use the comma-ok form.
func CmdScheduler(cip *apdm.CmdIp, opc chan<- apdm.CmdResult) error {
	sname := "SERVICE-1"
	log.Printf("CmdScheduler: input : %+v \n", cip)
	s := CmdGwHdl[sname].(*apds.Service)
	// construct the command
	var cmdlist []gwif.CmdGw
	// cmdlist = make([]gwif.CmdGw, 1)
	if cip.CmdType == apdm.DEPCMD {
		// The dependent command must run before the main command.
		depcmd := s.NewCmd(cip.DepCmd, cip.DepCmdParam)
		cmdlist = append(cmdlist, depcmd)
	}
	cmd := s.NewCmd(cip.Cmd, cip.CmdParam)
	cmdlist = append(cmdlist, cmd)
	go CmdGwRunConc(cip, cmdlist, opc)
	log.Println("Trigger Concurrent Exection")
	return nil
}
// CmdGwRunConc executes the commands in clist sequentially (it is run as
// a goroutine, hence "concurrent" with the caller), stops at the first
// error, and propagates the outcome for this command id on opc.
func CmdGwRunConc(cmdinfo *apdm.CmdIp, clist []gwif.CmdGw, opc chan<- apdm.CmdResult) {
	// log.Printf("CmdGwRunConc: Start Command Id: %d \n", cmdinfo.Id)
	var err error
	for _, c := range clist {
		err = c.Execute()
		if err != nil {
			break // abort remaining commands on first failure
		}
	}
	log.Printf("Command execution returned Err: %+v Command Id: %d", err, cmdinfo.Id)
	cmdResult := apdm.CmdResult{
		Id:  cmdinfo.Id,
		Err: err,
	}
	opc <- cmdResult
	// log.Println("CmdGwRunConc: End")
}
|
package parser_test
import (
"github.com/bytesparadise/libasciidoc/pkg/parser"
"github.com/bytesparadise/libasciidoc/pkg/types"
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// This suite exercises parser.CollectFootnotes over a fragment channel:
// Footnote elements found in section titles and paragraph bodies must be
// moved into the shared Footnotes collection (receiving sequential IDs)
// and replaced in-place by FootnoteReference elements.
var _ = Describe("collect footnotes", func() {
	Context("in section titles", func() {
		// Fixtures are rebuilt in BeforeEach because CollectFootnotes
		// mutates the elements it processes.
		var sectionWithFootnote1 *types.Section
		var sectionWithFootnoteRef1 *types.Section
		var sectionWithFootnote2 *types.Section
		var sectionWithFootnoteRef2 *types.Section
		var sectionWithoutFootnote *types.Section
		BeforeEach(func() {
			sectionWithFootnote1 = &types.Section{
				Title: []interface{}{
					&types.StringElement{
						Content: "cookies",
					},
					&types.Footnote{
						Ref: "", // without ref
						Elements: []interface{}{
							&types.StringElement{
								Content: "cookies",
							},
						},
					},
				},
			}
			// Expected post-processing shape of sectionWithFootnote1.
			sectionWithFootnoteRef1 = &types.Section{
				Title: []interface{}{
					&types.StringElement{
						Content: "cookies",
					},
					&types.FootnoteReference{
						ID: 1,
					},
				},
			}
			sectionWithFootnote2 = &types.Section{
				Title: []interface{}{
					&types.StringElement{
						Content: "pasta",
					},
					&types.Footnote{
						Ref: "pasta", // with ref
						Elements: []interface{}{
							&types.StringElement{
								Content: "pasta",
							},
						},
					},
				},
			}
			// Expected post-processing shape of sectionWithFootnote2.
			sectionWithFootnoteRef2 = &types.Section{
				Title: []interface{}{
					&types.StringElement{
						Content: "pasta",
					},
					&types.FootnoteReference{
						ID:  2,
						Ref: "pasta", // with ref
					},
				},
			}
			sectionWithoutFootnote = &types.Section{
				Title: []interface{}{
					&types.StringElement{
						Content: "coffee",
					},
				},
			}
		})
		It("no footnote", func() {
			// given
			c := make(chan types.DocumentFragment, 1)
			c <- types.DocumentFragment{
				Elements: []interface{}{
					sectionWithoutFootnote,
				},
			}
			close(c)
			footnotes := types.NewFootnotes()
			// when
			result := parser.CollectFootnotes(footnotes, make(<-chan interface{}), c)
			// then
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					sectionWithoutFootnote,
				},
			}))
			Expect(footnotes.Notes).To(BeEmpty())
		})
		It("single footnote", func() {
			// given
			c := make(chan types.DocumentFragment, 1)
			c <- types.DocumentFragment{
				Elements: []interface{}{
					sectionWithFootnote1,
					sectionWithoutFootnote,
				},
			}
			close(c)
			footnotes := types.NewFootnotes()
			// when
			result := parser.CollectFootnotes(footnotes, make(<-chan interface{}), c)
			// then
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					sectionWithFootnoteRef1,
					sectionWithoutFootnote,
				},
			}))
			Expect(footnotes.Notes).To(Equal([]*types.Footnote{
				{
					ID:  1,  // set
					Ref: "", // without ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "cookies",
						},
					},
				},
			}))
		})
		It("multiple footnotes in same fragment", func() {
			// given
			c := make(chan types.DocumentFragment, 1)
			c <- types.DocumentFragment{
				Elements: []interface{}{
					sectionWithFootnote1,
					sectionWithoutFootnote,
					sectionWithFootnote2,
				},
			}
			close(c)
			footnotes := types.NewFootnotes()
			// when
			result := parser.CollectFootnotes(footnotes, make(<-chan interface{}), c)
			// then
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					sectionWithFootnoteRef1,
					sectionWithoutFootnote,
					sectionWithFootnoteRef2,
				},
			}))
			// IDs are assigned in encounter order across the fragment.
			Expect(footnotes.Notes).To(Equal([]*types.Footnote{
				{
					ID:  1,  // set
					Ref: "", // without ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "cookies",
						},
					},
				},
				{
					ID:  2,       // set
					Ref: "pasta", // with ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "pasta",
						},
					},
				},
			}))
		})
		It("multiple footnotes in separate fragments", func() {
			// given
			c := make(chan types.DocumentFragment, 2)
			c <- types.DocumentFragment{
				Elements: []interface{}{
					sectionWithoutFootnote,
					sectionWithFootnote1,
					sectionWithoutFootnote,
				},
			}
			c <- types.DocumentFragment{
				Elements: []interface{}{
					sectionWithoutFootnote,
					sectionWithFootnote2,
					sectionWithoutFootnote,
				},
			}
			close(c)
			footnotes := types.NewFootnotes()
			// when
			result := parser.CollectFootnotes(footnotes, make(<-chan interface{}), c)
			// then
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					sectionWithoutFootnote,
					sectionWithFootnoteRef1,
					sectionWithoutFootnote,
				},
			}))
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					sectionWithoutFootnote,
					sectionWithFootnoteRef2,
					sectionWithoutFootnote,
				},
			}))
			// ID numbering continues across fragments.
			Expect(footnotes.Notes).To(Equal([]*types.Footnote{
				{
					ID:  1,  // set
					Ref: "", // without ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "cookies",
						},
					},
				},
				{
					ID:  2,       // set
					Ref: "pasta", // with ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "pasta",
						},
					},
				},
			}))
		})
	})
	// Same scenarios as above, but with footnotes embedded in paragraph
	// elements instead of section titles.
	Context("in paragraphs", func() {
		var paragraphWithFootnote1 *types.Paragraph
		var paragraphWithFootnoteRef1 *types.Paragraph
		var paragraphWithFootnote2 *types.Paragraph
		var paragraphWithFootnoteRef2 *types.Paragraph
		var paragraphWithoutFootnote *types.Paragraph
		BeforeEach(func() {
			paragraphWithFootnote1 = &types.Paragraph{
				Elements: []interface{}{
					&types.StringElement{
						Content: "cookies",
					},
					&types.Footnote{
						Ref: "", // without ref
						Elements: []interface{}{
							&types.StringElement{
								Content: "cookies",
							},
						},
					},
				},
			}
			paragraphWithFootnoteRef1 = &types.Paragraph{
				Elements: []interface{}{
					&types.StringElement{
						Content: "cookies",
					},
					&types.FootnoteReference{
						ID: 1,
					},
				},
			}
			paragraphWithFootnote2 = &types.Paragraph{
				Elements: []interface{}{
					&types.StringElement{
						Content: "pasta",
					},
					&types.Footnote{
						Ref: "pasta", // with ref
						Elements: []interface{}{
							&types.StringElement{
								Content: "pasta",
							},
						},
					},
				},
			}
			paragraphWithFootnoteRef2 = &types.Paragraph{
				Elements: []interface{}{
					&types.StringElement{
						Content: "pasta",
					},
					&types.FootnoteReference{
						ID:  2,
						Ref: "pasta", // with ref
					},
				},
			}
			paragraphWithoutFootnote = &types.Paragraph{
				Elements: []interface{}{
					&types.StringElement{
						Content: "coffee",
					},
				},
			}
		})
		It("no footnote", func() {
			// given
			c := make(chan types.DocumentFragment, 1)
			c <- types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithoutFootnote,
				},
			}
			close(c)
			footnotes := types.NewFootnotes()
			// when
			result := parser.CollectFootnotes(footnotes, make(<-chan interface{}), c)
			// then
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithoutFootnote,
				},
			}))
			Expect(footnotes.Notes).To(BeEmpty())
		})
		It("single footnote", func() {
			// given
			c := make(chan types.DocumentFragment, 1)
			c <- types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithFootnote1,
					paragraphWithoutFootnote,
				},
			}
			close(c)
			footnotes := types.NewFootnotes()
			// when
			result := parser.CollectFootnotes(footnotes, make(<-chan interface{}), c)
			// then
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithFootnoteRef1,
					paragraphWithoutFootnote,
				},
			}))
			Expect(footnotes.Notes).To(Equal([]*types.Footnote{
				{
					ID:  1,  // set
					Ref: "", // without ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "cookies",
						},
					},
				},
			}))
		})
		It("multiple footnotes in same fragment", func() {
			// given
			c := make(chan types.DocumentFragment, 1)
			c <- types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithFootnote1,
					paragraphWithoutFootnote,
					paragraphWithFootnote2,
				},
			}
			close(c)
			footnotes := types.NewFootnotes()
			// when
			result := parser.CollectFootnotes(footnotes, make(<-chan interface{}), c)
			// then
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithFootnoteRef1,
					paragraphWithoutFootnote,
					paragraphWithFootnoteRef2,
				},
			}))
			Expect(footnotes.Notes).To(Equal([]*types.Footnote{
				{
					ID:  1,  // set
					Ref: "", // without ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "cookies",
						},
					},
				},
				{
					ID:  2,       // set
					Ref: "pasta", // with ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "pasta",
						},
					},
				},
			}))
		})
		It("multiple footnotes in separate fragments", func() {
			// given
			c := make(chan types.DocumentFragment, 2)
			c <- types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithoutFootnote,
					paragraphWithFootnote1,
					paragraphWithoutFootnote,
				},
			}
			c <- types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithoutFootnote,
					paragraphWithFootnote2,
					paragraphWithoutFootnote,
				},
			}
			close(c)
			footnotes := types.NewFootnotes()
			// when
			result := parser.CollectFootnotes(footnotes, make(<-chan interface{}), c)
			// then
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithoutFootnote,
					paragraphWithFootnoteRef1,
					paragraphWithoutFootnote,
				},
			}))
			Expect(<-result).To(MatchDocumentFragment(types.DocumentFragment{
				Elements: []interface{}{
					paragraphWithoutFootnote,
					paragraphWithFootnoteRef2,
					paragraphWithoutFootnote,
				},
			}))
			Expect(footnotes.Notes).To(Equal([]*types.Footnote{
				{
					ID:  1,  // set
					Ref: "", // without ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "cookies",
						},
					},
				},
				{
					ID:  2,       // set
					Ref: "pasta", // with ref
					Elements: []interface{}{
						&types.StringElement{
							Content: "pasta",
						},
					},
				},
			}))
		})
	})
})
|
// Copyright © 2014-2015 Galvanized Logic Inc.
// Use is governed by a BSD-style license found in the LICENSE file.
package grid
// path is an A* implementation that works with 2D level layouts that conform
// to Plan. See:
// http://en.wikipedia.org/wiki/A*_search_algorithm
// https://www.redblobgames.com/pathfinding/a-star/introduction.html
// http://www.policyalmanac.org/games/aStarTutorial.htm
// http://www.gamasutra.com/view/feature/131505/toward_more_realistic_pathfinding.php?print=1
// Other pathfinding links:
// http://www.ai-blog.net/archives/000152.html (discusses navigation meshes)
// http://grail.cs.washington.edu/projects/crowd-flows/ (flowfield)
// Design Notes:
// • This A* implementation has been lightly optimized for short routes
// on small grids.
// • Scratch variables avoid reallocating memory for sequential path lookups,
// but may not be the way to go if concurrent access is needed.
// • Check all changes against the benchmark provided.
// Path finds a route between two points within a given Plan.
type Path interface {
	// Find calculates the path from one point to another. A path of
	// x, y points (flattened pairs: x0, y0, x1, y1, ...) is returned
	// upon success. The returned path will be empty if there was no
	// way to get to the destination point. The to: tx, ty, and
	// from: fx, fy points are expected to be traversable spots.
	Find(fx, fy, tx, ty int) (path []int)
}

// NewPath creates a new path finder for the given Plan p.
func NewPath(p Plan) Path { return newPath(p) }
// =============================================================================
// path is the default implementation of Path. It is NOT safe for
// concurrent use: the scratch slices and node map below are shared
// across sequential Find calls to avoid reallocation.
type path struct {
	fp           Plan          // floor plan.
	xsz, ysz     int           // floor plan x,y dimensions
	candidates   []*node       // nodes to be considered (the A* open set).
	route        []int         // scratch for returning path points.
	neighbours   []*node       // scratch for calculating current neighbours.
	orthMoveCost int           // cost for moving up, down, left, right.
	diagMoveCost int           // cost for moving diagonally.
	nodes        map[int]*node // reuse nodes across multiple calls.
}
// newPath is used by test cases to get an initialized path instance.
func newPath(fp Plan) *path {
	xsz, ysz := fp.Size()
	return &path{
		fp:           fp,
		xsz:          xsz,
		ysz:          ysz,
		orthMoveCost: 1,                 // heuristic horizontal, vertical move cost.
		diagMoveCost: 2,                 // heuristic diagonal move cost.
		candidates:   make([]*node, 50), // guess for small paths.
		neighbours:   make([]*node, 8),  // max neighbours is 8.
		route:        []int{},
		nodes:        make(map[int]*node, xsz*ysz),
	}
}
// nodeState values for node.state. The zero value means "unvisited";
// isChecked (1) marks closed nodes, isCandidate (2) marks open nodes.
const (
	isChecked   = 1 << iota // Node has been tried in path.
	isCandidate = 1 << iota // Node available for trial in path.
)
// id calculates a unique id for a given x, y value. Each node is labelled
// with an id that can be used as map lookups.
//
// Bug fix: this previously computed x*p.xsz + y. With y ranging over
// [0, ysz), uniqueness requires the multiplier to be at least ysz, so on
// grids where ysz > xsz distinct cells collided (e.g. xsz=2, ysz=4:
// id(1,1) == id(0,3) == 3), corrupting the shared node map. Multiplying
// by ysz makes the mapping injective for all grid shapes.
func (p *path) id(x, y int) int { return x*p.ysz + y }
// Find calculates the path from one point to another. A path of intermediate
// points is returned upon success. The returned path will be empty if there
// was no way to get to the destination point. The from, to points are expected
// to be valid spots within the path's initialized floor plan.
func (p *path) Find(fx, fy, tx, ty int) (path []int) {
	// Bail out early if either endpoint is not traversable.
	if !p.fp.IsOpen(fx, fy) || !p.fp.IsOpen(tx, ty) {
		return p.route[:0] // no path found, return empty list.
	}
	// reset any previous path data. Nodes are retained (map reuse) but
	// their open/closed state is cleared.
	p.candidates = p.candidates[:0]
	for _, n := range p.nodes {
		n.state = 0
	}
	// create the initial candidate set from the start node.
	start := newNode(p.id(fx, fy), fx, fy, 0)
	start.projCost = start.heuristic(tx, ty)
	p.candidates = []*node{start}
	p.nodes[start.id] = start
	// continue while there are still nodes to be tried.
	destinationNode := p.id(tx, ty)
	for len(p.candidates) > 0 {
		// current is the open node with the lowest projected cost (f).
		current := p.closest()
		if current.id == destinationNode {
			return p.traceBack(p.route[0:0], current) // backtrack to get path from end point.
		}
		// query neighbours for possible path nodes.
		for _, neighbour := range p.neighbourNodes(current) {
			if neighbour.state != isCandidate {
				neighbour.from = current.id
				p.candidates = append(p.candidates, neighbour)
				neighbour.state = isCandidate
			}
			// Update the projected cost for all neighbours since candidates
			// may be revisited.
			neighbour.projCost = neighbour.pathCost + neighbour.heuristic(tx, ty)
		}
	}
	return p.route[0:0] // no path found, return empty list.
}
// closest returns the node with the lowest f score from the list of open
// nodes, removes it from that list, and marks it checked (closed).
func (p *path) closest() *node {
	best, bestAt := p.candidates[0], 0
	for i, n := range p.candidates {
		if n.projCost < best.projCost {
			best, bestAt = n, i
		}
	}
	// Splice the winner out of the open list and close it.
	p.candidates = append(p.candidates[:bestAt], p.candidates[bestAt+1:]...)
	best.state = isChecked
	return best
}
// neighbourNodes creates the valid neighbour nodes for the given node.
// The nodes path distance variables are not set. Neighbours are valid if
// they are:
//   - inside the floor plan.
//   - passable.
//   - a diagonal with two passable adjacent neighbours (no corner cutting).
func (p *path) neighbourNodes(n *node) []*node {
	p.neighbours = p.neighbours[0:0] // reset while preserving memory.
	x, y := n.x, n.y
	var xplus, xminus, yplus, yminus bool // horizontal/vertical grid spots.
	if xplus = p.fp.IsOpen(x+1, y); xplus {
		p.addNeighbour(n.x+1, n.y, n.pathCost+p.orthMoveCost)
	}
	if xminus = p.fp.IsOpen(x-1, y); xminus {
		p.addNeighbour(n.x-1, n.y, n.pathCost+p.orthMoveCost)
	}
	if yplus = p.fp.IsOpen(x, y+1); yplus {
		p.addNeighbour(n.x, n.y+1, n.pathCost+p.orthMoveCost)
	}
	if yminus = p.fp.IsOpen(x, y-1); yminus {
		p.addNeighbour(n.x, n.y-1, n.pathCost+p.orthMoveCost)
	}
	if xminus && yminus && p.fp.IsOpen(x-1, y-1) {
		// diagonal: xminus, yminus must be passable.
		p.addNeighbour(n.x-1, n.y-1, n.pathCost+p.diagMoveCost)
	}
	if xminus && yplus && p.fp.IsOpen(x-1, y+1) {
		// diagonal: xminus, yplus must be passable.
		p.addNeighbour(n.x-1, n.y+1, n.pathCost+p.diagMoveCost)
	}
	if xplus && yminus && p.fp.IsOpen(x+1, y-1) {
		// diagonal: xplus, yminus must be passable.
		p.addNeighbour(n.x+1, n.y-1, n.pathCost+p.diagMoveCost)
	}
	if xplus && yplus && p.fp.IsOpen(x+1, y+1) {
		// diagonal: xplus, yplus must be passable.
		p.addNeighbour(n.x+1, n.y+1, n.pathCost+p.diagMoveCost)
	}
	return p.neighbours
}
// addNeighbour records the node at (x, y) as a neighbour of the node being
// expanded, creating it on first sight. Nodes already on the checked (closed)
// list are ignored; unvisited nodes additionally get their path cost set.
func (p *path) addNeighbour(x, y, cost int) {
	id := p.id(x, y)
	n, known := p.nodes[id]
	if !known {
		n = newNode(id, x, y, cost)
		p.nodes[id] = n
		p.neighbours = append(p.neighbours, n)
		return
	}
	switch n.state {
	case 0: // not yet visited: take the offered cost.
		n.pathCost = cost
		p.neighbours = append(p.neighbours, n)
	case isCandidate: // still open: may be revisited with a better cost.
		p.neighbours = append(p.neighbours, n)
	}
	// isChecked nodes are skipped entirely.
}
// traceBack recursively follows the from-links back to the start node and
// appends the discovered path to route as (x, y) pairs, start first.
func (p *path) traceBack(route []int, prev *node) []int {
	if prev.from != -1 {
		// Emit the ancestors before this node so the path reads start-to-goal.
		route = p.traceBack(route, p.nodes[prev.from])
	}
	return append(route, prev.x, prev.y)
}
// =============================================================================

// node keeps the A* bookkeeping for one grid location.
type node struct {
	id       int // unique identifier derived from the (x, y) location.
	x, y     int // grid coordinates.
	pathCost int // accumulated cost from the start node (the "g" score).
	projCost int // pathCost plus heuristic guess to the goal (f = g + h).
	from     int // id of the predecessor on the best known path; -1 when none.
	state    int // candidate/checked tracking flag.
}

// newNode builds a node for grid location (x, y) with the given cost from the
// start. The projected cost starts at zero and the predecessor is unset.
func newNode(id, x, y, cost int) *node {
	n := &node{id: id, x: x, y: y, pathCost: cost}
	n.from = -1
	return n
}

// heuristic estimates the remaining distance from this node to (x, y) using
// the squared euclidean distance (no square root is taken).
func (n *node) heuristic(x, y int) int {
	dx := x - n.x
	dy := y - n.y
	return dx*dx + dy*dy
}
|
package virtual_services
import (
"reflect"
"strings"
"github.com/kiali/kiali/kubernetes"
"github.com/kiali/kiali/models"
)
// SingleHostChecker validates that each host within a namespace is targeted
// by at most one of the given VirtualServices (see Check).
type SingleHostChecker struct {
	Namespace       string
	VirtualServices []kubernetes.IstioObject
}

// Host is the parsed form of a VirtualService host string, split into its
// service, namespace and cluster-domain parts (see formatHostForSearch).
type Host struct {
	Service   string
	Namespace string
	Cluster   string
}
// Check indexes every VirtualService host by cluster/namespace/service and
// flags VirtualServices that either share a host, or that coexist with a
// wildcard ("*") host in the same namespace. VirtualServices bound to
// gateways are exempt. Returns the accumulated validations.
func (in SingleHostChecker) Check() models.IstioValidations {
	hostCounter := make(map[string]map[string]map[string][]*kubernetes.IstioObject)
	validations := models.IstioValidations{}
	// Build the cluster -> namespace -> service -> []VirtualService index.
	for _, vs := range in.VirtualServices {
		for _, host := range getHost(vs) {
			storeHost(hostCounter, vs, host)
		}
	}
	for _, clusterCounter := range hostCounter {
		for _, namespaceCounter := range clusterCounter {
			// True when some VirtualService uses the "*" host in this namespace.
			isNamespaceWildcard := len(namespaceCounter["*"]) > 0
			for _, serviceCounter := range namespaceCounter {
				targetSameHost := len(serviceCounter) > 1
				otherServiceHosts := len(namespaceCounter) > 1
				for _, virtualService := range serviceCounter {
					// Marking virtualService as invalid if:
					// - there is more than one virtual service per a host
					// - there is one virtual service with wildcard and there are other virtual services pointing
					//   a host for that namespace
					if targetSameHost || isNamespaceWildcard && otherServiceHosts {
						// Gateway-attached VirtualServices may legitimately share hosts.
						if !hasGateways(virtualService) {
							multipleVirtualServiceCheck(*virtualService, validations)
						}
					}
				}
			}
		}
	}
	return validations
}
// multipleVirtualServiceCheck attaches a "virtualservices.singlehost" check
// on spec/hosts to the given VirtualService and merges it into validations.
// NOTE(review): Valid is set to true even though a failing check is attached
// — presumably MergeValidations combines this flag across entries; confirm.
func multipleVirtualServiceCheck(virtualService kubernetes.IstioObject, validations models.IstioValidations) {
	virtualServiceName := virtualService.GetObjectMeta().Name
	key := models.IstioValidationKey{Name: virtualServiceName, ObjectType: "virtualservice"}
	checks := models.Build("virtualservices.singlehost", "spec/hosts")
	rrValidation := &models.IstioValidation{
		Name:       virtualServiceName,
		ObjectType: "virtualservice",
		Valid:      true,
		Checks: []*models.IstioCheck{
			&checks,
		},
	}
	validations.MergeValidations(models.IstioValidations{key: rrValidation})
}
// storeHost records vs under hostCounter[cluster][namespace][service],
// creating any missing intermediate maps along the way.
func storeHost(hostCounter map[string]map[string]map[string][]*kubernetes.IstioObject, vs kubernetes.IstioObject, host Host) {
	cluster, namespace, service := host.Cluster, host.Namespace, host.Service
	if hostCounter[cluster] == nil {
		hostCounter[cluster] = make(map[string]map[string][]*kubernetes.IstioObject)
	}
	if hostCounter[cluster][namespace] == nil {
		hostCounter[cluster][namespace] = make(map[string][]*kubernetes.IstioObject)
	}
	// append on a nil slice allocates it, so a first-seen service works too.
	hostCounter[cluster][namespace][service] = append(hostCounter[cluster][namespace][service], &vs)
}
// getHost extracts the spec.hosts entries of a VirtualService and parses each
// string entry into a Host. A missing or non-slice hosts field, and any
// non-string element, is skipped.
func getHost(virtualService kubernetes.IstioObject) []Host {
	rawHosts := virtualService.GetSpec()["hosts"]
	if rawHosts == nil {
		return []Host{}
	}
	value := reflect.ValueOf(rawHosts)
	if value.Kind() != reflect.Slice {
		return []Host{}
	}
	namespace := virtualService.GetObjectMeta().Namespace
	parsed := make([]Host, 0, value.Len())
	for i := 0; i < value.Len(); i++ {
		name, isString := value.Index(i).Interface().(string)
		if !isString {
			continue
		}
		parsed = append(parsed, formatHostForSearch(name, namespace))
	}
	return parsed
}
// formatHostForSearch converts a host string into a Host struct for searching:
// e.g. reviews -> reviews, virtualService.Namespace, svc.cluster.local
// e.g. reviews.bookinfo.svc.cluster.local -> reviews, bookinfo, svc.cluster.local
// e.g. *.bookinfo.svc.cluster.local -> *, bookinfo, svc.cluster.local
// e.g. * -> *, *, *
func formatHostForSearch(hostName, virtualServiceNamespace string) Host {
	parts := strings.Split(hostName, ".")
	host := Host{Service: parts[0]}
	switch {
	case len(parts) > 1:
		// Fully (or partially) qualified: second label is the namespace,
		// anything after that is the cluster domain.
		host.Namespace = parts[1]
		if len(parts) > 2 {
			host.Cluster = strings.Join(parts[2:], ".")
		}
	case host.Service == "*":
		// Bare wildcard matches everything.
		host.Namespace = "*"
		host.Cluster = "*"
	default:
		// Bare service name: default to the VirtualService's namespace and
		// the standard cluster domain.
		host.Namespace = virtualServiceNamespace
		host.Cluster = "svc.cluster.local"
	}
	return host
}
// hasGateways reports whether the VirtualService declares a non-empty
// spec.gateways list.
func hasGateways(virtualService *kubernetes.IstioObject) bool {
	gateways, found := (*virtualService).GetSpec()["gateways"]
	if !found {
		return false
	}
	list, isList := gateways.([]interface{})
	return isList && len(list) > 0
}
|
package db
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
)
// EConfig holds the MySQL connection settings consumed by InitOrm.
type EConfig struct {
	UserName string `json:"userName"`
	PassWord string `json:"passWord"`
	Addr     string `json:"addr"`
	Port     int    `json:"port"`
	DB       string `json:"db"`
}
// InitOrm opens a gorm MySQL connection from cfg, applies table-naming and
// connection-pool settings, and returns the handle. It panics when the
// connection cannot be opened.
func InitOrm(cfg *EConfig) *gorm.DB {
	dsn := fmt.Sprintf(
		"%s:%s@tcp(%s:%d)/%s",
		cfg.UserName,
		cfg.PassWord,
		cfg.Addr,
		cfg.Port,
		cfg.DB,
	)
	conn, err := gorm.Open("mysql", dsn)
	if err != nil {
		fmt.Println(err.Error())
		panic("orm init error")
	}
	// Use singular table names and bound the underlying sql.DB pool.
	conn.SingularTable(true)
	conn.DB().SetMaxIdleConns(30)
	conn.DB().SetMaxOpenConns(100)
	return conn
}
|
package server
import (
"github.com/go-squads/comet-backend/domain"
"github.com/go-squads/comet-backend/repository"
"net/http"
"encoding/json"
)
// WithAuth wraps handler n so that only requests whose Authorization header
// carries a token accepted by the user repository are forwarded; all other
// requests get a 401 with a JSON error body.
func WithAuth(n http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		token := r.Header.Get("Authorization")
		// `auth != false` was a non-idiomatic boolean comparison; use the
		// value directly and keep the happy path first.
		if repository.GetUserRepository().ValidateUserToken(token) {
			n.ServeHTTP(w, r)
			return
		}
		w.WriteHeader(http.StatusUnauthorized)
		json.NewEncoder(w).Encode(domain.Response{Status: http.StatusUnauthorized, Message: "User Unauthorized"})
	}
}
|
package main
import (
"encoding/json"
"fmt"
"time"
"code-lib/notify/rabbitmq"
proto "subassembly/timer-controller/proto/notify"
)
// main connects a rabbitmq notifier, builds a one-shot timer notice that
// expires after 10 seconds, and publishes it, printing any error it hits.
func main() {
	// Publisher bound to the timer exchange/routing key.
	notify, err := rabbitmq.NewRabbitNotify(&rabbitmq.RabbitNotifyConf{
		RabbitClientConf: &rabbitmq.RabbitClientConf{
			Host:     "localhost",
			Port:     5672,
			UserName: "guest",
			Password: "guest",
			VHost:    "/",
		},
		Exchange:       "cza.test.timer",
		RoutingKey:     "controller_1s",
		Kind:           "direct",
		PublisherInuse: true,
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	// Notice telling the timer service where to deliver the payload and when.
	msg := proto.TimerNotice{
		Destination: proto.RabbitmqDestination{
			Exchange:   "cza.test.hello",
			RoutingKey: "hello.queue",
		},
		SendUnixTime: time.Now(),
		Expire:       time.Second * 10,
		Target: json.RawMessage([]byte(`
{
"key": 1
}
`)),
	}
	senddata, err := json.Marshal(msg)
	if err != nil {
		fmt.Println(err)
		return
	}
	err = notify.Push(senddata)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("hh")
}
|
package main
// TreeNode is a LeetCode-style binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// toArray appends the values of tr to arr via an in-order traversal (sorted
// order for a BST).
func toArray(tr *TreeNode, arr *[]int) {
	if tr == nil {
		return
	}
	toArray(tr.Left, arr)
	*arr = append(*arr, tr.Val)
	toArray(tr.Right, arr)
}

// findTarget reports whether two distinct nodes in the tree rooted at root
// sum to k (LeetCode 653 "Two Sum IV"). It walks the tree once, checking each
// value against the set of values seen so far; the previous version first
// flattened the whole tree into a slice, costing an extra O(n) pass and
// allocation for no benefit.
func findTarget(root *TreeNode, k int) bool {
	seen := make(map[int]bool)
	var walk func(*TreeNode) bool
	walk = func(n *TreeNode) bool {
		if n == nil {
			return false
		}
		// Check before inserting so a node is never paired with itself.
		if seen[k-n.Val] {
			return true
		}
		seen[n.Val] = true
		return walk(n.Left) || walk(n.Right)
	}
	return walk(root)
}
// main is an empty entry point; the functions above are exercised elsewhere
// (e.g. by tests or a judge harness).
func main() {
}
|
package types
import (
"fmt"
"math/big"
"github.com/zhaohaijun/matrixchain/vm/neovm/interfaces"
)
// Map is a VM stack-item map. Keys are compared with StackItems.Equals (not
// Go map identity), so every mutating method scans for a logically equal key.
type Map struct {
	_map map[StackItems]StackItems
}
// NewMap returns an empty, ready-to-use Map.
func NewMap() *Map {
	return &Map{_map: make(map[StackItems]StackItems)}
}
// Add stores value under key, first removing any existing entry whose key is
// Equals-equal so logical duplicates never accumulate.
func (this *Map) Add(key StackItems, value StackItems) {
	this.Remove(key)
	this._map[key] = value
}

// Clear drops every entry by replacing the backing map.
func (this *Map) Clear() {
	this._map = make(map[StackItems]StackItems)
}

// Remove deletes the (single) entry whose key is Equals-equal to key, if any.
func (this *Map) Remove(key StackItems) {
	for existing := range this._map {
		if existing.Equals(key) {
			delete(this._map, existing)
			return
		}
	}
}
// Equals reports pointer identity: two Maps are equal only when they are the
// same object.
func (this *Map) Equals(that StackItems) bool {
	return this == that
}

// GetBoolean always treats a map as truthy.
func (this *Map) GetBoolean() (bool, error) {
	return true, nil
}

// GetByteArray is unsupported for maps.
func (this *Map) GetByteArray() ([]byte, error) {
	return nil, fmt.Errorf("%s", "Not support map to byte array")
}

// GetBigInteger is unsupported for maps.
func (this *Map) GetBigInteger() (*big.Int, error) {
	return nil, fmt.Errorf("%s", "Not support map to integer")
}

// GetInterface is unsupported for maps.
func (this *Map) GetInterface() (interfaces.Interop, error) {
	return nil, fmt.Errorf("%s", "Not support map to interface")
}

// GetArray is unsupported for maps.
func (this *Map) GetArray() ([]StackItems, error) {
	return nil, fmt.Errorf("%s", "Not support map to array")
}

// GetStruct is unsupported for maps.
func (this *Map) GetStruct() ([]StackItems, error) {
	return nil, fmt.Errorf("%s", "Not support map to struct")
}

// GetMap exposes the underlying Go map.
func (this *Map) GetMap() (map[StackItems]StackItems, error) {
	return this._map, nil
}

// TryGetValue returns the value stored under an Equals-equal key, or nil when
// no such key exists.
func (this *Map) TryGetValue(key StackItems) StackItems {
	for candidate, value := range this._map {
		if candidate.Equals(key) {
			return value
		}
	}
	return nil
}

// IsMapKey reports that a Map may not itself be used as a map key.
func (this *Map) IsMapKey() bool {
	return false
}
|
package main
// Leetcode 5786. (medium)
// maximumRemovals binary-searches for the largest k such that after removing
// the first k indices listed in removable from s, p is still a subsequence
// of s.
func maximumRemovals(s string, p string, removable []int) int {
	lo, hi := 0, len(removable)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		// Mark the first mid+1 removable indices as deleted.
		removed := make(map[int]bool, mid+1)
		for _, idx := range removable[:mid+1] {
			removed[idx] = true
		}
		if existsOfMaximumRemovals(s, p, removed) {
			lo = mid + 1
		} else {
			hi = mid - 1
		}
	}
	return lo
}

// existsOfMaximumRemovals reports whether p is a subsequence of s when the
// indices present in remove are treated as deleted from s.
func existsOfMaximumRemovals(s string, p string, remove map[int]bool) bool {
	matched := 0
	for pos := 0; pos < len(s) && matched < len(p); pos++ {
		if !remove[pos] && s[pos] == p[matched] {
			matched++
		}
	}
	return matched == len(p)
}
|
package test
import (
"time"
)
// Unit is a fixture struct covering the scalar, time, bool and slice column
// kinds the orm layer maps (tag names give the column names).
type Unit struct {
	ID int `orm:"id key auto"`
	I8 int8 `orm:"i8"`
	I16 int16 `orm:"i16"`
	I32 int32 `orm:"i32"`
	// NOTE(review): field is named I64 but typed uint64, unlike the signed
	// I8/I16/I32 above — possibly intentional to cover unsigned columns;
	// confirm before "fixing".
	I64 uint64 `orm:"i64"`
	Name string `orm:"name"`
	Value float32 `orm:"value"`
	F64 float64 `orm:"f64"`
	TimeStamp time.Time `orm:"ts"`
	Flag bool `orm:"flag"`
	IArray []int `orm:"iArray"`
	FArray []float32 `orm:"fArray"`
	StrArray []string `orm:"strArray"`
}

// ExtUnit is a fixture with a pointer reference to another mapped struct.
type ExtUnit struct {
	ID int `orm:"id key auto"`
	Unit *Unit `orm:"unit"`
}

// ExtUnitList is a fixture with an embedded value and a slice of mapped
// structs.
type ExtUnitList struct {
	ID int `orm:"id key auto"`
	Unit Unit `orm:"unit"`
	UnitList []Unit `orm:"unitlist"`
}
|
package session
import (
"context"
"database/sql"
"sync"
"time"
"shared/protobuf/pb"
"shared/utility/errors"
"shared/utility/event"
"shared/utility/glog"
"shared/utility/param"
"shared/utility/servertime"
"shared/common"
"shared/utility/safe"
"shared/utility/session"
"gamesvr/manager"
"gamesvr/model"
)
const (
	// Session lifecycle states.
	StatusInit    = 0 // initialized, not yet online
	StatusOnline  = 1 // online
	StatusOffline = 2 // offline
)
// Builder produces empty Session values for the session manager.
type Builder struct{}

// NewSession returns a fresh Session with its embedded managed-session state
// and an empty user model; OnCreated fills it in later.
func (b *Builder) NewSession() session.Session {
	return &Session{
		EmbedManagedSession: &session.EmbedManagedSession{},
		User:                &model.User{},
	}
}
// Session is the per-player game-server session: managed-session plumbing,
// the loaded user model, and login-token bookkeeping. The embedded RWMutex
// guards the user state touched by the scheduled jobs in OnCreated.
type Session struct {
	sync.RWMutex
	*session.EmbedManagedSession
	*model.User
	// portalSvr is the portal server handling this player's connection
	// (see RefreshPortalServer).
	portalSvr string
	// GuildServer string
	// GuildContext *balancer.Context
	Token  string // current client login token (see CheckToken).
	CTime  int64  // creation time of Token, used to arbitrate token replacement.
	Status int8   // StatusInit / StatusOnline / StatusOffline.
	Serial int
}
// OnCreated loads (or, when opts.AllowNil permits, creates) the user model
// for opts.ID, registers the session's periodic jobs and redis event
// handlers, then runs the online logic. Called once when the session object
// is created.
func (s *Session) OnCreated(ctx context.Context, opts session.OnCreatedOpts) error {
	s.User = model.NewUser(opts.ID)
	err := manager.MySQL.Load(ctx, s.User)
	if err != nil {
		if err == sql.ErrNoRows {
			if opts.AllowNil {
				// First login: initialize and persist a brand-new user record.
				err = s.User.InitForCreate(ctx)
				if err != nil {
					glog.Errorf(" user InitForCreate error: %v", err)
					return err
				}
				err = manager.MySQL.Create(ctx, s.User)
				if err != nil {
					glog.Errorf("new user error: %v", err)
					return err
				}
			} else {
				return common.ErrUserNotFound
			}
		} else {
			glog.Errorf("load user error: %v", err)
			return err
		}
	}
	glog.Debugf("load user: %+v", *s.User)
	s.User.Init(ctx)
	// Every 5 minutes: persist the user and refresh online bookkeeping.
	s.ScheduleCall(5*time.Minute, func() {
		s.UpdateUserPower()
		// Flush player data to MySQL, retrying up to 5 times.
		safe.Exec(5, func(i int) error {
			s.Lock()
			defer s.Unlock()
			err := manager.MySQL.Save(context.Background(), s.User)
			if err != nil {
				glog.Errorf("save user error: %v, times: %d ,\n", err, i)
			}
			return err
		})
		// Refresh the online marker (expires after 6 minutes).
		err := manager.Global.UserOnline(context.Background(), s.ID, servertime.Now().Unix(), 6*time.Minute)
		if err != nil {
			glog.Errorf("user online error: %v", err)
		}
		// Push each SR/SSR character's level/star/power summary to redis.
		err = s.UserCharacterSimpleUpdate(context.Background())
		if err != nil {
			glog.Errorf("UserCharacterSimpleUpdate err: %v", err)
		}
	})
	// Every 30 seconds: drain queued events and push periodic updates.
	s.ScheduleCall(30*time.Second, func() {
		s.Lock()
		defer s.Unlock()
		manager.EventQueue.ExecuteEventsInQueue(context.Background(), s.User.ID)
		// Check every half minute whether a mercenary character was borrowed.
		s.MercenaryCharacterList(context.Background())
		err := s.GuildDataRefresh(context.Background())
		if err != nil {
			// BUG FIX: this error was previously checked and silently
			// discarded (empty block); log it like the sibling jobs.
			glog.Errorf("guild data refresh error: %v", err)
		}
		err = s.PushGreetings(context.Background())
		if err != nil {
			glog.Errorf("push greetings error: %v\n", err)
		}
		err = s.PushGuildTasks(context.Background())
		if err != nil {
			glog.Errorf("push guild task items error: %v\n", err)
		}
		s.User.On30Second(servertime.Now().Unix())
	})
	// Every second: per-second user tick plus timed pushes.
	s.ScheduleCall(time.Second, func() {
		s.Lock()
		defer s.Unlock()
		s.User.OnSecond(servertime.Now().Unix())
		s.TimerPush()
	})
	// Every hour: hourly user tick.
	s.ScheduleCall(time.Hour, func() {
		s.Lock()
		defer s.Unlock()
		s.User.OnHour()
	})
	// Register redis event handlers and replay anything already queued.
	s.RegisterEventQueue(ctx)
	// Run the online logic.
	s.Online(ctx)
	return nil
}
// RegisterEventQueue registers this user's redis event handlers (graveyard
// help, yggdrasil mail take-back, yggdrasil intimacy change) and then drains
// any events queued while the user was offline.
func (s *Session) RegisterEventQueue(ctx context.Context) {
	// Graveyard help received from another player.
	event.UserEventHandler.Register(s.User.ID, common.EventTypeGraveyardHelp, func(args *param.Param) error {
		helpType, err := args.GetInt(0)
		if err != nil {
			return errors.WrapTrace(err)
		}
		buildUID, err := args.GetInt64(1)
		if err != nil {
			return errors.WrapTrace(err)
		}
		sec, err := args.GetInt32(2)
		if err != nil {
			return errors.WrapTrace(err)
		}
		helpAt, err := args.GetInt64(3)
		if err != nil {
			return errors.WrapTrace(err)
		}
		s.User.GraveyardReceiveHelp(helpType, buildUID, sec, helpAt)
		return nil
	})
	// Yggdrasil goods returned to this user by mail.
	event.UserEventHandler.Register(s.User.ID, common.EventTypeYggdrasilMail, func(args *param.Param) error {
		fromUserName, err := args.GetString(0)
		if err != nil {
			return errors.WrapTrace(err)
		}
		goodsID, err := args.GetInt64(1)
		if err != nil {
			return errors.WrapTrace(err)
		}
		return s.User.Yggdrasil.TryTakeBackGoods(context.Background(), s.User.ID, fromUserName, goodsID)
	})
	// Yggdrasil intimacy change, forwarded to the client as a push.
	event.UserEventHandler.Register(s.User.ID, common.EventTypeYggdrasilIntimacyChange, func(args *param.Param) error {
		userID, err := args.GetInt64(0)
		if err != nil {
			return errors.WrapTrace(err)
		}
		intimacy, err := args.GetInt32(1)
		if err != nil {
			return errors.WrapTrace(err)
		}
		totalIntimacy, err := args.GetInt32(2)
		if err != nil {
			return errors.WrapTrace(err)
		}
		s.User.AddYggPush(&pb.S2CYggdrasilIntimacyChange{
			UserId:        userID,
			IntimacyValue: intimacy,
			TotalIntimacy: totalIntimacy,
		})
		return nil
	})
	// Replay anything that queued up before the handlers existed.
	manager.EventQueue.ExecuteEventsInQueue(ctx, s.User.ID)
}
// OnTriggered is part of the session interface; this session has no
// trigger-time behaviour.
func (s *Session) OnTriggered(ctx context.Context) {
}
// OnClosed runs when the session is torn down: it executes the offline logic,
// persists the user (with retries) and clears the routing/balance records on
// the RPC server. Each safe.Exec call retries its job up to 5 times.
func (s *Session) OnClosed() {
	s.Lock()
	defer s.Unlock()
	glog.Debugf("OnClosed(): user: %d", s.ID)
	// Run the offline logic.
	s.Offline()
	ctx := context.Background()
	// Final save of the player data.
	safe.Exec(5, func(i int) error {
		err := manager.MySQL.Save(ctx, s.User)
		if err != nil {
			glog.Errorf("save user error: %v, times: %d ,\n", err, i)
		}
		return err
	})
	// Drop this user's routing record and decrement the server's balance count.
	safe.Exec(5, func(i int) error {
		return manager.RPCServer.DelRecord(ctx, s.ID)
	})
	safe.Exec(5, func(i int) error {
		return manager.RPCServer.DecrBalance(ctx)
	})
}
// PrepareContext decorates ctx before request handling; currently a pass-through.
func (s *Session) PrepareContext(ctx context.Context) context.Context {
	// TODO: record the server to reduce redis load
	// ctx = manager.RPCGuildClient.WithSinglecastCtx(ctx, grpc.SinglecastOpt(s.Guild.GuildID, ""))
	// ctx = manager.RPCPortalClient.WithSinglecastCtx(ctx, grpc.SinglecastOpt(s.ID, s.portalSvr))
	return ctx
}

// Defer runs after each handled request and flushes pending client pushes.
func (s *Session) Defer(ctx context.Context) {
	s.MessagePush()
}

// RefreshPortalServer records which portal server currently serves this player.
func (s *Session) RefreshPortalServer(server string) {
	s.portalSvr = server
}
// Online marks the session as online: runs the user's online hook, refreshes
// the global online marker (expires after 6 minutes) and syncs the login
// state to the guild. A session existing implies the user is online.
func (s *Session) Online(ctx context.Context) {
	// s.ConnToken = token
	// s.PortalServer = name
	now := servertime.Now().Unix()
	s.Status = StatusOnline
	s.User.OnOnline(now)
	// return nil
	// Refresh the online state in the global store.
	err := manager.Global.UserOnline(ctx, s.ID, now, 6*time.Minute)
	if err != nil {
		glog.Errorf("UserOnline() error: %v", err)
	}
	// Sync the player's login state to the guild.
	err = s.GuildSync(ctx, common.UserOnline)
	if err != nil {
		glog.Errorf("GuildSync() error: %v", err)
	}
}
// Offline marks the session as offline: runs the user's offline hook, clears
// the global online marker and syncs the logout state to the guild. A session
// closing implies the user went offline.
func (s *Session) Offline() {
	// s.ConnToken = ""
	// s.PortalServer = ""
	s.Status = StatusOffline
	s.User.OnOffline()
	// Clear the online state in the global store.
	err := manager.Global.UserOffline(context.Background(), s.ID)
	if err != nil {
		// BUG FIX: message previously said "user online error" — a copy-paste
		// from Online(); this is the offline path.
		glog.Errorf("user offline error: %v", err)
	}
	// Sync the player's logout state to the guild.
	err = s.GuildSync(
		// make ctx
		context.Background(),
		// manager.RPCGuildClient.WithSinglecastCtx(
		//	context.Background(),
		//	grpc.SinglecastOpt(s.Guild.GuildID, ""),
		// ),
		common.UserOffline,
	)
	if err != nil {
		glog.Errorf("GuildSync() error: %v", err)
	}
}
// func (s *Session) Close() {
// // save to db
// err := manager.MySQL.Save(s.User)
// if err != nil {
// log.Printf("save user,%v\n", err)
// }
// }
//
// func (s *Session) Online(token, name string) {
// s.ConnToken = token
// s.ServerName = name
// s.Status = StatusOnline
// }
//
// func (s *Session) Offline() {
// s.ConnToken = ""
// s.ServerName = ""
// s.Status = StatusOffline
// }
// CheckToken validates a client login token against the one recorded on the
// session. An empty recorded token simply adopts the new one. A different
// token wins only when its creation time is strictly newer; otherwise the
// attempt is rejected as a login from another client.
func (s *Session) CheckToken(token string, ctime int64) error {
	switch {
	case s.Token == "":
		// No token yet: adopt the incoming one.
		s.Token = token
		s.CTime = ctime
	case s.Token != token:
		if ctime <= s.CTime {
			return common.ErrUserLoginInOtherClient
		}
		// Newer token replaces the stored one.
		s.Token = token
		s.CTime = ctime
	}
	return nil
}
|
package steam
import (
"net/url"
"github.com/Jleagle/valve-data-format-go/vdf"
"strconv"
)
// for now use keyvalues, eventually want to use something more
// encoding/json-like, with type assertions & all

// Depot wraps one depot's VDF keyvalue node and exposes typed accessors.
type Depot struct {
	kv vdf.KeyValue
}
// Name returns the depot identifier, stored as the key of the underlying
// keyvalue node.
func (d Depot) Name() string {
	return d.kv.Key
}

// GetManifestMap returns the depot's "manifests" child as a flat
// label -> manifest-id map.
func (d Depot) GetManifestMap() map[string]string {
	manifests, _ := d.kv.GetChild("manifests")
	return manifests.GetChildrenAsMap()
}
// Size returns the depot's MaxSize field in bytes. The value is parsed
// directly as a 64-bit integer so depots larger than 2 GiB are handled
// correctly even on 32-bit platforms (the previous Atoi + int64 cast
// truncated there). A missing or malformed field yields 0 (errors are
// deliberately ignored, matching the other accessors).
func (d Depot) Size() int64 {
	size, _ := d.kv.GetChild("MaxSize")
	n, _ := strconv.ParseInt(size.Value, 10, 64)
	return n
}
// URL returns the base content-server URL for this depot, e.g.
// http://cache2-iad1.steamcontent.com/depot/1113281.
// NOTE(review): the original example comment said cache20 while the code uses
// cache2 — the host is hard-coded either way and already marked for
// dynamic selection; confirm which host is intended.
func (d Depot) URL() url.URL {
	// http://cache20-iad1.steamcontent.com/depot/1113281
	return url.URL{
		Scheme: "http",
		Host:   "cache2-iad1.steamcontent.com", // todo: dynamic
		Path:   "/depot/" + d.Name(),
	}
}
// GetManifest downloads and parses the manifest with the given id from this
// depot's content server.
func (d Depot) GetManifest(id string) (Manifest, error) {
	return DownloadManifest(ManifestUrl(d.URL(), id))
}

// GetOSList returns the set of operating systems this depot targets, taken
// from the config/oslist keyvalue entry.
func (d Depot) GetOSList() map[OS]struct{} {
	config, _ := d.kv.GetChild("config")
	oslist, _ := config.GetChild("oslist")
	return fillOSList(oslist.Value)
}

// GetManifestByLabel resolves a manifest label (e.g. "public") through the
// manifest map and downloads the corresponding manifest.
func (d Depot) GetManifestByLabel(label string) (Manifest, error) {
	return d.GetManifest(d.GetManifestMap()[label])
}
|
package mail
import (
"bytes"
"strconv"
"text/template"
"intra-hub/confperso"
"intra-hub/models"
"archive/zip"
"github.com/astaxie/beego"
"github.com/astaxie/beego/utils"
"io/ioutil"
"os"
"path"
"strings"
"time"
)
const (
	// activationURL is the base link embedded in account e-mails; the user id
	// and token are appended by the senders below.
	activationURL = confperso.Protocol + "://" + confperso.Domain + "/users/activate/"
)
// renderUserMail renders the given body and subject templates with the user's
// data and activation link. Both templates are parsed before either is
// executed, preserving the original error ordering. Errors are logged and
// returned.
func renderUserMail(bodyTmpl, subjectTmpl string, user *models.User) (body, subject string, err error) {
	data := make(map[string]interface{})
	data["User"] = user
	data["Link"] = activationURL + strconv.FormatInt(int64(user.Id), 10) + "/" + user.Token
	tmpl, err := template.New("mail").Parse(bodyTmpl)
	if err != nil {
		beego.Error(err)
		return "", "", err
	}
	tmplSubject, err := template.New("subject").Parse(subjectTmpl)
	if err != nil {
		beego.Error(err)
		return "", "", err
	}
	b := bytes.NewBufferString("")
	if err := tmpl.Execute(b, data); err != nil {
		beego.Error(err)
		return "", "", err
	}
	bsubject := bytes.NewBufferString("")
	if err := tmplSubject.Execute(bsubject, data); err != nil {
		beego.Error(err)
		return "", "", err
	}
	return b.String(), bsubject.String(), nil
}

// SendUserCreated mails the account-activation link to a newly created user.
// (Previously duplicated SendForgotPassword line-for-line; both now share
// renderUserMail.)
func SendUserCreated(user *models.User) error {
	body, subject, err := renderUserMail(templateUserCreated, subjectUserCreated, user)
	if err != nil {
		return err
	}
	sendMail(user.Email, subject, body)
	return nil
}

// SendForgotPassword mails the password-reset (activation) link to a user.
func SendForgotPassword(user *models.User) error {
	body, subject, err := renderUserMail(templateForgotPassword, subjectForgotPassword, user)
	if err != nil {
		return err
	}
	sendMail(user.Email, subject, body)
	return nil
}
// sendMail sends a single HTML mail through beego's SMTP helper using the
// credentials from confperso. Send failures are logged as warnings and not
// returned (best effort).
// NOTE(review): the JSON config is built by string concatenation — a quote or
// backslash in the username/password would produce invalid JSON; consider
// json.Marshal. Left as-is here.
func sendMail(to string, subject, body string) {
	config := `{"username":"` + confperso.EmailUsername + `","password":"` + confperso.EmailPassword + `","host":"` +
		confperso.EmailHost + `","port":` + confperso.EmailHostPort + `}`
	email := utils.NewEMail(config)
	email.Subject = subject
	email.To = []string{to}
	email.HTML = body
	email.From = confperso.EmailUsername
	if err := email.Send(); err != nil {
		beego.Warn("MAIL ERROR", err)
	}
}
// SendBackupEmail zips the file at filepath next to itself (same name, .zip
// extension) and mails the archive to the configured account.
//
// Fixes over the previous version: the source file was opened with os.Open
// but never read (the content actually came from ioutil.ReadFile) — that dead
// open is removed; the zip file handle is now closed on every error path and
// after the mail is sent, and the flush-time Close error is no longer
// ignored.
func SendBackupEmail(filepath string) error {
	config := `{"username":"` + confperso.EmailUsername + `","password":"` + confperso.EmailPassword + `","host":"` +
		confperso.EmailHost + `","port":` + confperso.EmailHostPort + `}`
	email := utils.NewEMail(config)
	email.Subject = "Backup - " + time.Now().Format(time.RFC3339)
	email.To = []string{confperso.EmailUsername}
	email.From = confperso.EmailUsername
	fileName := path.Base(filepath)
	zipName := path.Dir(filepath) + "/" + strings.TrimSuffix(fileName, path.Ext(fileName)) + ".zip"
	zipFile, err := os.Create(zipName)
	if err != nil {
		return err
	}
	w := zip.NewWriter(zipFile)
	f, err := w.Create(fileName)
	if err != nil {
		zipFile.Close()
		return err
	}
	sqlFile, err := ioutil.ReadFile(filepath)
	if err != nil {
		zipFile.Close()
		return err
	}
	if _, err := f.Write(sqlFile); err != nil {
		zipFile.Close()
		return err
	}
	// Flush the archive, then the file; both errors matter for a valid zip.
	if err := w.Close(); err != nil {
		zipFile.Close()
		return err
	}
	if err := zipFile.Close(); err != nil {
		return err
	}
	// Reopen for attaching; close once the mail has been sent.
	zipFile, err = os.Open(zipName)
	if err != nil {
		return err
	}
	defer zipFile.Close()
	if _, err := email.Attach(zipFile, path.Base(zipName), "application/zip"); err != nil {
		return err
	}
	if err := email.Send(); err != nil {
		return err
	}
	beego.Warn(filepath, fileName, zipName)
	return nil
}
|
package nlputils
import (
"strings"
"golang.org/x/text/unicode/norm"
)
// ToWords converts a string into its sequence of words. The input is first
// NFC-normalized and lowercased, then split on spaces and the punctuation
// characters ? . ! , with empty fragments discarded. An input containing no
// words yields a nil slice.
func ToWords(str string) (words []string) {
	// Normalize and lowercase before tokenizing.
	str = strings.ToLower(norm.NFC.String(str))
	var current []rune
	flush := func() {
		if len(current) > 0 {
			words = append(words, string(current))
			current = current[:0]
		}
	}
	for _, r := range str {
		if strings.ContainsRune(" ?.!,", r) {
			flush()
		} else {
			current = append(current, r)
		}
	}
	flush() // emit a trailing word with no terminating separator.
	return
}
|
// Copyright 2018 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package importccl
import (
"bufio"
"context"
"fmt"
"io"
"regexp"
"strings"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/storage/cloud"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/errors"
"github.com/lib/pq/oid"
)
// postgreStream scans SQL statements out of a pg_dump stream. While copy is
// non-nil the scanner is in COPY ... FROM STDIN mode and splits on lines
// instead of SQL semicolons (see split and Next).
type postgreStream struct {
	ctx                   context.Context
	s                     *bufio.Scanner
	copy                  *postgreStreamCopy
	unsupportedStmtLogger *unsupportedStmtLogger
}
// newPostgreStream returns a struct that can stream statements from an
// io.Reader, with max bounding the scanner's token buffer size.
func newPostgreStream(
	ctx context.Context, r io.Reader, max int, unsupportedStmtLogger *unsupportedStmtLogger,
) *postgreStream {
	scanner := bufio.NewScanner(r)
	scanner.Buffer(nil, max)
	stream := &postgreStream{ctx: ctx, s: scanner, unsupportedStmtLogger: unsupportedStmtLogger}
	// The split func consults stream.copy to pick statement vs line mode.
	scanner.Split(stream.split)
	return stream
}
// split is the scanner split func: line-at-a-time while consuming COPY data,
// SQL-statement-at-a-time otherwise.
func (p *postgreStream) split(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if p.copy != nil {
		return bufio.ScanLines(data, atEOF)
	}
	return splitSQLSemicolon(data, atEOF)
}
// splitSQLSemicolon is a bufio.SplitFunc that splits on SQL semicolon tokens,
// delegating statement-boundary detection to the SQL parser so semicolons
// inside strings/comments don't split.
func splitSQLSemicolon(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if pos, ok := parser.SplitFirstStatement(string(data)); ok {
		return pos, data[:pos], nil
	}
	// If we're at EOF, we have a final, non-terminated line. Return it.
	if atEOF {
		return len(data), data, nil
	}
	// Request more data.
	return 0, nil, nil
}
// Next returns the next statement. The type of statement can be one of
// tree.Statement, copyData, or errCopyDone. A nil statement and io.EOF are
// returned when there are no more statements.
func (p *postgreStream) Next() (interface{}, error) {
	// While inside COPY data, emit rows until the copy terminator is seen.
	if p.copy != nil {
		row, err := p.copy.Next()
		if errors.Is(err, errCopyDone) {
			p.copy = nil
			return errCopyDone, nil
		}
		return row, err
	}
	for p.s.Scan() {
		t := p.s.Text()
		// NOTE(review): skipOverComments takes its argument by value and
		// returns nothing, so this call has no effect here — see its comment.
		skipOverComments(t)
		stmts, err := parser.Parse(t)
		if err != nil {
			// There are some statements that CRDB is unable to parse. If the user has
			// indicated that they want to skip these stmts during the IMPORT, then do
			// so here.
			if p.unsupportedStmtLogger.ignoreUnsupported && errors.HasType(err, (*tree.UnsupportedError)(nil)) {
				if unsupportedErr := (*tree.UnsupportedError)(nil); errors.As(err, &unsupportedErr) {
					err := p.unsupportedStmtLogger.log(unsupportedErr.FeatureName, true /* isParseError */)
					if err != nil {
						return nil, err
					}
				}
				continue
			}
			return nil, wrapErrorWithUnsupportedHint(err)
		}
		switch len(stmts) {
		case 0:
			// Got whitespace or comments; try again.
		case 1:
			// If the statement is COPY ... FROM STDIN, set p.copy so the next call to
			// this function will read copy data. We still return this COPY statement
			// for this invocation.
			if cf, ok := stmts[0].AST.(*tree.CopyFrom); ok && cf.Stdin {
				// Set p.copy which reconfigures the scanner's split func.
				p.copy = newPostgreStreamCopy(p.s, copyDefaultDelimiter, copyDefaultNull)
				// We expect a single newline character following the COPY statement before
				// the copy data starts.
				if !p.s.Scan() {
					return nil, errors.Errorf("expected empty line")
				}
				if err := p.s.Err(); err != nil {
					return nil, err
				}
				if len(p.s.Bytes()) != 0 {
					return nil, errors.Errorf("expected empty line")
				}
			}
			return stmts[0].AST, nil
		default:
			return nil, errors.Errorf("unexpected: got %d statements", len(stmts))
		}
	}
	// Surface scanner errors, with a hint when a statement overflowed the buffer.
	if err := p.s.Err(); err != nil {
		if errors.Is(err, bufio.ErrTooLong) {
			err = wrapWithLineTooLongHint(
				errors.HandledWithMessage(err, "line too long"),
			)
		}
		return nil, err
	}
	return nil, io.EOF
}
var (
	// ignoreComments matches a leading "--" SQL line comment, with optional
	// leading whitespace.
	ignoreComments = regexp.MustCompile(`^\s*(--.*)`)
)

// skipOverComments strips leading comment lines from s.
// NOTE(review): s is a by-value parameter and nothing is returned, so the
// trimmed result is discarded — the call site in Next is effectively a no-op.
// Presumably this was meant to return the trimmed string; confirm upstream
// before changing, since both caller and callee would need to move together.
func skipOverComments(s string) {
	// Look for the first line with no whitespace or comments.
	for {
		m := ignoreComments.FindStringIndex(s)
		if m == nil {
			break
		}
		s = s[m[1]:]
	}
}
// regclassRewriter is a tree.Visitor that strips ::regclass casts from the
// first argument of nextval(...) calls, leaving a plain expression the import
// can resolve without a catalog.
type regclassRewriter struct{}

var _ tree.Visitor = regclassRewriter{}

// VisitPre rewrites nextval('seq'::regclass) to nextval('seq') in place.
func (regclassRewriter) VisitPre(expr tree.Expr) (recurse bool, newExpr tree.Expr) {
	switch t := expr.(type) {
	case *tree.FuncExpr:
		switch t.Func.String() {
		case "nextval":
			if len(t.Exprs) > 0 {
				switch e := t.Exprs[0].(type) {
				case *tree.CastExpr:
					if typ, ok := tree.GetStaticallyKnownType(e.Type); ok && typ.Oid() == oid.T_regclass {
						// tree.Visitor says we should make a copy, but since copyNode is unexported
						// and there's no planner here, I think it's safe to directly modify the
						// statement here.
						t.Exprs[0] = e.Expr
					}
				}
			}
		}
	}
	return true, expr
}

// VisitPost returns the expression unchanged.
func (regclassRewriter) VisitPost(expr tree.Expr) tree.Expr { return expr }
// removeDefaultRegclass removes `::regclass` casts from sequence operations
// (i.e., nextval) in DEFAULT column expressions of the CREATE TABLE statement,
// rewriting each column's default expression in place via regclassRewriter.
func removeDefaultRegclass(create *tree.CreateTable) {
	for _, def := range create.Defs {
		switch def := def.(type) {
		case *tree.ColumnTableDef:
			if def.DefaultExpr.Expr != nil {
				def.DefaultExpr.Expr, _ = tree.WalkExpr(regclassRewriter{}, def.DefaultExpr.Expr)
			}
		}
	}
}
// schemaAndTableName pairs an optional schema qualifier with a table name.
type schemaAndTableName struct {
	schema string
	table  string
}

// String renders the name as "schema.table", or just "table" when no schema
// qualifier is present.
func (s *schemaAndTableName) String() string {
	if s.schema == "" {
		return s.table
	}
	return s.schema + "." + s.table
}
// schemaParsingObjects collects the DDL statements discovered while scanning
// a pgdump: schemas, tables, sequences, and the foreign keys attached to each
// table.
type schemaParsingObjects struct {
	createSchema map[string]*tree.CreateSchema
	createTbl    map[schemaAndTableName]*tree.CreateTable
	createSeq    map[schemaAndTableName]*tree.CreateSequence
	tableFKs     map[schemaAndTableName][]*tree.ForeignKeyConstraintTableDef
}
// createPostgresSchemas materializes schema descriptors for every CREATE
// SCHEMA statement found in the dump, under the database identified by
// parentID. Each descriptor gets a placeholder ID and is marked OFFLINE until
// the import finalizes it. Pre-existing schemas (CREATE SCHEMA IF NOT EXISTS)
// are skipped.
func createPostgresSchemas(
	ctx context.Context,
	parentID descpb.ID,
	createSchema map[string]*tree.CreateSchema,
	execCfg *sql.ExecutorConfig,
	user security.SQLUsername,
) ([]*schemadesc.Mutable, error) {
	// Resolve the target database descriptor first.
	var dbDesc catalog.DatabaseDescriptor
	if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		var err error
		dbDesc, err = catalogkv.MustGetDatabaseDescByID(ctx, txn, execCfg.Codec, parentID)
		return err
	}); err != nil {
		return nil, err
	}
	schemaDescs := make([]*schemadesc.Mutable, 0)
	for _, schema := range createSchema {
		if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
			desc, _, err := sql.CreateUserDefinedSchemaDescriptor(ctx, user, schema, txn, execCfg,
				dbDesc, false /* allocateID */)
			if err != nil {
				return err
			}
			// This is true when the schema exists and we are processing a
			// CREATE SCHEMA IF NOT EXISTS statement.
			if desc == nil {
				return nil
			}
			// We didn't allocate an ID above, so we must assign it a mock ID until it
			// is assigned an actual ID later in the import.
			desc.ID = getNextPlaceholderDescID()
			desc.State = descpb.DescriptorState_OFFLINE
			desc.OfflineReason = "importing"
			schemaDescs = append(schemaDescs, desc)
			return err
		}); err != nil {
			return nil, err
		}
	}
	return schemaDescs, nil
}
// createPostgresSequences builds table descriptors for every CREATE SEQUENCE
// in the dump, resolving each sequence's schema (defaulting to public) and
// assigning placeholder descriptor IDs. Each new descriptor is also
// registered in the FK resolver's name map.
func createPostgresSequences(
	ctx context.Context,
	parentID descpb.ID,
	createSeq map[schemaAndTableName]*tree.CreateSequence,
	fks fkHandler,
	walltime int64,
	owner security.SQLUsername,
	schemaNameToDesc map[string]*schemadesc.Mutable,
) ([]*tabledesc.Mutable, error) {
	ret := make([]*tabledesc.Mutable, 0)
	for schemaAndTableName, seq := range createSeq {
		// Default to the public schema unless the dump qualified the name.
		schemaID := descpb.ID(keys.PublicSchemaID)
		if schemaAndTableName.schema != "" && schemaAndTableName.schema != "public" {
			var desc *schemadesc.Mutable
			var ok bool
			if desc, ok = schemaNameToDesc[schemaAndTableName.schema]; !ok {
				return nil, errors.Newf("schema %s not found in the schemas created from the pgdump",
					schemaAndTableName.schema)
			}
			schemaID = desc.ID
		}
		desc, err := sql.NewSequenceTableDesc(
			ctx,
			schemaAndTableName.table,
			seq.Options,
			parentID,
			schemaID,
			getNextPlaceholderDescID(),
			hlc.Timestamp{WallTime: walltime},
			descpb.NewDefaultPrivilegeDescriptor(owner),
			tree.PersistencePermanent,
			nil, /* params */
			// If this is multi-region, this will get added by WriteDescriptors.
			false, /* isMultiRegion */
		)
		if err != nil {
			return nil, err
		}
		fks.resolver.tableNameToDesc[schemaAndTableName.String()] = desc
		ret = append(ret, desc)
	}
	return ret, nil
}
// createPostgresTables constructs a table descriptor for every CREATE TABLE
// statement collected from the dump, registers it with the FK resolver and
// the backrefs map, and returns the descriptors.
func createPostgresTables(
	evalCtx *tree.EvalContext,
	p sql.JobExecContext,
	createTbl map[schemaAndTableName]*tree.CreateTable,
	fks fkHandler,
	backrefs map[descpb.ID]*tabledesc.Mutable,
	parentID descpb.ID,
	walltime int64,
	schemaNameToDesc map[string]*schemadesc.Mutable,
) ([]*tabledesc.Mutable, error) {
	ret := make([]*tabledesc.Mutable, 0, len(createTbl))
	for tblName, create := range createTbl {
		// A nil entry marks a table filtered out by the IMPORT's table
		// matcher; skip it.
		if create == nil {
			continue
		}
		// Resolve the schema the table belongs to, defaulting to public.
		schemaID := descpb.ID(keys.PublicSchemaID)
		if tblName.schema != "" && tblName.schema != tree.PublicSchema {
			schemaDesc, found := schemaNameToDesc[tblName.schema]
			if !found {
				return nil, errors.Newf("schema %s not found in the schemas created from the pgdump",
					tblName.schema)
			}
			schemaID = schemaDesc.ID
		}
		removeDefaultRegclass(create)
		desc, err := MakeSimpleTableDescriptor(evalCtx.Ctx(), p.SemaCtx(), p.ExecCfg().Settings,
			create, parentID, schemaID, getNextPlaceholderDescID(), fks, walltime)
		if err != nil {
			return nil, err
		}
		fks.resolver.tableNameToDesc[tblName.String()] = desc
		backrefs[desc.ID] = desc
		ret = append(ret, desc)
	}
	return ret, nil
}
// resolvePostgresFKs resolves the foreign-key constraints collected during
// the schema parse against the freshly created table descriptors, then fixes
// up each descriptor's FK state.
func resolvePostgresFKs(
	evalCtx *tree.EvalContext,
	tableFKs map[schemaAndTableName][]*tree.ForeignKeyConstraintTableDef,
	fks fkHandler,
	backrefs map[descpb.ID]*tabledesc.Mutable,
) error {
	for tblName, constraints := range tableFKs {
		// Tables that were filtered out of the import have no descriptor.
		desc := fks.resolver.tableNameToDesc[tblName.String()]
		if desc == nil {
			continue
		}
		for _, fkDef := range constraints {
			if fkDef.Table.Schema() == "" {
				return errors.Errorf("schema expected to be non-empty when resolving postgres FK %s",
					fkDef.Name.String())
			}
			fkDef.Table.ExplicitSchema = true
			// Add a dummy catalog name to aid in object resolution.
			if fkDef.Table.Catalog() == "" {
				fkDef.Table.ExplicitCatalog = true
				fkDef.Table.CatalogName = "defaultdb"
			}
			if err := sql.ResolveFK(
				evalCtx.Ctx(), nil /* txn */, &fks.resolver, desc, fkDef, backrefs, sql.NewTable,
				tree.ValidationDefault, evalCtx,
			); err != nil {
				return err
			}
		}
		if err := fixDescriptorFKState(desc); err != nil {
			return err
		}
	}
	return nil
}
// placeholderDescID is the next placeholder descriptor ID to be handed out
// by getNextPlaceholderDescID. It is incremented without synchronization, so
// callers are expected to run single-threaded during schema parsing.
var placeholderDescID = defaultCSVTableID

// getNextPlaceholderDescID returns a monotonically increasing placeholder ID
// that is used when creating table, sequence and schema descriptors during the
// schema parsing phase of a PGDUMP import.
// We assign these descriptors "fake" IDs because it is early in the IMPORT
// execution and we do not want to blow through GenerateUniqueDescID calls only
// to fail during the verification phase before we actually begin ingesting
// data. Thus, we pessimistically wait till all the verification steps in the
// IMPORT have been completed after which we rewrite the descriptor IDs with
// "real" unique IDs.
func getNextPlaceholderDescID() descpb.ID {
	ret := placeholderDescID
	placeholderDescID++
	return ret
}
// readPostgresCreateTable returns table descriptors for all tables or the
// matching table from SQL statements.
//
// The input is scanned statement by statement via readPostgresStmt, which
// accumulates schema-changing statements into a schemaParsingObjects. On EOF
// the collected CREATE SCHEMA / SEQUENCE / TABLE statements are materialized
// into descriptors and the gathered FK constraints are resolved. The returned
// first slice contains both sequence and table descriptors.
func readPostgresCreateTable(
	ctx context.Context,
	input io.Reader,
	evalCtx *tree.EvalContext,
	p sql.JobExecContext,
	match string,
	parentID descpb.ID,
	walltime int64,
	fks fkHandler,
	max int,
	owner security.SQLUsername,
	unsupportedStmtLogger *unsupportedStmtLogger,
) ([]*tabledesc.Mutable, []*schemadesc.Mutable, error) {
	// Modify the CreateTable stmt with the various index additions. We do this
	// instead of creating a full table descriptor first and adding indexes
	// later because MakeSimpleTableDescriptor calls the sql package which calls
	// AllocateIDs which adds the hidden rowid and default primary key. This means
	// we'd have to delete the index and row and modify the column family. This
	// is much easier and probably safer too.
	schemaObjects := schemaParsingObjects{
		createSchema: make(map[string]*tree.CreateSchema),
		createTbl:    make(map[schemaAndTableName]*tree.CreateTable),
		createSeq:    make(map[schemaAndTableName]*tree.CreateSequence),
		tableFKs:     make(map[schemaAndTableName][]*tree.ForeignKeyConstraintTableDef),
	}
	ps := newPostgreStream(ctx, input, max, unsupportedStmtLogger)
	for {
		stmt, err := ps.Next()
		if err == io.EOF {
			// End of input: materialize everything collected above.
			tables := make([]*tabledesc.Mutable, 0, len(schemaObjects.createTbl))
			schemaNameToDesc := make(map[string]*schemadesc.Mutable)
			schemaDescs, err := createPostgresSchemas(ctx, parentID, schemaObjects.createSchema,
				p.ExecCfg(), p.User())
			if err != nil {
				return nil, nil, err
			}
			for _, schemaDesc := range schemaDescs {
				schemaNameToDesc[schemaDesc.GetName()] = schemaDesc
			}
			// Construct sequence descriptors.
			seqs, err := createPostgresSequences(
				ctx,
				parentID,
				schemaObjects.createSeq,
				fks,
				walltime,
				owner,
				schemaNameToDesc,
			)
			if err != nil {
				return nil, nil, err
			}
			tables = append(tables, seqs...)
			// Construct table descriptors.
			backrefs := make(map[descpb.ID]*tabledesc.Mutable)
			tableDescs, err := createPostgresTables(evalCtx, p, schemaObjects.createTbl, fks, backrefs,
				parentID, walltime, schemaNameToDesc)
			if err != nil {
				return nil, nil, err
			}
			tables = append(tables, tableDescs...)
			// Resolve FKs.
			err = resolvePostgresFKs(evalCtx, schemaObjects.tableFKs, fks, backrefs)
			if err != nil {
				return nil, nil, err
			}
			// A requested target table must resolve to exactly one descriptor.
			if match != "" && len(tables) != 1 {
				found := make([]string, 0, len(schemaObjects.createTbl))
				for schemaAndTableName := range schemaObjects.createTbl {
					found = append(found, schemaAndTableName.String())
				}
				return nil, nil, errors.Errorf("table %q not found in file (found tables: %s)", match,
					strings.Join(found, ", "))
			}
			if len(tables) == 0 {
				return nil, nil, errors.Errorf("no table definition found")
			}
			return tables, schemaDescs, nil
		}
		if err != nil {
			return nil, nil, errors.Wrap(err, "postgres parse error")
		}
		if err := readPostgresStmt(ctx, evalCtx, match, fks, &schemaObjects, stmt, p,
			parentID, unsupportedStmtLogger); err != nil {
			return nil, nil, err
		}
	}
}
// readPostgresStmt processes one statement parsed from a PGDUMP file during
// the schema-discovery phase, recording schema-changing statements (CREATE
// SCHEMA/TABLE/SEQUENCE/INDEX, ALTER TABLE, schema-mutating SELECTs) into
// schemaObjects. Unsupported statements are either logged for later review
// (when ignore_unsupported is set) or returned as an error with a docs hint.
func readPostgresStmt(
	ctx context.Context,
	evalCtx *tree.EvalContext,
	match string,
	fks fkHandler,
	schemaObjects *schemaParsingObjects,
	stmt interface{},
	p sql.JobExecContext,
	parentID descpb.ID,
	unsupportedStmtLogger *unsupportedStmtLogger,
) error {
	ignoreUnsupportedStmts := unsupportedStmtLogger.ignoreUnsupported
	switch stmt := stmt.(type) {
	case *tree.CreateSchema:
		name, err := getSchemaName(&stmt.Schema)
		if err != nil {
			return err
		}
		// If a target table is specified we do not want to create any user defined
		// schemas. This is because we only allow specifying target table's in the
		// public schema.
		if match != "" {
			break
		}
		schemaObjects.createSchema[name] = stmt
	case *tree.CreateTable:
		schemaQualifiedName, err := getSchemaAndTableName(&stmt.Table)
		if err != nil {
			return err
		}
		isMatch := match == "" || match == schemaQualifiedName.String()
		if isMatch {
			schemaObjects.createTbl[schemaQualifiedName] = stmt
		} else {
			// Record the name with a nil value so later statements referencing
			// this table know it exists but was deliberately filtered out.
			schemaObjects.createTbl[schemaQualifiedName] = nil
		}
	case *tree.CreateIndex:
		if stmt.Predicate != nil {
			return unimplemented.NewWithIssue(50225, "cannot import a table with partial indexes")
		}
		schemaQualifiedTableName, err := getSchemaAndTableName(&stmt.Table)
		if err != nil {
			return err
		}
		create := schemaObjects.createTbl[schemaQualifiedTableName]
		if create == nil {
			break
		}
		// Fold the secondary index into the table's CREATE TABLE defs so the
		// descriptor is built with the index in one shot.
		var idx tree.TableDef = &tree.IndexTableDef{
			Name:             stmt.Name,
			Columns:          stmt.Columns,
			Storing:          stmt.Storing,
			Inverted:         stmt.Inverted,
			Interleave:       stmt.Interleave,
			PartitionByIndex: stmt.PartitionByIndex,
		}
		if stmt.Unique {
			idx = &tree.UniqueConstraintTableDef{IndexTableDef: *idx.(*tree.IndexTableDef)}
		}
		create.Defs = append(create.Defs, idx)
	case *tree.AlterSchema:
		// No ALTER SCHEMA command is acted upon during schema discovery.
		switch stmt.Cmd {
		default:
		}
	case *tree.AlterTable:
		schemaQualifiedTableName, err := getSchemaAndTableName2(stmt.Table)
		if err != nil {
			return err
		}
		create := schemaObjects.createTbl[schemaQualifiedTableName]
		if create == nil {
			break
		}
		for _, cmd := range stmt.Cmds {
			switch cmd := cmd.(type) {
			case *tree.AlterTableAddConstraint:
				switch con := cmd.ConstraintDef.(type) {
				case *tree.ForeignKeyConstraintTableDef:
					// FKs are deferred until all tables exist; see
					// resolvePostgresFKs.
					if !fks.skip {
						if con.Table.Schema() == "" {
							con.Table.SchemaName = tree.PublicSchemaName
						}
						schemaObjects.tableFKs[schemaQualifiedTableName] = append(schemaObjects.tableFKs[schemaQualifiedTableName], con)
					}
				default:
					create.Defs = append(create.Defs, cmd.ConstraintDef)
				}
			case *tree.AlterTableSetDefault:
				// Patch the DEFAULT expression into the matching column def.
				found := false
				for i, def := range create.Defs {
					def, ok := def.(*tree.ColumnTableDef)
					// If it's not a column definition, or the column name doesn't match,
					// we're not interested in this column.
					if !ok || def.Name != cmd.Column {
						continue
					}
					def.DefaultExpr.Expr = cmd.Default
					create.Defs[i] = def
					found = true
					break
				}
				if !found {
					return colinfo.NewUndefinedColumnError(cmd.Column.String())
				}
			case *tree.AlterTableSetVisible:
				found := false
				for i, def := range create.Defs {
					def, ok := def.(*tree.ColumnTableDef)
					// If it's not a column definition, or the column name doesn't match,
					// we're not interested in this column.
					if !ok || def.Name != cmd.Column {
						continue
					}
					def.Hidden = !cmd.Visible
					create.Defs[i] = def
					found = true
					break
				}
				if !found {
					return colinfo.NewUndefinedColumnError(cmd.Column.String())
				}
			case *tree.AlterTableAddColumn:
				if cmd.IfNotExists {
					if ignoreUnsupportedStmts {
						err := unsupportedStmtLogger.log(stmt.String(), false /* isParseError */)
						if err != nil {
							return err
						}
						continue
					}
					return wrapErrorWithUnsupportedHint(errors.Errorf("unsupported statement: %s", stmt))
				}
				create.Defs = append(create.Defs, cmd.ColumnDef)
			case *tree.AlterTableSetNotNull:
				found := false
				for i, def := range create.Defs {
					def, ok := def.(*tree.ColumnTableDef)
					// If it's not a column definition, or the column name doesn't match,
					// we're not interested in this column.
					if !ok || def.Name != cmd.Column {
						continue
					}
					def.Nullable.Nullability = tree.NotNull
					create.Defs[i] = def
					found = true
					break
				}
				if !found {
					return colinfo.NewUndefinedColumnError(cmd.Column.String())
				}
			default:
				if ignoreUnsupportedStmts {
					err := unsupportedStmtLogger.log(stmt.String(), false /* isParseError */)
					if err != nil {
						return err
					}
					continue
				}
				return wrapErrorWithUnsupportedHint(errors.Errorf("unsupported statement: %s", stmt))
			}
		}
	case *tree.AlterTableOwner:
		if ignoreUnsupportedStmts {
			return unsupportedStmtLogger.log(stmt.String(), false /* isParseError */)
		}
		return wrapErrorWithUnsupportedHint(errors.Errorf("unsupported statement: %s", stmt))
	case *tree.CreateSequence:
		schemaQualifiedTableName, err := getSchemaAndTableName(&stmt.Name)
		if err != nil {
			return err
		}
		if match == "" || match == schemaQualifiedTableName.String() {
			schemaObjects.createSeq[schemaQualifiedTableName] = stmt
		}
	case *tree.AlterSequence:
		if ignoreUnsupportedStmts {
			return unsupportedStmtLogger.log(stmt.String(), false /* isParseError */)
		}
		return wrapErrorWithUnsupportedHint(errors.Errorf("unsupported %T statement: %s", stmt, stmt))
	// Some SELECT statements mutate schema. Search for those here.
	case *tree.Select:
		switch sel := stmt.Select.(type) {
		case *tree.SelectClause:
			for _, selExpr := range sel.Exprs {
				switch expr := selExpr.Expr.(type) {
				case *tree.FuncExpr:
					// Look for function calls that mutate schema (this is actually a thing).
					semaCtx := tree.MakeSemaContext()
					if _, err := expr.TypeCheck(ctx, &semaCtx, nil /* desired */); err != nil {
						// If the expression does not type check, it may be a case of using
						// a column that does not exist yet in a setval call (as is the case
						// of PGDUMP output from ogr2ogr). We're not interested in setval
						// calls during schema reading so it is safe to ignore this for now.
						if f := expr.Func.String(); pgerror.GetPGCode(err) == pgcode.UndefinedColumn && f == "setval" {
							continue
						}
						return err
					}
					ov := expr.ResolvedOverload()
					// Search for a SQLFn, which returns a SQL string to execute.
					fn := ov.SQLFn
					if fn == nil {
						err := errors.Errorf("unsupported function call: %s in stmt: %s",
							expr.Func.String(), stmt.String())
						if ignoreUnsupportedStmts {
							err := unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
							if err != nil {
								return err
							}
							continue
						}
						return wrapErrorWithUnsupportedHint(err)
					}
					// Attempt to convert all func exprs to datums.
					datums := make(tree.Datums, len(expr.Exprs))
					for i, ex := range expr.Exprs {
						d, ok := ex.(tree.Datum)
						if !ok {
							// We got something that wasn't a datum so we can't call the
							// overload. Since this is a SQLFn and the user would have
							// expected us to execute it, we have to error.
							return errors.Errorf("unsupported statement: %s", stmt)
						}
						datums[i] = d
					}
					// Now that we have all of the datums, we can execute the overload.
					fnSQL, err := fn(evalCtx, datums)
					if err != nil {
						return err
					}
					// We have some sql. Parse and process it.
					fnStmts, err := parser.Parse(fnSQL)
					if err != nil {
						return err
					}
					for _, fnStmt := range fnStmts {
						switch ast := fnStmt.AST.(type) {
						case *tree.AlterTable:
							// Recurse so the generated ALTER TABLE is folded
							// into schemaObjects like a top-level statement.
							if err := readPostgresStmt(ctx, evalCtx, match, fks, schemaObjects, ast, p,
								parentID, unsupportedStmtLogger); err != nil {
								return err
							}
						default:
							// We only support ALTER statements returned from a SQLFn.
							return errors.Errorf("unsupported statement: %s", stmt)
						}
					}
				default:
					err := errors.Errorf("unsupported %T SELECT expr: %s", expr, expr)
					if ignoreUnsupportedStmts {
						err := unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
						if err != nil {
							return err
						}
						continue
					}
					return wrapErrorWithUnsupportedHint(err)
				}
			}
		default:
			err := errors.Errorf("unsupported %T SELECT %s", sel, sel)
			if ignoreUnsupportedStmts {
				err := unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
				if err != nil {
					return err
				}
				return nil
			}
			return wrapErrorWithUnsupportedHint(err)
		}
	case *tree.DropTable:
		names := stmt.Names
		// If we find a table with the same name in the target DB we are importing
		// into and same public schema, then we throw an error telling the user to
		// drop the conflicting existing table to proceed.
		// Otherwise, we silently ignore the drop statement and continue with the import.
		for _, name := range names {
			tableName := name.ToUnresolvedObjectName().String()
			if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
				err := catalogkv.CheckObjectCollision(
					ctx,
					txn,
					p.ExecCfg().Codec,
					parentID,
					keys.PublicSchemaID,
					tree.NewUnqualifiedTableName(tree.Name(tableName)),
				)
				if err != nil {
					return errors.Wrapf(err, `drop table "%s" and then retry the import`, tableName)
				}
				return nil
			}); err != nil {
				return err
			}
		}
	case *tree.BeginTransaction, *tree.CommitTransaction:
		// Ignore transaction statements as they have no meaning during an IMPORT.
		// TODO(during review): Should we guard these statements under the
		// ignore_unsupported flag as well?
	case *tree.Insert, *tree.CopyFrom, *tree.Delete, copyData:
		// handled during the data ingestion pass.
	case *tree.CreateExtension, *tree.CommentOnDatabase, *tree.CommentOnTable,
		*tree.CommentOnIndex, *tree.CommentOnColumn, *tree.SetVar, *tree.Analyze:
		// These are the statements that can be parsed by CRDB but are not
		// supported, or are not required to be processed, during an IMPORT.
		// - ignore txns.
		// - ignore SETs and DMLs.
		// - ANALYZE is syntactic sugar for CreateStatistics. It can be ignored
		// because the auto stats stuff will pick up the changes and run if needed.
		if ignoreUnsupportedStmts {
			return unsupportedStmtLogger.log(fmt.Sprintf("%s", stmt), false /* isParseError */)
		}
		return wrapErrorWithUnsupportedHint(errors.Errorf("unsupported %T statement: %s", stmt, stmt))
	case error:
		// The parser can surface sentinel errors as statements; only
		// errCopyDone is expected and ignorable here.
		if !errors.Is(stmt, errCopyDone) {
			return stmt
		}
	default:
		if ignoreUnsupportedStmts {
			return unsupportedStmtLogger.log(fmt.Sprintf("%s", stmt), false /* isParseError */)
		}
		return wrapErrorWithUnsupportedHint(errors.Errorf("unsupported %T statement: %s", stmt, stmt))
	}
	return nil
}
// getSchemaName extracts the schema name from an object name prefix,
// rejecting names that carry an explicit catalog (database) qualification
// since importing into a database named in the dump is unsupported.
func getSchemaName(sc *tree.ObjectNamePrefix) (string, error) {
	if !sc.ExplicitCatalog {
		return sc.SchemaName.String(), nil
	}
	return "", unimplemented.Newf("import into database specified in dump file",
		"explicit catalog schemas unsupported: %s", sc.CatalogName.String()+sc.SchemaName.String())
}
// getSchemaAndTableName splits a table name into its schema and table parts,
// defaulting the schema to "public" when the name is unqualified.
func getSchemaAndTableName(tn *tree.TableName) (schemaAndTableName, error) {
	ret := schemaAndTableName{schema: tree.PublicSchema, table: tn.Table()}
	if s := tn.Schema(); s != "" {
		ret.schema = s
	}
	return ret, nil
}
// getSchemaAndTableName2 is the getSchemaAndTableName variant for
// *tree.UnresolvedObjectName inputs. The schema defaults to "public" unless
// the name carries a non-empty second part.
func getSchemaAndTableName2(u *tree.UnresolvedObjectName) (schemaAndTableName, error) {
	ret := schemaAndTableName{schema: tree.PublicSchema, table: u.Parts[0]}
	if u.NumParts >= 2 && u.Parts[1] != "" {
		ret.schema = u.Parts[1]
	}
	return ret, nil
}
// pgDumpReader is the inputConverter implementation for PGDUMP files: it
// parses the dump's statements and converts INSERT/COPY rows into KVs.
type pgDumpReader struct {
	// tableDescs maps schema-qualified names to descriptors for both tables
	// and sequences.
	tableDescs map[string]catalog.TableDescriptor
	// tables maps schema-qualified table names to their row converters.
	tables map[string]*row.DatumRowConverter
	descs  map[string]*execinfrapb.ReadImportDataSpec_ImportTable
	// kvCh receives converted KV batches (and direct sequence updates).
	kvCh     chan row.KVBatch
	opts     roachpb.PgDumpOptions
	walltime int64
	// colMap records, per converter, the ordinal of each visible column name.
	colMap                map[*row.DatumRowConverter](map[string]int)
	jobID                 int64
	unsupportedStmtLogger *unsupportedStmtLogger
	evalCtx               *tree.EvalContext
}

// Compile-time check that pgDumpReader satisfies inputConverter.
var _ inputConverter = &pgDumpReader{}
// newPgDumpReader creates a new inputConverter for pg_dump files.
//
// For each table descriptor a row converter is built and the ordinals of its
// visible columns are recorded so INSERT/COPY statements can map column names
// to datum indices. Sequences get no converter; they are only tracked by name
// so setval() calls can be applied later.
func newPgDumpReader(
	ctx context.Context,
	jobID int64,
	kvCh chan row.KVBatch,
	opts roachpb.PgDumpOptions,
	walltime int64,
	descs map[string]*execinfrapb.ReadImportDataSpec_ImportTable,
	evalCtx *tree.EvalContext,
) (*pgDumpReader, error) {
	tableDescs := make(map[string]catalog.TableDescriptor, len(descs))
	converters := make(map[string]*row.DatumRowConverter, len(descs))
	colMap := make(map[*row.DatumRowConverter](map[string]int))
	for name, table := range descs {
		switch {
		case table.Desc.IsTable():
			tableDesc := tabledesc.NewBuilder(table.Desc).BuildImmutableTable()
			targetCols := make(tree.NameList, len(table.TargetCols))
			for i, colName := range table.TargetCols {
				targetCols[i] = tree.Name(colName)
			}
			// Record the ordinal of each visible column by name for later
			// column-targeted INSERT/COPY statements.
			colIdxByName := make(map[string]int, len(table.TargetCols))
			for i, col := range tableDesc.VisibleColumns() {
				colIdxByName[col.GetName()] = i
			}
			converter, err := row.NewDatumRowConverter(ctx, tableDesc, targetCols, evalCtx, kvCh,
				nil /* seqChunkProvider */)
			if err != nil {
				return nil, err
			}
			converters[name] = converter
			colMap[converter] = colIdxByName
			tableDescs[name] = tableDesc
		case table.Desc.IsSequence():
			tableDescs[name] = tabledesc.NewBuilder(table.Desc).BuildImmutableTable()
		}
	}
	return &pgDumpReader{
		kvCh:       kvCh,
		tableDescs: tableDescs,
		tables:     converters,
		descs:      descs,
		opts:       opts,
		walltime:   walltime,
		colMap:     colMap,
		jobID:      jobID,
		evalCtx:    evalCtx,
	}, nil
}
// start is part of the inputConverter contract; pgDumpReader needs no
// background goroutines, so this is deliberately a no-op.
func (m *pgDumpReader) start(ctx ctxgroup.Group) {
}
// readFiles parses each of the dump files via readFile, wiring up the
// unsupported-statement logger first and flushing any logged statements once
// all files have been processed.
func (m *pgDumpReader) readFiles(
	ctx context.Context,
	dataFiles map[int32]string,
	resumePos map[int32]int64,
	format roachpb.IOFileFormat,
	makeExternalStorage cloud.ExternalStorageFactory,
	user security.SQLUsername,
) error {
	// Setup logger to handle unsupported DML statements seen in the PGDUMP file.
	m.unsupportedStmtLogger = makeUnsupportedStmtLogger(ctx, user,
		m.jobID, format.PgDump.IgnoreUnsupported, format.PgDump.IgnoreUnsupportedLog, dataIngestion,
		makeExternalStorage)
	err := readInputFiles(ctx, dataFiles, resumePos, format, m.readFile, makeExternalStorage, user)
	if err != nil {
		return err
	}
	return m.unsupportedStmtLogger.flush()
}
// wrapErrorWithUnsupportedHint decorates err with a hint pointing the user at
// the IMPORT options that skip and log unsupported statements.
func wrapErrorWithUnsupportedHint(err error) error {
	const docsURL = "https://www.cockroachlabs.com/docs/stable/import.html#import-options"
	return errors.WithHintf(err,
		"To ignore unsupported statements and log them for review post IMPORT, see the options listed"+
			" in the docs: %s", docsURL)
}
// readFile parses one PGDUMP file and ingests its INSERT and COPY FROM rows
// through the per-table row converters, honoring resumePos and the optional
// row_limit option. setval()-style SELECTs write sequence values directly as
// KVs. Schema statements were already handled during schema extraction and
// are skipped here.
func (m *pgDumpReader) readFile(
	ctx context.Context, input *fileReader, inputIdx int32, resumePos int64, rejected chan string,
) error {
	tableNameToRowsProcessed := make(map[string]int64)
	var inserts, count int64
	rowLimit := m.opts.RowLimit
	ps := newPostgreStream(ctx, input, int(m.opts.MaxRowSize), m.unsupportedStmtLogger)
	semaCtx := tree.MakeSemaContext()
	for _, conv := range m.tables {
		conv.KvBatch.Source = inputIdx
		conv.FractionFn = input.ReadFraction
		// CompletedRowFn closes over count, which is updated below as rows
		// are processed.
		conv.CompletedRowFn = func() int64 {
			return count
		}
	}
	for {
		stmt, err := ps.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return errors.Wrap(err, "postgres parse error")
		}
		switch i := stmt.(type) {
		case *tree.Insert:
			n, ok := i.Table.(*tree.TableName)
			if !ok {
				return errors.Errorf("unexpected: %T", i.Table)
			}
			name, err := getSchemaAndTableName(n)
			if err != nil {
				return errors.Wrapf(err, "%s", i)
			}
			conv, ok := m.tables[name.String()]
			if !ok {
				// not importing this table.
				continue
			}
			if ok && conv == nil {
				return errors.Errorf("missing schema info for requested table %q", name)
			}
			expectedColLen := len(i.Columns)
			if expectedColLen == 0 {
				// Case where the targeted columns are not specified in the PGDUMP file, but in
				// the command "IMPORT INTO table (targetCols) PGDUMP DATA (filename)"
				expectedColLen = len(conv.VisibleCols)
			}
			timestamp := timestampAfterEpoch(m.walltime)
			values, ok := i.Rows.Select.(*tree.ValuesClause)
			if !ok {
				if m.unsupportedStmtLogger.ignoreUnsupported {
					logLine := fmt.Sprintf("%s: unsupported by IMPORT\n",
						i.Rows.Select.String())
					err := m.unsupportedStmtLogger.log(logLine, false /* isParseError */)
					if err != nil {
						return err
					}
					continue
				}
				return wrapErrorWithUnsupportedHint(errors.Errorf("unsupported: %s", i.Rows.Select))
			}
			inserts++
			startingCount := count
			var targetColMapIdx []int
			if len(i.Columns) != 0 {
				// Map each INSERT column name to its visible-column ordinal.
				targetColMapIdx = make([]int, len(i.Columns))
				conv.TargetColOrds = util.FastIntSet{}
				for j := range i.Columns {
					colName := string(i.Columns[j])
					idx, ok := m.colMap[conv][colName]
					if !ok {
						return errors.Newf("targeted column %q not found", colName)
					}
					conv.TargetColOrds.Add(idx)
					targetColMapIdx[j] = idx
				}
				// For any missing columns, fill those to NULL.
				// These will get filled in with the correct default / computed expression
				// provided conv.IsTargetCol is not set for the given column index.
				for idx := range conv.VisibleCols {
					if !conv.TargetColOrds.Contains(idx) {
						conv.Datums[idx] = tree.DNull
					}
				}
			}
			for _, tuple := range values.Rows {
				count++
				tableNameToRowsProcessed[name.String()]++
				if count <= resumePos {
					continue
				}
				if rowLimit != 0 && tableNameToRowsProcessed[name.String()] > rowLimit {
					break
				}
				if got := len(tuple); expectedColLen != got {
					return errors.Errorf("expected %d values, got %d: %v", expectedColLen, got, tuple)
				}
				for j, expr := range tuple {
					idx := j
					if len(i.Columns) != 0 {
						idx = targetColMapIdx[j]
					}
					typed, err := expr.TypeCheck(ctx, &semaCtx, conv.VisibleColTypes[idx])
					if err != nil {
						return errors.Wrapf(err, "reading row %d (%d in insert statement %d)",
							count, count-startingCount, inserts)
					}
					converted, err := typed.Eval(conv.EvalCtx)
					if err != nil {
						return errors.Wrapf(err, "reading row %d (%d in insert statement %d)",
							count, count-startingCount, inserts)
					}
					conv.Datums[idx] = converted
				}
				if err := conv.Row(ctx, inputIdx, count+int64(timestamp)); err != nil {
					return err
				}
			}
		case *tree.CopyFrom:
			if !i.Stdin {
				return errors.New("expected STDIN option on COPY FROM")
			}
			name, err := getSchemaAndTableName(&i.Table)
			if err != nil {
				return errors.Wrapf(err, "%s", i)
			}
			conv, importing := m.tables[name.String()]
			if importing && conv == nil {
				return errors.Errorf("missing schema info for requested table %q", name)
			}
			var targetColMapIdx []int
			if conv != nil {
				targetColMapIdx = make([]int, len(i.Columns))
				conv.TargetColOrds = util.FastIntSet{}
				for j := range i.Columns {
					colName := string(i.Columns[j])
					idx, ok := m.colMap[conv][colName]
					if !ok {
						return errors.Newf("targeted column %q not found", colName)
					}
					conv.TargetColOrds.Add(idx)
					targetColMapIdx[j] = idx
				}
			}
			// Consume the COPY data rows that follow until copyDone.
			for {
				row, err := ps.Next()
				// We expect an explicit copyDone here. io.EOF is unexpected.
				if err == io.EOF {
					return makeRowErr("", count, pgcode.ProtocolViolation,
						"unexpected EOF")
				}
				if row == errCopyDone {
					break
				}
				count++
				tableNameToRowsProcessed[name.String()]++
				if err != nil {
					return wrapRowErr(err, "", count, pgcode.Uncategorized, "")
				}
				if !importing {
					continue
				}
				if count <= resumePos {
					continue
				}
				switch row := row.(type) {
				case copyData:
					if expected, got := conv.TargetColOrds.Len(), len(row); expected != got {
						return makeRowErr("", count, pgcode.Syntax,
							"expected %d values, got %d", expected, got)
					}
					// NB: this break exits the switch, not the enclosing for
					// loop, so rows past the limit are skipped (not ingested)
					// while the stream is still drained to copyDone.
					if rowLimit != 0 && tableNameToRowsProcessed[name.String()] > rowLimit {
						break
					}
					for i, s := range row {
						idx := targetColMapIdx[i]
						if s == nil {
							conv.Datums[idx] = tree.DNull
						} else {
							// We use ParseAndRequireString instead of ParseDatumStringAs
							// because postgres dumps arrays in COPY statements using their
							// internal string representation.
							conv.Datums[idx], _, err = tree.ParseAndRequireString(conv.VisibleColTypes[idx], *s, conv.EvalCtx)
							if err != nil {
								col := conv.VisibleCols[idx]
								return wrapRowErr(err, "", count, pgcode.Syntax,
									"parse %q as %s", col.Name, col.Type.SQLString())
							}
						}
					}
					if err := conv.Row(ctx, inputIdx, count); err != nil {
						return err
					}
				default:
					return makeRowErr("", count, pgcode.Uncategorized,
						"unexpected: %v", row)
				}
			}
		case *tree.Select:
			// Look for something of the form "SELECT pg_catalog.setval(...)". Any error
			// or unexpected value silently breaks out of this branch. We are silent
			// instead of returning an error because we expect input to be well-formatted
			// by pg_dump, and thus if it isn't, we don't try to figure out what to do.
			sc, ok := i.Select.(*tree.SelectClause)
			if !ok {
				err := errors.Errorf("unsupported %T Select: %v", i.Select, i.Select)
				if m.unsupportedStmtLogger.ignoreUnsupported {
					err := m.unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
					if err != nil {
						return err
					}
					continue
				}
				return wrapErrorWithUnsupportedHint(err)
			}
			if len(sc.Exprs) != 1 {
				err := errors.Errorf("unsupported %d select args: %v", len(sc.Exprs), sc.Exprs)
				if m.unsupportedStmtLogger.ignoreUnsupported {
					err := m.unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
					if err != nil {
						return err
					}
					continue
				}
				return wrapErrorWithUnsupportedHint(err)
			}
			fn, ok := sc.Exprs[0].Expr.(*tree.FuncExpr)
			if !ok {
				err := errors.Errorf("unsupported select arg %T: %v", sc.Exprs[0].Expr, sc.Exprs[0].Expr)
				if m.unsupportedStmtLogger.ignoreUnsupported {
					err := m.unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
					if err != nil {
						return err
					}
					continue
				}
				return wrapErrorWithUnsupportedHint(err)
			}
			switch funcName := strings.ToLower(fn.Func.String()); funcName {
			case "search_path", "pg_catalog.set_config":
				err := errors.Errorf("unsupported %d fn args in select: %v", len(fn.Exprs), fn.Exprs)
				if m.unsupportedStmtLogger.ignoreUnsupported {
					err := m.unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
					if err != nil {
						return err
					}
					continue
				}
				return wrapErrorWithUnsupportedHint(err)
			case "setval", "pg_catalog.setval":
				// setval(seq, val[, is_called]) takes two or three arguments.
				if args := len(fn.Exprs); args < 2 || args > 3 {
					err := errors.Errorf("unsupported %d fn args in select: %v", len(fn.Exprs), fn.Exprs)
					if m.unsupportedStmtLogger.ignoreUnsupported {
						err := m.unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
						if err != nil {
							return err
						}
						continue
					}
					return err
				}
				seqname, ok := fn.Exprs[0].(*tree.StrVal)
				if !ok {
					if nested, nestedOk := fn.Exprs[0].(*tree.FuncExpr); nestedOk && nested.Func.String() == "pg_get_serial_sequence" {
						// ogr2ogr dumps set the seq for the PK by a) looking up the seqname
						// and then b) running an aggregate on the just-imported data to
						// determine the max value. We're not going to do any of that, but
						// we can just ignore all of this because we mapped their "serial"
						// to our rowid anyway so there is no seq to maintain.
						continue
					}
					return errors.Errorf("unsupported setval %T arg: %v", fn.Exprs[0], fn.Exprs[0])
				}
				seqval, ok := fn.Exprs[1].(*tree.NumVal)
				if !ok {
					err := errors.Errorf("unsupported setval %T arg: %v", fn.Exprs[1], fn.Exprs[1])
					if m.unsupportedStmtLogger.ignoreUnsupported {
						err := m.unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
						if err != nil {
							return err
						}
						continue
					}
					return wrapErrorWithUnsupportedHint(err)
				}
				val, err := seqval.AsInt64()
				if err != nil {
					return errors.Wrap(err, "unsupported setval arg")
				}
				isCalled := false
				if len(fn.Exprs) == 3 {
					called, ok := fn.Exprs[2].(*tree.DBool)
					if !ok {
						err := errors.Errorf("unsupported setval %T arg: %v", fn.Exprs[2], fn.Exprs[2])
						if m.unsupportedStmtLogger.ignoreUnsupported {
							err := m.unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
							if err != nil {
								return err
							}
							continue
						}
						return err
					}
					isCalled = bool(*called)
				}
				name, err := parser.ParseTableName(seqname.RawString())
				if err != nil {
					break
				}
				seqName := name.Parts[0]
				if name.Schema() != "" {
					seqName = fmt.Sprintf("%s.%s", name.Schema(), name.Object())
				}
				// Unknown sequences are silently ignored (see branch comment
				// above about well-formatted pg_dump input).
				seq := m.tableDescs[seqName]
				if seq == nil {
					break
				}
				// Write the sequence's new value directly as a KV.
				key, val, err := sql.MakeSequenceKeyVal(m.evalCtx.Codec, seq, val, isCalled)
				if err != nil {
					return wrapRowErr(err, "", count, pgcode.Uncategorized, "")
				}
				kv := roachpb.KeyValue{Key: key}
				kv.Value.SetInt(val)
				m.kvCh <- row.KVBatch{
					Source: inputIdx, KVs: []roachpb.KeyValue{kv}, Progress: input.ReadFraction(),
				}
			case "addgeometrycolumn":
				// handled during schema extraction.
			default:
				err := errors.Errorf("unsupported function %s in stmt %s", funcName, i.Select.String())
				if m.unsupportedStmtLogger.ignoreUnsupported {
					err := m.unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
					if err != nil {
						return err
					}
					continue
				}
				return wrapErrorWithUnsupportedHint(err)
			}
		case *tree.CreateExtension, *tree.CommentOnDatabase, *tree.CommentOnTable,
			*tree.CommentOnIndex, *tree.CommentOnColumn, *tree.AlterSequence:
			// handled during schema extraction.
		case *tree.SetVar, *tree.BeginTransaction, *tree.CommitTransaction, *tree.Analyze:
			// handled during schema extraction.
		case *tree.CreateTable, *tree.CreateSchema, *tree.AlterTable, *tree.AlterTableOwner,
			*tree.CreateIndex, *tree.CreateSequence, *tree.DropTable:
			// handled during schema extraction.
		default:
			err := errors.Errorf("unsupported %T statement: %v", i, i)
			if m.unsupportedStmtLogger.ignoreUnsupported {
				err := m.unsupportedStmtLogger.log(err.Error(), false /* isParseError */)
				if err != nil {
					return err
				}
				continue
			}
			return wrapErrorWithUnsupportedHint(err)
		}
	}
	// Flush any buffered KVs for each table before finishing the file.
	for _, conv := range m.tables {
		if err := conv.SendBatch(ctx); err != nil {
			return err
		}
	}
	return nil
}
// wrapWithLineTooLongHint decorates err with a hint telling the user how to
// raise the maximum line length via the `max_row_size` import option.
func wrapWithLineTooLongHint(err error) error {
	return errors.WithHintf(
		err,
		"use `max_row_size` to increase the maximum line limit (default: %s).",
		humanizeutil.IBytes(defaultScanBuffer),
	)
}
|
// Package etl contains a solution to the Exercism "etl" exercise.
package etl
import "strings"
// Transform converts the legacy score-to-letters mapping into the new
// lowercase-letter-to-score format.
func Transform(input map[int][]string) (output map[string]int) {
	output = map[string]int{}
	for score, letters := range input {
		for _, letter := range letters {
			output[strings.ToLower(letter)] = score
		}
	}
	return
}
|
package main
import (
	"fmt"
	"log"
	"net/http"
)
// main starts a trivial web server on :8080 that serves a static HTML
// snippet at /myroute.
func main() {
	fmt.Println("Hello World")
	http.HandleFunc("/myroute", func(w http.ResponseWriter, r *http.Request) {
		// Report (rather than silently drop) failures to write the response.
		if _, err := w.Write([]byte("<h1>A simple web server</h1>")); err != nil {
			log.Printf("writing response: %v", err)
		}
	})
	// ListenAndServe always returns a non-nil error; previously it was
	// discarded, so a failure to bind the port exited silently.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"encoding/json"
lru "github.com/hashicorp/golang-lru"
"net/url"
"testing"
"time"
"github.com/cloudevents/sdk-go/v2/event"
cetypes "github.com/cloudevents/sdk-go/v2/types"
"github.com/google/go-cmp/cmp"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/test/diff"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// strptr returns a pointer to a copy of s.
func strptr(s string) *string {
	v := s
	return &v
}
// getEventData marshals run into the cloud-event payload map, keyed by
// resource kind ("taskrun" or "pipelinerun"). Unknown types yield an empty map.
func getEventData(run interface{}) map[string]string {
	payload := map[string]string{}
	var key string
	var resource interface{}
	switch r := run.(type) {
	case *v1beta1.TaskRun:
		key, resource = "taskrun", r
	case *v1beta1.PipelineRun:
		key, resource = "pipelinerun", r
	default:
		return payload
	}
	marshaled, err := json.Marshal(resource)
	if err != nil {
		// Test fixture construction; a marshal failure is a broken test.
		panic(err)
	}
	payload[key] = string(marshaled)
	return payload
}
// getEventToTest builds a minimal V1 cloud event of the given type wrapping
// run (a TaskRun or PipelineRun) as its JSON payload.
func getEventToTest(eventtype string, run interface{}) *event.Event {
	e := event.Event{
		Context: event.EventContextV1{
			Type:    eventtype,
			Source:  cetypes.URIRef{URL: url.URL{Path: "/foo/bar/source"}},
			ID:      "test-event",
			Time:    &cetypes.Timestamp{Time: time.Now()},
			Subject: strptr("topic"),
		}.AsV1(),
	}
	// SetData only fails on marshaling problems, which indicates a broken fixture.
	if err := e.SetData("text/json", getEventData(run)); err != nil {
		panic(err)
	}
	return &e
}
// getTaskRunByMeta returns an otherwise-empty TaskRun with the given name and
// namespace set.
func getTaskRunByMeta(name string, namespace string) *v1beta1.TaskRun {
	tr := v1beta1.TaskRun{}
	tr.TypeMeta = metav1.TypeMeta{Kind: "TaskRun", APIVersion: "v1beta1"}
	tr.ObjectMeta = metav1.ObjectMeta{Name: name, Namespace: namespace}
	return &tr
}
// getPipelineRunByMeta returns an otherwise-empty PipelineRun with the given
// name and namespace set.
func getPipelineRunByMeta(name string, namespace string) *v1beta1.PipelineRun {
	pr := v1beta1.PipelineRun{}
	pr.TypeMeta = metav1.TypeMeta{Kind: "PipelineRun", APIVersion: "v1beta1"}
	pr.ObjectMeta = metav1.ObjectMeta{Name: name, Namespace: namespace}
	return &pr
}
// TestEventsKey verifies that keys are extracted correctly from events.
// Expected key shape: <eventtype>/<kind>/<namespace>/<name>.
func TestEventsKey(t *testing.T) {
	testcases := []struct {
		name      string
		eventtype string
		run       interface{}
		wantKey   string
	}{{
		name:      "taskrun event",
		eventtype: "my.test.taskrun.event",
		run:       getTaskRunByMeta("mytaskrun", "mynamespace"),
		wantKey:   "my.test.taskrun.event/taskrun/mynamespace/mytaskrun",
	}, {
		name:      "pipelinerun event",
		eventtype: "my.test.pipelinerun.event",
		run:       getPipelineRunByMeta("mypipelinerun", "mynamespace"),
		wantKey:   "my.test.pipelinerun.event/pipelinerun/mynamespace/mypipelinerun",
	}}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			gotEvent := getEventToTest(tc.eventtype, tc.run)
			gotKey := EventKey(gotEvent)
			if d := cmp.Diff(tc.wantKey, gotKey); d != "" {
				t.Errorf("Wrong Event key %s", diff.PrintWantGot(d))
			}
		})
	}
}
// TestAddCheckEvent verifies that an event is found in the cache only when an
// event with the same type and same resource (kind/namespace/name) was added
// before; the "new timestamp event" case shows timestamps do not affect the key.
func TestAddCheckEvent(t *testing.T) {
	run := getTaskRunByMeta("arun", "anamespace")
	runb := getTaskRunByMeta("arun", "bnamespace")
	pipelinerun := getPipelineRunByMeta("arun", "anamespace")
	baseEvent := getEventToTest("some.event.type", run)
	testcases := []struct {
		name        string
		firstEvent  *event.Event
		secondEvent *event.Event
		wantFound   bool
	}{{
		name:        "identical events",
		firstEvent:  baseEvent,
		secondEvent: baseEvent,
		wantFound:   true,
	}, {
		// Same resource and type but a fresh timestamp still counts as sent.
		name:        "new timestamp event",
		firstEvent:  baseEvent,
		secondEvent: getEventToTest("some.event.type", run),
		wantFound:   true,
	}, {
		name:        "different namespace",
		firstEvent:  baseEvent,
		secondEvent: getEventToTest("some.event.type", runb),
		wantFound:   false,
	}, {
		name:        "different resource type",
		firstEvent:  baseEvent,
		secondEvent: getEventToTest("some.event.type", pipelinerun),
		wantFound:   false,
	}, {
		name:        "different event type",
		firstEvent:  baseEvent,
		secondEvent: getEventToTest("some.other.event.type", run),
		wantFound:   false,
	}}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			// A fresh cache per case keeps the checks independent.
			testCache, _ := lru.New(10)
			AddEventSentToCache(testCache, tc.firstEvent)
			found, _ := IsCloudEventSent(testCache, tc.secondEvent)
			if d := cmp.Diff(tc.wantFound, found); d != "" {
				t.Errorf("Cache check failure %s", diff.PrintWantGot(d))
			}
		})
	}
}
|
package configs
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestLoadConfig checks that config.yaml populates the expected defaults.
func TestLoadConfig(t *testing.T) {
	cfg, err := LoadConfig("config.yaml")
	assert.Nil(t, err)
	assert.Equal(t, "debug", cfg.Logger.Level)
	assert.Equal(t, "127.0.0.1:8180", cfg.Application.Addr)
	assert.Equal(t, []string{"stdout"}, cfg.Logger.OutputPaths)
}
// TestLoadConfig_WithEnv verifies that JFISH_* environment variables override
// values from config.yaml.
func TestLoadConfig_WithEnv(t *testing.T) {
	_ = os.Setenv("JFISH_LOGGER_LEVEL", "info")
	_ = os.Setenv("JFISH_APPLICATION_ADDR", "www.jellyfish.com:80")
	// Unset on exit so the overrides do not leak into other tests in the
	// package (the original left them set for the rest of the run).
	defer func() {
		_ = os.Unsetenv("JFISH_LOGGER_LEVEL")
		_ = os.Unsetenv("JFISH_APPLICATION_ADDR")
	}()
	cfg, err := LoadConfig("config.yaml")
	assert.Nil(t, err)
	assert.Equal(t, "info", cfg.Logger.Level)
	assert.Equal(t, "www.jellyfish.com:80", cfg.Application.Addr)
}
|
package user
import (
. "github.com/jsl0820/wechat"
"testing"
)
// init configures the wechat SDK once before any test in this package runs.
func init() {
	// NOTE(review): real-looking app credentials are hard-coded and committed
	// here; they should be injected via environment/config and this secret
	// should be rotated.
	config := Config{
		WxAppId:     "wx582ef3694f7a7546",
		WxAppSecret: "148ee9063222674ef03e4c21776e02cd",
	}
	WxConfig(config)
}
// TestCreate creates a user tag named "dog122" and logs the outcome.
func TestCreate(t *testing.T) {
	userTag := &UserTag{}
	createdID, createErr := userTag.Create("dog122")
	t.Log(createdID)
	t.Log(createErr)
}
// TestList fetches the user tags and logs the response and error.
func TestList(t *testing.T) {
	userTag := &UserTag{}
	resp, listErr := userTag.List()
	t.Log(*resp)
	t.Log(listErr)
}
// TestDel deletes tag 104 and logs whether the deletion reported success.
func TestDel(t *testing.T) {
	userTag := &UserTag{}
	t.Log(userTag.Del(104))
}
// TestUserInfo fetches a user's info by openid and logs the remark and error.
func TestUserInfo(t *testing.T) {
	user := &User{}
	info, infoErr := user.Info("oKPxfwCpNKAxAA01yjjWt1WJY6-k")
	t.Log(info.Remark)
	t.Log(infoErr)
}
// TestUserList calls List with an openid cursor and logs the result.
func TestUserList(t *testing.T) {
	user := &User{}
	list, listErr := user.List("oKPxfwCpNKAxAA01yjjWt1WJY6-k")
	t.Log(list)
	t.Log(listErr)
}
// TestUserRemark sets a remark ("male") on the user and logs the outcome.
func TestUserRemark(t *testing.T) {
	user := &User{}
	t.Log(user.Remark("oKPxfwCpNKAxAA01yjjWt1WJY6-k", "male"))
}
// TestUserBlock blocks two users and logs whether the call succeeded.
func TestUserBlock(t *testing.T) {
	user := &User{}
	succeeded := user.Block(
		"oKPxfwCpNKAxAA01yjjWt1WJY6-k",
		"oKPxfwK493kkbIH1dBrIP-nBADBc",
	)
	t.Log(succeeded)
}
// TestBlackList retrieves the black list starting from an openid and logs it.
func TestBlackList(t *testing.T) {
	user := &User{}
	list, listErr := user.BlackList("oKPxfwCpNKAxAA01yjjWt1WJY6-k")
	t.Log(list)
	t.Log(listErr)
}
// TestBlackCancel un-blocks two users and logs whether the call succeeded.
func TestBlackCancel(t *testing.T) {
	user := &User{}
	cancelled := user.BlockCancel(
		"oKPxfwCpNKAxAA01yjjWt1WJY6-k",
		"oKPxfwK493kkbIH1dBrIP-nBADBc",
	)
	t.Log(cancelled)
}
|
// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package executer allows execute Pending association and InProgress association
package executer
import (
"github.com/aws/amazon-ssm-agent/agent/association/taskpool"
"github.com/aws/amazon-ssm-agent/agent/context"
docModel "github.com/aws/amazon-ssm-agent/agent/docmanager/model"
"github.com/aws/amazon-ssm-agent/agent/task"
"github.com/stretchr/testify/mock"
)
// DocumentExecuterMock stands for a mocked document executer. It embeds
// testify's mock.Mock so tests can record calls and program return values.
type DocumentExecuterMock struct {
	mock.Mock
}
// ExecutePendingDocument mocks the implementation of ExecutePendingDocument,
// recording the call and returning the error configured by the test.
// (The original comment incorrectly referred to ExecuteInProgressDocument.)
func (m *DocumentExecuterMock) ExecutePendingDocument(context context.T, pool taskpool.T, interimDocState *docModel.DocumentState) error {
	args := m.Called(context, pool, interimDocState)
	return args.Error(0)
}
// ExecuteInProgressDocument mocks implementation for ExecuteInProgressDocument.
// NOTE(review): this no-op does not record the call via m.Called, so
// expectations on it cannot be asserted — confirm that is intentional.
func (m *DocumentExecuterMock) ExecuteInProgressDocument(context context.T, interimDocState *docModel.DocumentState, cancelFlag task.CancelFlag) {
}
|
package main
import (
"database/sql"
"log"
"testing"
"github.com/olivere/elastic"
)
// recordCount is the number of rows inserted by the insert benchmarks.
var recordCount = 10000

// searchCount is the number of queries issued by the read benchmarks.
var searchCount = 1000
func BenchmarkInsertRandomRecordsToManticore(b *testing.B) {
var err error
var db *sql.DB
db, err = InitSphinxConnection("9306")
if err != nil {
log.Println(err)
return
}
defer db.Close()
err = InsertRandomRecordToSphinx(db, recordCount)
if err != nil {
log.Println(err)
return
}
}
func BenchmarkInsertRandomRecordsToSphinx(b *testing.B) {
var err error
var db *sql.DB
db, err = InitSphinxConnection("9307")
if err != nil {
log.Println(err)
return
}
defer db.Close()
err = InsertRandomRecordToSphinx(db, recordCount)
if err != nil {
log.Println(err)
return
}
}
func BenchmarkInsertRandomRecordsToElastic(b *testing.B) {
var err error
var client *elastic.Client
client, err = InitElasticConnection()
if err != nil {
log.Println(err)
return
}
err = InsertRandomRecordToElastic(client, recordCount)
if err != nil {
log.Println(err)
return
}
}
func BenchmarkReadManticoreIds(b *testing.B) {
var err error
var db *sql.DB
db, err = InitSphinxConnection("9306")
if err != nil {
log.Println(err)
return
}
defer db.Close()
var i int
for i = 0; i < searchCount; i++ {
_, err = ReadSphinxIds(db, "MATCH('"+loremIpsumGenerator.Word()+"')")
if err != nil {
log.Println(err)
return
}
}
}
func BenchmarkReadSphinxIds(b *testing.B) {
var err error
var db *sql.DB
db, err = InitSphinxConnection("9307")
if err != nil {
log.Println(err)
return
}
defer db.Close()
var i int
for i = 0; i < searchCount; i++ {
_, err = ReadSphinxIds(db, "MATCH('"+loremIpsumGenerator.Word()+"')")
if err != nil {
log.Println(err)
return
}
}
}
func BenchmarkReadElasticIds(b *testing.B) {
var err error
var client *elastic.Client
client, err = InitElasticConnection()
if err != nil {
log.Println(err)
return
}
var i int
for i = 0; i < searchCount; i++ {
_, err = ReadElasticIds(client)
if err != nil {
log.Println(err)
return
}
}
}
|
package main
import (
"fmt"
"github.com/bromaniac/advent_of_code/2021/util"
"strconv"
)
// main counts how many consecutive measurements in the puzzle input increase
// and prints the total (Advent of Code 2021, day 1).
func main() {
	lines := input.Readln()
	increases := 0
	// Compare each value with its successor; the last element has none.
	for i := 0; i+1 < len(lines); i++ {
		current, _ := strconv.Atoi(lines[i])
		next, _ := strconv.Atoi(lines[i+1])
		if current < next {
			increases++
		}
	}
	fmt.Println("Num of increases: ", increases)
}
|
package bit
import "testing"
// rangeBitwiseAnd returns the bitwise AND of every integer in [m, n].
// It repeatedly clears the lowest set bit of n until n no longer exceeds m;
// what remains is the common high-bit prefix of the whole range.
func rangeBitwiseAnd(m int, n int) int {
	for n > m {
		n &= n - 1
	}
	return n
}
// Test_201 logs results for LeetCode 201 sample inputs:
// 5..7 (101,110,111) ANDs to 100 == 4, and 0..1 ANDs to 0.
func Test_201(t *testing.T) {
	t.Log(rangeBitwiseAnd(5, 7))
	t.Log(rangeBitwiseAnd(0, 1))
}
|
package service
import (
"context"
"fmt"
"testing"
"time"
)
// TestNewTCPService spins up a local TCP sender/receiver pair on port 10000
// and prints transfer rates for ten seconds.
// NOTE(review): the test asserts nothing after setup and relies on sleeps for
// synchronization, so it can flake or pass vacuously.
func TestNewTCPService(t *testing.T) {
	tcpService := NewTCPService(context.Background())
	err := tcpService.StartListen(10000)
	if err != nil {
		t.Fatal(err)
	}
	err = tcpService.StartSend("127.0.0.1", 10000)
	if err != nil {
		t.Fatal(err)
	}
	// Give the connection a moment to register in the send/receive lists.
	time.Sleep(50 * time.Millisecond)
	senderId := tcpService.SendList()[0]
	receiverId := tcpService.ReceiveList()[0]
	sender, _ := tcpService.GetSender(senderId)
	receiver, _ := tcpService.GetReceiver(receiverId)
	// Print instantaneous/average send and receive rates once a second.
	go func() {
		for {
			fmt.Println(fmt.Sprintf("发送瞬时:%.3f,\t发送平均%.3f\t接收瞬时:%.3f\t接收平均%.3f (b/ms)",
				sender.Recorder.InstantV(), sender.Recorder.AverageV(),
				receiver.Recorder.InstantV(), receiver.Recorder.AverageV()))
			time.Sleep(time.Second)
		}
	}()
	// Let the transfer run before the test (and the logging goroutine) exits.
	time.Sleep(10 * time.Second)
}
|
package hook
import (
"github.com/utahta/momoclo-channel/validator"
)
type (
	// PersistenceBeforeSaver is implemented by entities that need a hook run
	// immediately before they are persisted.
	PersistenceBeforeSaver interface {
		BeforeSave()
	}
)
// BeforeSave invokes src's BeforeSave hook when src implements
// PersistenceBeforeSaver; otherwise it is a no-op.
func BeforeSave(src interface{}) {
	hooked, ok := src.(PersistenceBeforeSaver)
	if !ok {
		return
	}
	hooked.BeforeSave()
}
// Validate runs struct validation on src and returns any validation error.
func Validate(src interface{}) error {
	return validator.Validate(src)
}
|
package enty
// Video is the GORM model for a stored video entry; URL carries a UNIQUE
// constraint so a video cannot be registered twice.
type Video struct {
	ID    uint64 `gorm:"primary_key;auto_increment" json:"id"`
	Title string `json:"title" gorm:"type:varchar(100)"`
	URL   string `json:"url" gorm:"type:varchar(300);UNIQUE"`
	Email string `json:"email" gorm:"type:varchar(300)"`
}
|
package main
import (
"syscall/js"
)
// main registers the JS-callable bindings and then blocks forever so the
// WebAssembly module stays alive to service callbacks.
func main() {
	registerCallbacks()
	select {}
}
// registerCallbacks exposes Go functions on the JS global object.
func registerCallbacks() {
	js.Global().Set("hello", js.FuncOf(Hello))
}
// Hello is the JS-facing callback bound to the global "hello"; it ignores its
// arguments and returns a fixed string.
func Hello(this js.Value, args []js.Value) interface{} {
	return "Username"
}
|
package handlers
import (
"github.com/MetalRex101/auth-server/app/services"
"github.com/jinzhu/gorm"
)
// Handlers aggregates every HTTP handler group exposed by the auth server.
type Handlers struct {
	Oauth *Oauth
	Api   *Api
}
// Oauth groups the handlers serving the OAuth endpoints.
type Oauth struct {
	AuthorizeClientHandler *AuthorizeClientHandler
	AccessTokenHandler     *AccessTokenHandler
	ProfileHandler         *ProfileHandler
}
// Api groups the handlers serving the non-OAuth API endpoints.
type Api struct {
	ActivateHandler *ActivateHandler
	RegisterHandler *RegisterHandler
	ProfileHandler  *ProfileHandler
}
// DefaultResponse is a minimal response payload carrying only a status code.
type DefaultResponse struct {
	Status int
}
// InitHandlers wires all oauth and API HTTP handlers with their service
// dependencies and returns the aggregate Handlers struct.
// NOTE(review): Oauth.ProfileHandler is never assigned here — confirm whether
// it is intentionally nil or populated elsewhere.
func InitHandlers(managers *services.Managers, merger *services.UserMerger, db *gorm.DB) *Handlers {
	return &Handlers{
		Oauth: &Oauth{
			AuthorizeClientHandler: NewAuthorizeClientHandler(managers.OauthClient, managers.User),
			AccessTokenHandler:     NewAccessTokenClientHandler(managers.OauthClient, managers.OauthSession),
		},
		Api: &Api{
			ActivateHandler: NewActivateHandler(
				managers.OauthSession,
				managers.OauthClient,
				managers.User,
				managers.Email,
				merger,
			),
			RegisterHandler: NewRegisterHandler(
				managers.OauthSession,
				managers.OauthClient,
				managers.User,
				managers.Email,
				merger,
				db,
			),
			ProfileHandler: NewProfileHandler(
				managers.OauthSession,
				managers.OauthClient,
				managers.User,
				managers.Email,
			),
		},
	}
}
package version
// commit holds the VCS commit hash; presumably injected at link time
// (e.g. -ldflags "-X") — confirm against the build scripts.
var commit string

// Commit returns the build-time commit hash, or "" when it was not set.
func Commit() string {
	return commit
}
|
package main
import "fmt"
// main prints the integer value of the sample Roman numeral "LVIII" (58).
func main() {
	fmt.Println(romanToInt("LVIII"))
}
// romanToInt converts the Roman numeral s to its integer value.
// Scanning left to right, a symbol smaller than its right neighbour is
// subtracted (e.g. IV = 4); otherwise it is added.
// Unlike the original (which indexed s[len(s)-1] and panicked), an empty
// string safely returns 0.
func romanToInt(s string) int {
	values := map[byte]int{
		'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000,
	}
	sum := 0
	for i := 0; i < len(s); i++ {
		v := values[s[i]]
		if i+1 < len(s) && v < values[s[i+1]] {
			sum -= v
		} else {
			sum += v
		}
	}
	return sum
}
|
package main
import (
"io/ioutil"
"net/url"
"strings"
"time"
h "github.com/xorpaul/gohelper"
yaml "gopkg.in/yaml.v2"
)
// configSettings contains the key value pairs from the config file.
// NOTE(review): Timeout is a time.Duration; see the default-handling note in
// readConfigfile regarding units.
type configSettings struct {
	Timeout                                 time.Duration `yaml:"timeout"`
	ServiceUrl                              string        `yaml:"service_url"`
	ServiceUrlCaFile                        string        `yaml:"service_url_ca_file"`
	Fqdn                                    string        `yaml:"requesting_fqdn"`
	PrivateKey                              string        `yaml:"ssl_private_key,omitempty"`
	CertificateFile                         string        `yaml:"ssl_certificate_file,omitempty"`
	RequireAndVerifyClientCert              bool          `yaml:"ssl_require_and_verify_client_cert"`
	RestartConditionScript                  string        `yaml:"restart_condition_script"`
	RestartConditionScriptExitCodeForReboot int           `yaml:"restart_condition_script_exit_code_for_reboot"`
	OsRestartHooksDir                       string        `yaml:"os_restart_hooks_dir"`
	OsRestartHooksAllowFail                 bool          `yaml:"os_restart_hooks_allow_fail"`
}
// readConfigfile creates the configSettings struct from the config file.
// It terminates the process via h.Fatalf on a missing file, a parse failure,
// or any invalid/missing mandatory setting.
func readConfigfile(configFile string) configSettings {
	if !h.FileExists(configFile) {
		h.Fatalf("config file '" + configFile + "' not found!")
	}
	h.Debugf("Trying to read config file: " + configFile)
	data, err := ioutil.ReadFile(configFile)
	if err != nil {
		h.Fatalf("readConfigfile(): There was an error parsing the config file " + configFile + ": " + err.Error())
	}
	var config configSettings
	err = yaml.Unmarshal([]byte(data), &config)
	if err != nil {
		h.Fatalf("In config file " + configFile + ": YAML unmarshal error: " + err.Error())
	}
	//fmt.Print("config: ")
	//fmt.Printf("%+v\n", config)
	// set default timeout to 5 seconds if no timeout setting found
	// NOTE(review): Timeout is a time.Duration, so the literal 5 is five
	// *nanoseconds* unless callers multiply by a unit later — confirm the
	// intended "5 seconds" default is actually applied downstream.
	if config.Timeout == 0 {
		config.Timeout = 5
	}
	if len(config.ServiceUrl) < 1 {
		h.Fatalf("Missing service_url setting in config file: " + configFile)
	}
	// Validate the URL shape and normalize it to end with a slash.
	_, err = url.ParseRequestURI(config.ServiceUrl)
	if err != nil {
		h.Fatalf("Failed to parse/validate service_url setting " + config.ServiceUrl + " in config file: " + configFile)
	}
	if !strings.HasSuffix(config.ServiceUrl, "/") {
		config.ServiceUrl = config.ServiceUrl + "/"
	}
	// Optional TLS files must exist when configured.
	if len(config.ServiceUrlCaFile) > 0 && !h.FileExists(config.ServiceUrlCaFile) {
		h.Fatalf("Failed to find configured service_url_ca_file " + config.ServiceUrlCaFile)
	}
	if len(config.PrivateKey) > 0 && !h.FileExists(config.PrivateKey) {
		h.Fatalf("Failed to find configured ssl_private_key " + config.PrivateKey)
	}
	if len(config.CertificateFile) > 0 && !h.FileExists(config.CertificateFile) {
		h.Fatalf("Failed to find configured ssl_certificate_file " + config.CertificateFile)
	}
	// Mandatory script and hooks dir must be configured and present on disk.
	if len(config.RestartConditionScript) < 1 {
		h.Fatalf("Missing restart_condition_script setting in config file: " + configFile)
	} else if !h.FileExists(config.RestartConditionScript) {
		h.Fatalf("Failed to find configured restart_condition_script " + config.RestartConditionScript)
	}
	if len(config.OsRestartHooksDir) < 1 {
		h.Fatalf("Missing os_restart_hooks_dir setting in config file: " + configFile)
	} else if !h.FileExists(config.OsRestartHooksDir) {
		h.Fatalf("Failed to find configured os_restart_hooks_dir " + config.OsRestartHooksDir)
	}
	return config
}
|
package auth
import (
. "ftnox.com/common"
"ftnox.com/db"
"code.google.com/p/go.crypto/scrypt"
"database/sql"
"errors"
"bytes"
"strings"
"math"
)
var (
	// ERR_DUPLICATE_ADDRESS is returned by SaveUser when the email address
	// already exists.
	ERR_DUPLICATE_ADDRESS = errors.New("ERR_DUPLICATE_ADDRESS")
)
// USER

// User is a registered account row in the auth_user table.
type User struct {
	Id        int64  `json:"id" db:"id,autoinc"`
	Email     string `json:"email" db:"email"`
	EmailCode string `json:"-" db:"email_code"`
	EmailConf int32  `json:"-" db:"email_conf"`
	// Password carries the plaintext only during SaveUser, which scrypts it
	// into Scrypt/Salt; it has no db tag — presumably never persisted, verify
	// against db.GetModelInfo's tag handling.
	Password string `json:"-"`
	Scrypt   []byte `json:"-" db:"scrypt"`
	Salt     []byte `json:"-" db:"salt"`
	TOTPKey  []byte `json:"-" db:"totp_key"`
	TOTPConf int32  `json:"totpConf" db:"totp_conf"`
	ChainIdx int32  `json:"-" db:"chain_idx"`
	// Roles is a comma-separated role list; see HasRole.
	Roles string `json:"roles" db:"roles"`
}

// UserModel caches column metadata for User, used to build SQL below.
var UserModel = db.GetModelInfo(new(User))
// HasRole reports whether role appears in the user's comma-separated Roles.
func (user *User) HasRole(role string) bool {
	for _, candidate := range strings.Split(user.Roles, ",") {
		if candidate == role {
			return true
		}
	}
	return false
}
// Authenticate reports whether password, scrypted with the user's salt,
// matches the stored hash. Panics if the scrypt parameters are rejected.
func (user *User) Authenticate(password string) bool {
	derived, err := scrypt.Key([]byte(password), user.Salt, 16384, 8, 1, 32)
	if err != nil {
		panic(err)
	}
	return bytes.Equal(derived, user.Scrypt)
}
// Create a new user.
// SaveUser persists user inside a serializable transaction: it fills in a
// missing email confirmation code and TOTP key, scrypts a plaintext password
// when one is set, inserts the row, mirrors the autoinc id into chain_idx,
// and creates an initial API key. Returns ERR_DUPLICATE_ADDRESS when the
// email already exists; other DB failures panic (package convention).
func SaveUser(user *User) (*User, error) {
	// Create email confirmation code.
	if user.EmailCode == "" { user.EmailCode = RandId(24) }
	// Create TOTPKey.
	if len(user.TOTPKey) == 0 { user.TOTPKey = RandBytes(10) }
	// Scrypt the password.
	if user.Password != "" {
		salt := RandId(12)
		scryptPass, err := scrypt.Key([]byte(user.Password), []byte(salt), 16384, 8, 1, 32)
		if err != nil { return nil, err }
		user.Salt = []byte(salt)
		user.Scrypt = scryptPass
	}
	err := db.DoBeginSerializable(func(tx *db.ModelTx) {
		// Insert into users table.
		err := tx.QueryRow(
			`INSERT INTO auth_user (`+UserModel.FieldsInsert+`)
			VALUES (`+UserModel.Placeholders+`)
			RETURNING id`,
			user,
		).Scan(&user.Id)
		if err != nil { panic(err) }
		// Set the chain_idx to mirror the autoinc id; it must fit in int32.
		if user.Id > math.MaxInt32 { panic("User autoinc id has exceeded MaxInt32") }
		user.ChainIdx = int32(user.Id)
		_, err = tx.Exec(
			`UPDATE auth_user
			SET chain_idx = id
			WHERE id=?`,
			user.Id,
		)
		if err != nil { panic(err) }
		// Generate an API key for the user
		apiKey := &APIKey{Key: RandId(24), UserId: user.Id}
		SaveAPIKey(tx, apiKey)
	})
	// Failures inside the transaction surface here as typed DB errors.
	switch db.GetErrorType(err) {
	case db.ERR_DUPLICATE_ENTRY:
		return nil, ERR_DUPLICATE_ADDRESS
	case nil:
		break
	default:
		panic(err)
	}
	return user, nil
}
// UpdateUserSetEmailConfirmed marks the account holding emailCode as having a
// confirmed email address. Panics on database failure (package convention).
func UpdateUserSetEmailConfirmed(emailCode string) {
	if _, err := db.Exec(
		`UPDATE auth_user
		SET email_conf=1
		WHERE email_code=?`,
		emailCode,
	); err != nil {
		panic(err)
	}
}
// UpdateUserSetTOTPConfirmed marks userId's TOTP enrollment as confirmed.
// Panics on database failure (package convention).
func UpdateUserSetTOTPConfirmed(userId int64) {
	if _, err := db.Exec(
		`UPDATE auth_user
		SET totp_conf=1
		WHERE id=?`,
		userId,
	); err != nil {
		panic(err)
	}
}
// LoadUserByEmail returns the user with the given email, or nil when no row
// matches. Unexpected database errors panic (package convention).
func LoadUserByEmail(email string) *User {
	user := User{}
	err := db.QueryRow(
		`SELECT `+UserModel.FieldsSimple+`
		FROM auth_user WHERE email=?`,
		email,
	).Scan(&user)
	switch db.GetErrorType(err) {
	case nil:
		return &user
	case sql.ErrNoRows:
		return nil
	default:
		panic(err)
	}
}
// LoadUser returns the user with the given id, or nil when no row matches.
// Unexpected database errors panic (package convention).
func LoadUser(userId int64) *User {
	user := User{}
	err := db.QueryRow(
		`SELECT `+UserModel.FieldsSimple+`
		FROM auth_user WHERE id=?`,
		userId,
	).Scan(&user)
	switch db.GetErrorType(err) {
	case nil:
		return &user
	case sql.ErrNoRows:
		return nil
	default:
		panic(err)
	}
}
// API KEY

// APIKey is a row of the auth_api_key table linking a key string to a user.
type APIKey struct {
	Key    string `json:"key" db:"key"`
	UserId int64  `json:"-" db:"user_id"`
	Roles  string `json:"roles" db:"roles"`
}

// APIKeyModel caches column metadata for APIKey, used to build SQL below.
var APIKeyModel = db.GetModelInfo(new(APIKey))
// SaveAPIKey inserts apiKey within tx and returns it.
// Panics on database failure (package convention).
func SaveAPIKey(tx *db.ModelTx, apiKey *APIKey) *APIKey {
	if _, err := tx.Exec(
		`INSERT INTO auth_api_key (`+APIKeyModel.FieldsInsert+`)
		VALUES (`+APIKeyModel.Placeholders+`)`,
		apiKey,
	); err != nil {
		panic(err)
	}
	return apiKey
}
// LoadAPIKey returns the APIKey with the given key string, or nil when no row
// matches. Unexpected database errors panic (package convention).
func LoadAPIKey(key string) *APIKey {
	apiKey := APIKey{}
	err := db.QueryRow(
		`SELECT `+APIKeyModel.FieldsSimple+`
		FROM auth_api_key WHERE key=?`,
		key,
	).Scan(&apiKey)
	switch db.GetErrorType(err) {
	case nil:
		return &apiKey
	case sql.ErrNoRows:
		return nil
	default:
		panic(err)
	}
}
// LoadAPIKeysByUser returns every API key belonging to userId.
// Panics on database failure (package convention).
func LoadAPIKeysByUser(userId int64) []*APIKey {
	rows, err := db.QueryAll(APIKey{},
		`SELECT `+APIKeyModel.FieldsSimple+`
		FROM auth_api_key
		WHERE user_id=?`,
		userId,
	)
	if err != nil { panic(err) }
	// QueryAll returns interface{}; the model argument guarantees this type.
	return rows.([]*APIKey)
}
|
//Copyright (C) 2020 Larry Rau. all rights reserved
package lex
import (
"fmt"
"net"
"strings"
"unicode"
)
// isEndOfLine reports whether r is a carriage return or newline.
func isEndOfLine(r rune) bool {
	switch r {
	case '\r', '\n':
		return true
	}
	return false
}
// isAlphaNumeric reports whether r is a letter, digit, or underscore.
func isAlphaNumeric(r rune) bool {
	return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_'
}
// isStmtStart reports whether r can begin a statement (i.e. r is a member of
// runSTMTSTART). The if-true-else-false wrapper was folded into a direct
// ContainsRune call.
func isStmtStart(r rune) bool {
	return strings.ContainsRune(runSTMTSTART, r)
}
// isOp reports whether r is one of the valid operator runes (members of
// runOps). The if-true-else-false wrapper was folded into a direct
// ContainsRune call.
func isOp(r rune) bool {
	return strings.ContainsRune(runOps, r)
}
// lexer states

// lexOuter scans between sections: it discards whitespace and comments until
// it reaches EOF or an alphanumeric rune that starts a section header.
func lexOuter(lex *Lexer) stateFn {
	// consume whitespace
	lex.acceptRun(runWhitespace)
	lex.ignore()
	switch r := lex.next(); {
	case r == symEOF:
		lex.emit(TokEOF)
		return nil
	case isAlphaNumeric(r):
		lex.backup()
		return lexSectionHeader
	case r == symHASH:
		// starting a comment; consume it, then resume at the outer level.
		lex.backup()
		lex.pushStateFn(lexOuter)
		return lexComment
	}
	// Unrecognized rune: drop any pushed state and keep scanning.
	lex.flushStateFn()
	return lexOuter
}
// lexSectionHeader scans the header of a section: a known section identifier
// followed by '{'. Each section body is delimited by '{' ... '}'.
// Native sections hand off to lexSectionNative; all others to lexSection.
func lexSectionHeader(lex *Lexer) stateFn {
	// find an identifier
	lex.acceptRun(runAlphanumsym)
	ident := lex.input[lex.start:lex.pos]
	section, ok := sections[ident]
	if !ok {
		return lex.errorf("expected section name, got: %v", ident)
	}
	lex.emit(section)
	lex.consumeWhitespace()
	// Expect the open curly. Read it exactly once: the original called
	// lex.next() a second time in the error path, which consumed an extra
	// rune and reported the wrong one in the message.
	if r := lex.next(); r != symLCURLY {
		return lex.errorf("expected left brace, got:%v", r)
	}
	lex.emit(TokLCURLY)
	lex.consumeWhitespace()
	if section == TokNATIVE {
		// we don't need/want to tokenize the statements of native; just pass along
		return lexSectionNative
	}
	return lexSection
}
// lexSection scans a non-native section, which is a set of statements.
// Statements can only start with a small set of symbols; '{'/'}' inside the
// section delimit a (non-nestable) sequence, and a bare '}' ends the section.
func lexSection(lex *Lexer) stateFn {
	lex.consumeWhitespace()
	switch r := lex.peek(); {
	case isStmtStart(r):
		return lexStatement
	case r == symHASH:
		// Comment: consume it, then come back to this state.
		lex.pushStateFn(lexSection)
		return lexComment
	case r == symLCURLY:
		lex.pos++ //eat {
		if lex.sequence {
			return lex.errorf("nested sequence not allowed")
		}
		lex.sequence = true
		lex.emit(TokSSEQ)
		return lexSection
	case r == symRCURLY:
		lex.pos++ //eat }
		if lex.sequence {
			// Closing a sequence, not the section itself.
			lex.emit(TokESEQ)
			lex.sequence = false
			return lexSection
		}
		//section end
		lex.emit(TokRCURLY)
		return lexOuter
	}
	return lex.errorf("stopping here")
}
// lexStatement will scan statements within a non-native section.
// A statement is a line-terminated sequence of tokens.
// Comments are consumed and ignored; a comment at the end of a line is
// treated as an "end of statement". A native formula (started with '|')
// ending at a newline also marks an end of statement.
func lexStatement(lex *Lexer) stateFn {
	lex.consumeNonNLwhite()
	var r rune
	// '=' (not ':=') keeps r visible to the errorf at the bottom.
	switch r = lex.peek(); {
	case unicode.IsLetter(r):
		lex.pushStateFn(lexStatement)
		return lexIdentifier
	case unicode.IsDigit(r):
		lex.pushStateFn(lexStatement)
		return lexNumbers
	case isOp(r):
		return lexOp
	case r == symCOLON:
		lex.pushStateFn(lexStatement)
		lex.incIgnore() //ignore symCOLON
		return lexPort
	case r == symHYPH:
		lex.pos++
		lex.emit(TokHYPH)
		return lexStatement
	case r == symLPAREN:
		lex.pos++
		lex.emit(TokSLIST)
		return lexList
	case r == symSTAR:
		lex.pos++
		lex.emit(TokSTAR)
		return lexStatement
	case r == symLBRACK:
		lex.pos++
		lex.emit(TokLBRACK)
		return lexNAT
	case r == symPIPE:
		lex.pos++ //eat pipe
		if !scanFormula(lex) {
			return lex.errorf("expected a native formula")
		}
		return lexStatement
	case isEndOfLine(r):
		lex.pos++
		lex.emit(TokENDSTMT)
		return lexSection
	case r == symHASH:
		// A trailing comment terminates the statement; the comment itself is
		// consumed by lexSection on the next pass.
		lex.emit(TokENDSTMT)
		return lexSection
	}
	return lex.errorf("unexpected char:%v", r)
}
// lexIdentifier consumes a valid identifier (first char already verified) and
// emits either its keyword token or TokIDENTIFIER, then resumes the pushed state.
func lexIdentifier(lex *Lexer) stateFn {
	lex.acceptRun(runAlphanumsym)
	word := lex.input[lex.start:lex.pos]
	if tok, isKeyword := allkeywords[word]; isKeyword {
		lex.emit(tok)
		return lex.popStateFn()
	}
	lex.emit(TokIDENTIFIER)
	return lex.popStateFn()
}
// lexNumbers scans host or net addresses or address ranges.
// A token containing '/' is validated as CIDR and emitted as TokNETADDR in
// its canonical form; otherwise it must parse as a plain IP and is emitted
// as TokHOSTADDR.
func lexNumbers(lex *Lexer) stateFn {
	lex.acceptRun(runIPN)
	numb := lex.input[lex.start:lex.pos]
	if strings.IndexByte(numb, '/') >= 0 {
		//looks like a network
		_, ipnet, err := net.ParseCIDR(numb)
		if err != nil {
			return lex.errorf("bad network: %v", err)
		}
		// Send the parsed network directly (bypassing emit) so the token text
		// is the canonicalized form, not the raw input.
		lex.tokens <- Token{TokNETADDR, fmt.Sprintf("%v", ipnet)}
		lex.start = lex.pos
	} else {
		ip := net.ParseIP(numb)
		if ip == nil {
			return lex.errorf("bad host address: %s", numb)
		}
		lex.emit(TokHOSTADDR)
	}
	return lex.popStateFn()
}
// lexList scans a parenthesized list of identifiers/addresses, e.g.
// (anident,10.1.0.1), emitting list elements until ')' closes the list.
func lexList(lex *Lexer) stateFn {
	//look for identifiers or numbers
	lex.consumeNonNLwhite()
	// Assign with '=' (not ':=') so the errorf below reports the rune that
	// was actually seen: the original shadowed r inside the switch header,
	// leaving the outer r at its zero value in the error message.
	var r rune
	switch r = lex.peek(); {
	case unicode.IsLetter(r):
		lex.pushStateFn(lexList)
		return lexIdentifier
	case unicode.IsDigit(r):
		lex.pushStateFn(lexList)
		return lexNumbers
	case r == symCOMA:
		lex.pos++
		return lexList
	case r == symHYPH:
		lex.pos++
		lex.emit(TokHYPH)
		return lexList
	case r == symRPAREN:
		lex.pos++
		lex.emit(TokELIST)
		return lexStatement
	}
	return lex.errorf("in list unexpected char:%v", r)
}
// lexNAT scans the NAT descriptors '['<ident>|<address>[':'<port>]']'.
// We are already inside the NAT descriptor, looking for the end of NAT.
// NOTE(review): isAlphaNumeric also matches digits, so the unicode.IsDigit
// case below appears unreachable and numeric addresses would be lexed as
// identifiers — confirm whether that is intended.
func lexNAT(lex *Lexer) stateFn {
	switch r := lex.peek(); {
	case r == symDOT:
		lex.pos++
		lex.emit(TokDOT)
		return lexNAT
	case r == symRBRACK:
		lex.pos++
		lex.emit(TokRBRACK)
		return lexStatement
	case r == symCOLON:
		lex.pushStateFn(lexNAT)
		lex.incIgnore() //skip symCOLON
		return lexPort
	case isAlphaNumeric(r):
		lex.pushStateFn(lexNAT)
		return lexIdentifier
	case unicode.IsDigit(r):
		lex.pushStateFn(lexNAT)
		return lexNumbers
	}
	return lex.errorf("unexpected symbol in NAT descriptor")
}
// lexOp scans for an operator. Operators are 1 or 2 characters:
//	>  == allow
//	<> == bidirectional allow
//	/  == drop
//	// == reject
func lexOp(lex *Lexer) stateFn {
	r := lex.next()
	switch r {
	case symGTHAN:
		lex.emit(TokALLOW)
		return lexStatement
	case symLTHAN:
		rr := lex.peek()
		if rr == symGTHAN {
			lex.pos++
			lex.emit(TokTWALLOW)
			return lexStatement
		}
		// Fixed typo in the error message ("exected" -> "expected").
		return lex.errorf("expected operator got:%v%v", r, rr)
	case symSLASH:
		rr := lex.peek()
		if rr == symSLASH {
			lex.pos++
			lex.emit(TokREJECT)
			return lexStatement
		}
		// A single slash is a drop.
		lex.emit(TokDROP)
		return lexStatement
	}
	return lex.errorf("expected operator, got:%v", r)
}
// lexPort will scan a port number.
// Allows ':'<int>, a range ':'<int>'-'<int>, or a list '('<int>[','<int>]*')',
// where a list element may itself be a range: '('<int>','<int>'-'<int>','<int>')'.
func lexPort(lex *Lexer) stateFn {
	//lex.consumeWhitespace()
	// First: a bare port number, or the opening of a (non-nestable) port list.
	switch r := lex.peek(); {
	case unicode.IsDigit(r):
		lex.acceptRun(runDigits)
		lex.emit(TokPORT)
	case r == symLPAREN:
		if lex.list {
			return lex.errorf("nested port list not allowed")
		}
		lex.list = true
		lex.pos++
		lex.emit(TokSLIST)
		return lexPort
	}
	// Second: separators/terminators that may follow the number just read.
	switch r := lex.peek(); {
	case r == symCOMA:
		if !lex.list {
			return lex.errorf("unexpected ',' in port")
		}
		lex.incIgnore()
		return lexPort
	case r == symHYPH:
		lex.pos++
		lex.emit(TokHYPH)
		return lexPort
	case r == symRPAREN:
		if !lex.list {
			return lex.errorf("unexpected end of port list")
		}
		lex.list = false
		lex.pos++
		lex.emit(TokELIST)
	}
	// leaving port scanning
	return lex.popStateFn()
}
// lexSectionNative will scan the native section as raw lines: each line is
// emitted as a single Formula token rather than being tokenized.
func lexSectionNative(lex *Lexer) stateFn {
	lex.consumeWhitespace()
	r := lex.next()
	switch {
	case r == symHASH:
		lex.backup()
		lex.pushStateFn(lexSectionNative)
		return lexComment
	case isEndOfLine(r):
		lex.emit(TokENDSTMT)
		return lexSectionNative
	case r == symRCURLY:
		// End of the native section.
		lex.emit(TokRCURLY)
		return lexOuter
	default:
		// scan and emit each line as a separate Formula token
		if scanFormula(lex) {
			return lexSectionNative
		}
	}
	return lex.errorf("unexpected input: %v", r)
}
// scanFormula consumes the rest of the line and emits it as TokFORMULA.
// It returns true when a formula was found, false otherwise.
func scanFormula(lex *Lexer) bool {
	lex.consumeWhitespace() //allow and ignore leading ws
	if !lex.acceptRunUntil(runLineTail) {
		return false //formula not found
	}
	lex.emit(TokFORMULA)
	return true
}
// lexComment scans a comment. The comment marker is known to be present.
// The whole comment line (marker through EOL) is discarded, then control
// returns to whatever state was pushed before entering.
func lexComment(lex *Lexer) stateFn {
	lex.pos++                  //eat comment marker
	lex.acceptRunUntil(runEOL) //read until eol
	lex.pos++                  //eat the EOL
	lex.ignore()               //eat the comment
	return lex.popStateFn()    //return to prev state
}
|
package standalone
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestLoadConfig verifies that testconfig.yaml parses into exactly one
// eventstore component with the expected metadata.
func TestLoadConfig(t *testing.T) {
	p := NewStandalone("./testconfig.yaml")
	config, err := p.LoadConfig()
	assert.Nil(t, err)
	assert.NotNil(t, config)
	assert.Equal(t, 1, len(config))
	assert.Equal(t, "eventstore", config[0].Kind)
	assert.Equal(t, "myeventstore", config[0].Metadata.Name)
	assert.Equal(t, "eventstore.azure.tablestorage", config[0].Spec.Type)
	assert.Equal(t, "storageAccountName", config[0].Spec.Metadata[0].Name)
	assert.Equal(t, "testaccount", config[0].Spec.Metadata[0].Value)
	assert.Equal(t, "storageAccountKey", config[0].Spec.Metadata[1].Name)
	assert.Equal(t, "testaccountkey", config[0].Spec.Metadata[1].Value)
}
|
package scheduler
import (
"github.com/rebelit/gome/common"
"github.com/rebelit/gome/devices"
"log"
"strconv"
"time"
)
// GoGoScheduler runs forever, waking every SCHEDULE_MIN minutes to apply any
// enabled device schedules found in the database. The error return exists for
// interface symmetry only: the loop never exits.
func GoGoScheduler() error {
	log.Println("[INFO] schedule runner, starting")
	for {
		processSchedules := true
		devs, err := devices.GetAllDevicesFromDb()
		if err != nil {
			log.Printf("[WARN] schedule runner, get all devices: %s", err)
			processSchedules = false
		}
		if len(devs) == 0 {
			log.Printf("[WARN] schedule runner, no devices found in the database")
			processSchedules = false
		}
		if processSchedules {
			for _, dev := range devs {
				doItForReal := true
				d, err := devices.GetDevice(dev)
				if err != nil {
					// Continues with a zero-value device; subsequent lookups
					// will also warn.
					log.Printf("[WARN] schedule runner, unable to get dbData for %s: %s", dev, err)
				}
				//check if device is online 'alive'
				devStatus, err := devices.GetDeviceAliveState(d.Name)
				if err != nil {
					log.Printf("[ERROR] scheduler runner, unable to get %s alive status: %s\n", d.Name, err)
				}
				devAlive, _ := strconv.ParseBool(devStatus)
				if devAlive {
					//get schedules data from redis
					hasSchedule, s, err := scheduleGet(d.Name)
					if err != nil {
						log.Printf("[WARN] schedule runner, unable to get %s schedule: %s", dev, err)
						doItForReal = false
					}
					if !hasSchedule {
						log.Printf("[DEBUG] schedule runner, no schedule for %s", dev)
						doItForReal = false
					}
					if doItForReal {
						if s.Status != "enable" {
							log.Printf("[DEBUG] schedule runner, %s has schedule defined but not enabled", dev)
							doItForReal = false
						}
					}
					if doItForReal {
						// Fire-and-forget: each device's schedule is applied
						// concurrently.
						go doSchedule(d, s.Schedules)
					}
				}
			}
		}
		time.Sleep(time.Minute * common.SCHEDULE_MIN)
	}
}
// doSchedule applies every schedule entry that matches today for device,
// turning components on or off when their actual state disagrees with the
// schedule window.
func doSchedule(device devices.Devices, schedules []Schedule) {
	_, iTime, day, _ := splitTime() //custom parse date/time
	for _, schedule := range schedules {
		if schedule.Day == day {
			if schedule.Status == "enable" {
				devComponentState, err := devices.GetDeviceComponentState(device.Name, schedule.Component)
				if err != nil {
					log.Printf("[ERROR] doSchedule, get %s %s state: %s\n", device, schedule.Component, err)
					return
				}
				componentState, _ := strconv.ParseBool(devComponentState) //state of the device component in the schedule
				onTime, _ := strconv.Atoi(schedule.On)                    //time of day device is on
				offTime, _ := strconv.Atoi(schedule.Off)                  //time of day device is off
				doChange, inSchedule := changeComponentState(componentState, iTime, onTime, offTime)
				if doChange {
					if inSchedule {
						log.Printf("[DEBUG] %s turn on\n", device.Name)
						devices.DoScheduledAction(device.Device, device.Name, schedule.Component, "on") //turn it on
					}
					if !inSchedule {
						// Replaced leftover personal debug tag "[ANDY]" with
						// the standard "[DEBUG]" level used everywhere else.
						log.Printf("[DEBUG] %s turn off\n", device.Name)
						devices.DoScheduledAction(device.Device, device.Name, schedule.Component, "off") //turn it off
					}
				}
			}
		}
	}
}
func changeComponentState(componentState bool, currentHour int, onTime int, offTime int) (changeState bool, inScheduleBlock bool){
reverseCheck := false
changeState = false
inScheduleBlock = false
if offTime <= onTime {
//spans a day PM to AM on schedule
reverseCheck = true
}
if !reverseCheck{
//does not span PM to AM
inScheduleBlock = inBetween(currentHour, onTime, offTime)
} else {
//spans a day PM to AM reverse check the schedule
inScheduleBlock = inBetweenReverse(currentHour, offTime, onTime)
}
if componentState {
if inScheduleBlock{
//leave it be change state:false
changeState = false
return changeState, inScheduleBlock
} else {
//change state:true. change the power control to false
changeState = true
return changeState, inScheduleBlock
}
} else {
if inScheduleBlock{
//change state:true. change the power control to true
changeState = true
return changeState, inScheduleBlock
}else {
//leave it be change state:false
changeState = false
return changeState, inScheduleBlock
}
}
} |
package dgoflake
import (
"testing"
"time"
)
// TestSnowflake_Increment checks that Increment exposes the low 12 bits of the ID.
func TestSnowflake_Increment(t *testing.T) {
	const id uint64 = 577645285396840449
	want := id & 0xFFF
	if got := ParseInt(id).Increment(); got != want {
		t.Errorf("unexpected increment value for snowflake %d: expected %d, got %d", id, want, got)
	}
}
// TestSnowflake_Int64 checks the signed-integer view of the snowflake.
func TestSnowflake_Int64(t *testing.T) {
	const id uint64 = 577645285396840449
	want := int64(id)
	if got := ParseInt(id).Int64(); got != want {
		t.Errorf("unexpected int64 value for snowflake %d: expected %d, got %d", id, want, got)
	}
}
// TestSnowflake_InternalProcessID checks extraction of bits 12-16 (process ID).
func TestSnowflake_InternalProcessID(t *testing.T) {
	const id uint64 = 577645285396840449
	want := (id & 0x1F000) >> 12
	if got := ParseInt(id).InternalProcessID(); got != want {
		t.Errorf("unexpected internal process ID value for snowflake %d: expected %d, got %d", id, want, got)
	}
}
// TestSnowflake_InternalWorkerID checks extraction of bits 17-21 (worker ID).
func TestSnowflake_InternalWorkerID(t *testing.T) {
	const id uint64 = 577645285396840449
	want := (id & 0x3E0000) >> 17
	if got := ParseInt(id).InternalWorkerID(); got != want {
		t.Errorf("unexpected internal worker ID value for snowflake %d: expected %d, got %d", id, want, got)
	}
}
// TestSnowflake_String checks the decimal string rendering of the snowflake.
func TestSnowflake_String(t *testing.T) {
	const id uint64 = 577645285396840449
	const want = "577645285396840449"
	if got := ParseInt(id).String(); got != want {
		t.Errorf("unexpected string value for snowflake %d: expected %s, got %s", id, want, got)
	}
}
func TestSnowflake_Timestamp(t *testing.T) {
var id uint64 = 577645285396840449
snowflake := ParseInt(id)
expected := time.Unix(0, ((int64(id)>>22)+int64(DiscordEpoch))*1000000).String()
actual := snowflake.Timestamp().String()
if expected != actual {
t.Errorf("unexpected timestamp value for snowflake %d: expected %s, got %s", id, expected, actual)
}
}
// TestSnowflake_UInt64 checks the round trip back to the raw uint64 ID.
func TestSnowflake_UInt64(t *testing.T) {
	const id uint64 = 577645285396840449
	if got := ParseInt(id).UInt64(); got != id {
		t.Errorf("unexpected uint64 value for snowflake %d: expected %d, got %d", id, id, got)
	}
}
// BenchmarkSnowflake_Timestamp measures repeated timestamp extraction.
func BenchmarkSnowflake_Timestamp(b *testing.B) {
	s := ParseInt(577654653257383975)
	for n := 0; n < b.N; n++ {
		s.Timestamp()
	}
}
|
package memstorage
import (
"context"
"errors"
"github.com/SmitSheth/Mini-twitter/internal/user"
"github.com/SmitSheth/Mini-twitter/internal/user/userpb"
pb "github.com/SmitSheth/Mini-twitter/internal/user/userpb"
)
// userRepository implements user.UserRepository on top of an in-memory userStorage.
type userRepository struct {
	storage *userStorage // backing store holding the users map and its locks
}
// GetUserRepository returns a UserRepository backed by the package-level storage.
func GetUserRepository() user.UserRepository {
	repo := &userRepository{storage: UserStorage}
	return repo
}
// NewUserRepository returns a UserRepository that uses the given storage.
func NewUserRepository(storage *userStorage) user.UserRepository {
	return &userRepository{storage: storage}
}
// CreateUser adds a user to the appropriate data structures.
// The work runs in a goroutine so the call can honor ctx cancellation; on
// cancellation a watcher goroutine deletes the user if the insert still lands.
func (userRepo *userRepository) CreateUser(ctx context.Context, user *userpb.User) (uint64, error) {
	result := make(chan uint64, 1)
	// NOTE(review): errorchan is never written to by the goroutine below, so the
	// error branches here are currently unreachable — confirm whether an error
	// path was intended (e.g. duplicate-ID detection).
	errorchan := make(chan error, 1)
	go func() {
		//user.AccountInformation.UserId = userRepo.storage.generateUserId()
		newUserEntry := new(userEntry)
		newUserEntry.user = user
		userRepo.storage.usersRWMu.Lock()
		userRepo.storage.users[user.AccountInformation.UserId] = newUserEntry
		userRepo.storage.usersRWMu.Unlock()
		result <- user.AccountInformation.UserId
	}()
	select {
	case userID := <-result:
		return userID, nil
	case err := <-errorchan:
		//Sending 0 as an invalid postID
		return 0, err
	case <-ctx.Done():
		// if ctx.Done(), we need to make sure that if the user has or will be created, it is deleted,
		// so start a new go routine to monitor the result and error channels
		go func() {
			select {
			case userID := <-result:
				userRepo.DeleteUser(context.Background(), userID)
				return
			case <-errorchan:
				return
			}
		}()
		return 0, ctx.Err()
	}
}
// GetUser returns the stored user for userID.
// NOTE(review): despite the original "creates a copy" wording, this hands back
// the internally stored *pb.User pointer, not a copy — callers share storage
// state with the repository.
func (userRepo *userRepository) GetUser(ctx context.Context, userID uint64) (*pb.User, error) {
	result := make(chan *pb.User, 1)
	errorchan := make(chan error, 1)
	go func() {
		userRepo.storage.usersRWMu.RLock()
		defer userRepo.storage.usersRWMu.RUnlock()
		userEntry, exists := userRepo.storage.users[userID]
		if !exists {
			errorchan <- errors.New("user not found")
		} else {
			result <- userEntry.user
		}
	}()
	select {
	case user := <-result:
		return user, nil
	case err := <-errorchan:
		return nil, err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// GetUsers returns the stored users for the given IDs.
//
// Fix: the original indexed userRepo.storage.users[v].user without checking
// existence, so a missing ID produced a nil *userEntry and the goroutine
// panicked, crashing the process. A missing ID now reports "user not found",
// consistent with GetUser.
func (userRepo *userRepository) GetUsers(ctx context.Context, userIDs []uint64) ([]*pb.User, error) {
	result := make(chan []*pb.User, 1)
	errorchan := make(chan error, 1)
	go func() {
		userRepo.storage.usersRWMu.RLock()
		defer userRepo.storage.usersRWMu.RUnlock()
		cp := make([]*pb.User, 0, len(userIDs))
		for _, v := range userIDs {
			entry, exists := userRepo.storage.users[v]
			if !exists {
				errorchan <- errors.New("user not found")
				return
			}
			cp = append(cp, entry.user)
		}
		result <- cp
	}()
	select {
	case users := <-result:
		return users, nil
	case err := <-errorchan:
		return nil, err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// GetAllUsers returns every user currently held in storage.
func (userRepo *userRepository) GetAllUsers(ctx context.Context) ([]*pb.User, error) {
	result := make(chan []*pb.User, 1)
	errorchan := make(chan error, 1)
	go func() {
		userRepo.storage.usersRWMu.RLock()
		defer userRepo.storage.usersRWMu.RUnlock()
		all := make([]*pb.User, 0, len(userRepo.storage.users))
		for _, entry := range userRepo.storage.users {
			all = append(all, entry.user)
		}
		result <- all
	}()
	select {
	case users := <-result:
		return users, nil
	case err := <-errorchan:
		return nil, err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// FollowUser updates the following user's following map, and the followed user's followers map
// to reflect that a user is following another user.
// Lock order: the follower's followingRWMu is taken and released before the
// followed user's followersRWMu.
func (userRepo *userRepository) FollowUser(ctx context.Context, followingUserID uint64, UserIDToFollow uint64) error {
	result := make(chan error, 1)
	go func() {
		// A user may not follow itself.
		if followingUserID == UserIDToFollow {
			result <- errors.New("duplicate user ids")
		} else {
			//Add userID to be followed in the following list of user who wants to follow
			followingUserIDObject, err := userRepo.storage.getUserEntry(followingUserID)
			if err != nil {
				result <- err
			} else {
				followingUserIDObject.followingRWMu.Lock()
				followingUserIDObject.user.Following[UserIDToFollow] = UserIDToFollow
				followingUserIDObject.followingRWMu.Unlock()
				//Add userID who is following in the followers list of the user being followed
				// NOTE(review): if this second lookup fails, the Following entry added
				// above is not rolled back, leaving the two maps inconsistent — confirm.
				UserIDToFollowObject, err := userRepo.storage.getUserEntry(UserIDToFollow)
				if err != nil {
					result <- err
				} else {
					UserIDToFollowObject.followersRWMu.Lock()
					UserIDToFollowObject.user.Followers[followingUserID] = followingUserID
					UserIDToFollowObject.followersRWMu.Unlock()
					result <- nil
				}
			}
		}
	}()
	select {
	case res := <-result:
		return res
	case <-ctx.Done():
		// listen to the result channel in case the operation was successful, then unfollow
		go func() {
			res := <-result
			if res == nil {
				userRepo.UnFollowUser(context.Background(), followingUserID, UserIDToFollow)
			}
		}()
		return ctx.Err()
	}
}
// UnFollowUser updates the following user's following map, and the followed user's followers map
// to reflect that a user has unfollowed another user.
// Lock order mirrors FollowUser: followingRWMu first, then followersRWMu.
func (userRepo *userRepository) UnFollowUser(ctx context.Context, followingUserID uint64, UserIDToUnfollow uint64) error {
	result := make(chan error, 1)
	go func() {
		if followingUserID == UserIDToUnfollow {
			result <- errors.New("duplicate user ids")
		} else {
			//Remove userID to be unfollowed from the following list of the user initiating unfollow request
			followingUserIDObject, err := userRepo.storage.getUserEntry(followingUserID)
			if err != nil {
				result <- err
			} else {
				followingUserIDObject.followingRWMu.Lock()
				// Maps are reference types, so delete() already mutates the stored
				// map; the temp variable and reassignment below are no-ops kept as-is.
				newfollowing := followingUserIDObject.user.Following
				delete(newfollowing, UserIDToUnfollow)
				followingUserIDObject.user.Following = newfollowing
				followingUserIDObject.followingRWMu.Unlock()
				//Remove userID who is initiating the unfollow request from the followers list of the user being unfollowed
				UserIDToUnfollowObject, err := userRepo.storage.getUserEntry(UserIDToUnfollow)
				if err != nil {
					result <- err
				} else {
					UserIDToUnfollowObject.followersRWMu.Lock()
					newfollowers := UserIDToUnfollowObject.user.Followers
					delete(newfollowers, followingUserID)
					UserIDToUnfollowObject.user.Followers = newfollowers
					UserIDToUnfollowObject.followersRWMu.Unlock()
					result <- nil
				}
			}
		}
	}()
	select {
	case res := <-result:
		return res
	case <-ctx.Done():
		// listen to the result channel in case the operation was successful, then follow
		go func() {
			res := <-result
			if res == nil {
				userRepo.FollowUser(context.Background(), followingUserID, UserIDToUnfollow)
			}
		}()
		return ctx.Err()
	}
}
// GetUserByUsername returns the first user whose email matches the given value.
//
// Fix: the original kept scanning after a match. A second matching user made the
// send on the 1-buffered result channel block forever while the goroutine still
// held the read lock, permanently blocking all writers. The scan now stops at
// the first match.
func (userRepo *userRepository) GetUserByUsername(ctx context.Context, email string) (*pb.User, error) {
	result := make(chan *pb.User, 1)
	errorchan := make(chan error, 1)
	go func() {
		userRepo.storage.usersRWMu.RLock()
		defer userRepo.storage.usersRWMu.RUnlock()
		for _, v := range userRepo.storage.users {
			if v.user.AccountInformation.Email == email {
				result <- v.user
				return
			}
		}
		errorchan <- errors.New("user not found")
	}()
	select {
	case user := <-result:
		return user, nil
	case err := <-errorchan:
		return nil, err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// GetFollowing returns the users that the given user is following.
//
// Fix: when the following map referenced a user absent from the users map, the
// original sent "database corruption" on errorchan and then called
// panic("database corruption"), killing the whole process even though the
// caller was already being handed the error. Library code must not panic; the
// scan now stops and the error is returned.
func (userRepo *userRepository) GetFollowing(ctx context.Context, userId uint64) ([]*pb.User, error) {
	result := make(chan []*pb.User, 1)
	errorchan := make(chan error, 1)
	go func() {
		// Get the user object from the users map
		userEntry, err := userRepo.storage.getUserEntry(userId)
		if err != nil {
			errorchan <- err
			return
		}
		userEntry.followingRWMu.RLock()
		defer userEntry.followingRWMu.RUnlock()
		tempArray := make([]*pb.User, 0, len(userEntry.user.Following))
		for k := range userEntry.user.Following {
			followingEntry, err := userRepo.storage.getUserEntry(k)
			if err != nil {
				// following map has an entry inconsistent with the users map
				errorchan <- errors.New("database corruption")
				return
			}
			tempArray = append(tempArray, followingEntry.user)
		}
		result <- tempArray
	}()
	select {
	case user := <-result:
		return user, nil
	case err := <-errorchan:
		return nil, err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// GetNotFollowing returns every user the given user does not follow, excluding
// the user itself. Lock order: the user's followingRWMu, then the global usersRWMu.
func (userRepo *userRepository) GetNotFollowing(ctx context.Context, userId uint64) ([]*pb.User, error) {
	result := make(chan []*pb.User, 1)
	errorchan := make(chan error, 1)
	go func() {
		userEntry, err := userRepo.storage.getUserEntry(userId)
		if err != nil {
			errorchan <- err
			return
		}
		userEntry.followingRWMu.RLock()
		defer userEntry.followingRWMu.RUnlock()
		userRepo.storage.usersRWMu.RLock()
		defer userRepo.storage.usersRWMu.RUnlock()
		notFollowing := make([]*pb.User, 0, 100)
		for id, entry := range userRepo.storage.users {
			// Skip the user itself and anyone already followed.
			if _, follows := userEntry.user.Following[id]; follows || id == userId {
				continue
			}
			notFollowing = append(notFollowing, entry.user)
		}
		result <- notFollowing
	}()
	select {
	case user := <-result:
		return user, nil
	case err := <-errorchan:
		return nil, err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// DeleteUser removes a user.
//
// Fix: the original ctx-cancellation watcher ran a select over the buffer and
// result channels. On a successful delete BOTH channels are ready, so Go's
// random select could take the result branch and the removed entry was never
// restored. It also tested `result != nil` — comparing the channel itself (always
// non-nil), not the received error. The watcher now reads the result first and
// restores the entry only when the delete actually happened.
func (userRepo *userRepository) DeleteUser(ctx context.Context, userID uint64) error {
	result := make(chan error, 1)
	buffer := make(chan *userEntry, 1)
	go func() {
		userRepo.storage.usersRWMu.Lock()
		defer userRepo.storage.usersRWMu.Unlock()
		userEntry, exists := userRepo.storage.users[userID]
		if !exists {
			result <- errors.New("user not found")
			return
		}
		delete(userRepo.storage.users, userID)
		buffer <- userEntry
		result <- nil
	}()
	select {
	case ret := <-result:
		return ret
	case <-ctx.Done():
		// The delete may still complete after cancellation; if it does, put the
		// entry back so the cancelled call has no lasting effect.
		go func() {
			if err := <-result; err != nil {
				// deletion failed — nothing was removed, nothing to restore
				return
			}
			userEntry := <-buffer
			userRepo.storage.usersRWMu.Lock()
			defer userRepo.storage.usersRWMu.Unlock()
			userRepo.storage.users[userID] = userEntry
		}()
		return ctx.Err()
	}
}
// UpdateUserAccountInfo is not implemented for the in-memory repository.
// Fix: error string lowercased per Go convention (was "Feature not implemented").
func (userRepo *userRepository) UpdateUserAccountInfo(ctx context.Context, info *userpb.AccountInformation) error {
	return errors.New("feature not implemented")
}
// NextUserId returns the next available user ID from the underlying storage.
func (userRepo *userRepository) NextUserId() (uint64, error) {
	return userRepo.storage.generateUserId()
}
|
package server
import (
"fmt"
pb "github.com/1851616111/xchain/pkg/protos"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"time"
)
// makeKeepaliveMsg builds a ping request whose payload is the literal bytes "ping".
func makeKeepaliveMsg() *pb.Message {
	ts, _ := ptypes.TimestampProto(time.Now())
	msg := pb.Message{
		Action:    pb.Action_Request,
		Type:      pb.Message_Net_PING,
		Payload:   []byte("ping"),
		Timestamp: ts,
	}
	return &msg
}
// makePingReqMsg builds a ping request carrying an empty serialized Ping payload.
func makePingReqMsg() *pb.Message {
	ts, _ := ptypes.TimestampProto(time.Now())
	payload, _ := proto.Marshal(&pb.Ping{})
	msg := pb.Message{
		Action:    pb.Action_Request,
		Type:      pb.Message_Net_PING,
		Payload:   payload,
		Timestamp: ts,
	}
	return &msg
}
// makePingRspMsg builds a ping response carrying the given endpoint list.
// On marshal failure the error is printed and an empty payload is sent.
func makePingRspMsg(epList []*pb.EndPoint) *pb.Message {
	ts, _ := ptypes.TimestampProto(time.Now())
	payload, err := proto.Marshal(&pb.Ping{EndPoint: epList})
	if err != nil {
		fmt.Printf("make ping response message err %v\n", err)
	}
	msg := pb.Message{
		Action:    pb.Action_Response,
		Type:      pb.Message_Net_PING,
		Payload:   payload,
		Timestamp: ts,
	}
	return &msg
}
// parsePingRspMsg decodes the endpoint list from a ping response payload.
//
// Fix: on unmarshal failure the original returned the partially decoded
// ping.EndPoint alongside the error; a value returned with a non-nil error is
// meaningless, so nil is returned instead.
func parsePingRspMsg(in *pb.Message) ([]*pb.EndPoint, error) {
	ping := &pb.Ping{}
	if err := proto.Unmarshal(in.Payload, ping); err != nil {
		return nil, err
	}
	return ping.EndPoint, nil
}
// makeErrRspMsg builds an error response whose payload is the error text.
func makeErrRspMsg(err error) *pb.Message {
	ts, _ := ptypes.TimestampProto(time.Now())
	msg := pb.Message{
		Action:    pb.Action_Response,
		Type:      pb.Message_Error,
		Payload:   []byte(err.Error()),
		Timestamp: ts,
	}
	return &msg
}
// MakeOKRspMsg builds a payload-less OK response message.
func MakeOKRspMsg() *pb.Message {
	ts, _ := ptypes.TimestampProto(time.Now())
	msg := pb.Message{
		Action:    pb.Action_Response,
		Type:      pb.Message_OK,
		Timestamp: ts,
	}
	return &msg
}
// MakeDeployMsg builds a contract-deploy request carrying the serialized spec.
// The marshal error is discarded, as in the other message constructors here.
func MakeDeployMsg(in *pb.XCodeSpec) *pb.Message {
	ts, _ := ptypes.TimestampProto(time.Now())
	payload, _ := proto.Marshal(in)
	msg := pb.Message{
		Action:    pb.Action_Request,
		Type:      pb.Message_Contract_Deploy,
		Payload:   payload,
		Timestamp: ts,
	}
	return &msg
}
// parseDeployMsg decodes an XCodeSpec from a deploy message payload.
func parseDeployMsg(in *pb.Message) (*pb.XCodeSpec, error) {
	spec := new(pb.XCodeSpec)
	if err := proto.Unmarshal(in.Payload, spec); err != nil {
		return nil, err
	}
	return spec, nil
}
// IsOKMsg reports whether in is a non-nil OK response message.
func IsOKMsg(in *pb.Message) bool {
	return in != nil && in.Type == pb.Message_OK
}
|
package main
import "fmt"
// IProject is a composite project node: it can hold sub-projects, print its
// own info, and hand out an iterator over itself and its children.
type IProject interface {
	Add(name string, num, cost int) // add a sub-project with the given attributes
	GetProjectInfo()                // print this project's name, num and cost
	Iterator() IProjectIterator     // iterate this project followed by its children
}
// IProjectIterator walks a project list in insertion order.
type IProjectIterator interface {
	HasNext() bool  // true while another project remains
	Next() IProject // return the current project and advance
}
///////////////////////////////////////////
// Project is the concrete composite node: its own attributes plus the list of
// projects to iterate (itself first, then added children — see NewProject/Add).
type Project struct {
	name        string
	num         int
	cost        int
	projectList []IProject // self followed by added sub-projects
}
// NewProject creates a project and seeds its iteration list with itself, so
// iterating a project always visits the project before its children.
func NewProject(name string, num, cost int) *Project {
	p := &Project{name: name, num: num, cost: cost}
	p.projectList = []IProject{p}
	return p
}
// Add appends a new sub-project with the given attributes.
func (p *Project) Add(name string, num, cost int) {
	child := NewProject(name, num, cost)
	p.projectList = append(p.projectList, child)
}
// GetProjectInfo prints the project's name, headcount and cost, one per line.
func (p *Project) GetProjectInfo() {
	fmt.Printf("name is %v\n", p.name)
	fmt.Printf("num is %v\n", p.num)
	fmt.Printf("cost is %v\n", p.cost)
}
// Iterator returns a fresh iterator over this project and its children.
func (p *Project) Iterator() IProjectIterator {
	return NewProjectIterator(p.projectList)
}
/////////////////////////////////////////////////////////
// ProjectIterator is a cursor over a snapshot of a project list.
type ProjectIterator struct {
	projectList []IProject
	current     int // index of the next project to return
}
// NewProjectIterator creates an iterator positioned at the start of the list.
func NewProjectIterator(projectList []IProject) *ProjectIterator {
	return &ProjectIterator{projectList: projectList, current: 0}
}
// HasNext reports whether another non-nil project remains.
func (p *ProjectIterator) HasNext() bool {
	if p.current >= len(p.projectList) {
		return false
	}
	return p.projectList[p.current] != nil
}
// Next returns the current project and advances the cursor.
//
// Fix: the original indexed without a bounds check, so calling Next past the
// end (without consulting HasNext) panicked; it now returns nil instead.
// Correct HasNext/Next usage is unaffected.
func (p *ProjectIterator) Next() IProject {
	if p.current >= len(p.projectList) {
		return nil
	}
	res := p.projectList[p.current]
	p.current++
	return res
}
// main demonstrates the composite/iterator pattern: a root project with three
// children, printed in iteration order (root first).
func main() {
	root := NewProject("p1", 1, 111)
	root.Add("p4", 4, 444)
	root.Add("p2", 2, 222)
	root.Add("p3", 3, 333)
	it := root.Iterator()
	for it.HasNext() {
		it.Next().GetProjectInfo()
	}
}
//name is p1
//num is 1
//cost is 111
//name is p4
//num is 4
//cost is 444
//name is p2
//num is 2
//cost is 222
//name is p3
//num is 3
//cost is 333 |
package zapcloudwatch
import (
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"go.uber.org/zap/zapcore"
)
// Compile-time assertion that *CloudwatchHook satisfies Interface.
var _ Interface = (*CloudwatchHook)(nil)

// Interface describes the behavior of a CloudwatchHook: producing a zap hook
// function, shipping a batch of log events, and level filtering.
// (Original comment read "Interface of pgp" — a copy/paste leftover.)
type Interface interface {
	GetHook() (func(zapcore.Entry) error, error)
	sendEvent(params *cloudwatchlogs.PutLogEventsInput) error
	Levels() []zapcore.Level
	isAcceptedLevel(level zapcore.Level) bool
}
|
package dynamicstruct
import (
"encoding/json"
"fmt"
"github.com/goldeneggg/structil"
)
// Example builds a DynamicStruct with a chain of AddXXX calls, removes a field,
// renames the struct, and prints the resulting definition.
func Example() {
	type Hoge struct {
		Key   string
		Value interface{}
	}
	hogePtr := &Hoge{
		Key:   "keystr",
		Value: "valuestr",
	}
	// Add fields using Builder with AddXXX method chain
	b := NewBuilder().
		AddString("StringField").
		AddInt("IntField").
		AddFloat32("Float32Field").
		AddBool("BoolField").
		AddMap("MapField", SampleString, SampleFloat32).
		AddStructPtr("StructPtrField", hogePtr).
		AddSlice("SliceField", SampleInt).
		AddInterfaceWithTag("SomeObjectField", true, `json:"some_object_field"`)
	// Remove removes a field by assigned name
	b = b.Remove("Float32Field")
	// SetStructName sets the name of DynamicStruct
	// Note: Default struct name is "DynamicStruct"
	b.SetStructName("MyStruct")
	// Build returns a DynamicStruct
	ds, err := b.Build()
	if err != nil {
		panic(err)
	}
	// Print struct definition with Definition method
	// Struct fields are automatically ordered by field name
	fmt.Println(ds.Definition())
	// Output:
	// type MyStruct struct {
	// 	BoolField bool
	// 	IntField int
	// 	MapField map[string]float32
	// 	SliceField []int
	// 	SomeObjectField *interface {} `json:"some_object_field"`
	// 	StringField string
	// 	StructPtrField struct {
	// 		Key string
	// 		Value interface {}
	// 	}
	// }
}
// Example_unmarshalJSON builds a tagged DynamicStruct, unmarshals JSON into a
// new instance of it, and reads fields back through a structil Getter.
func Example_unmarshalJSON() {
	type Hoge struct {
		Key   string      `json:"key"`
		Value interface{} `json:"value"`
	}
	var hogePtr *Hoge
	b := NewBuilder().
		AddStringWithTag("StringField", `json:"string_field"`).
		AddIntWithTag("IntField", `json:"int_field"`).
		AddFloat32WithTag("Float32Field", `json:"float32_field"`).
		AddBoolWithTag("BoolField", `json:"bool_field"`).
		AddStructPtrWithTag("StructPtrField", hogePtr, `json:"struct_ptr_field"`).
		AddSliceWithTag("SliceField", "", `json:"slice_string_field"`)
	ds, err := b.Build()
	if err != nil {
		panic(err)
	}
	// prints Go struct definition of this DynamicStruct
	fmt.Println(ds.Definition())
	// try json unmarshal with NewInterface
	input := []byte(`
{
	"string_field":"あいうえお",
	"int_field":9876,
	"float32_field":5.67,
	"bool_field":true,
	"struct_ptr_field":{
		"key":"hogekey",
		"value":"hogevalue"
	},
	"slice_string_field":[
		"a",
		"b"
	]
}
`)
	intf := ds.NewInterface() // returns a new interface of this DynamicStruct
	err = json.Unmarshal(input, &intf)
	if err != nil {
		panic(err)
	}
	// read fields back through a structil Getter
	g, err := structil.NewGetter(intf)
	if err != nil {
		panic(err)
	}
	s, _ := g.String("StringField")
	f, _ := g.Float32("Float32Field")
	strct, _ := g.Get("StructPtrField")
	sl, _ := g.Get("SliceField")
	fmt.Printf(
		"num of fields=%d\n'StringField'=%s\n'Float32Field'=%f\n'StructPtrField'=%+v\n'SliceField'=%+v",
		g.NumField(),
		s,
		f,
		strct,
		sl,
	)
	// Output:
	// type DynamicStruct struct {
	// 	BoolField bool `json:"bool_field"`
	// 	Float32Field float32 `json:"float32_field"`
	// 	IntField int `json:"int_field"`
	// 	SliceField []string `json:"slice_string_field"`
	// 	StringField string `json:"string_field"`
	// 	StructPtrField struct {
	// 		Key string `json:"key"`
	// 		Value interface {} `json:"value"`
	// 	} `json:"struct_ptr_field"`
	// }
	// num of fields=6
	// 'StringField'=あいうえお
	// 'Float32Field'=5.670000
	// 'StructPtrField'={Key:hogekey Value:hogevalue}
	// 'SliceField'=[a b]
}
|
// Copyright (c) 2018 The MATRIX Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
package blkgenorV2
import (
"github.com/MatrixAINetwork/go-matrix/consensus/blkmanage"
"github.com/MatrixAINetwork/go-matrix/ca"
"github.com/MatrixAINetwork/go-matrix/common"
"github.com/MatrixAINetwork/go-matrix/core/types"
"github.com/MatrixAINetwork/go-matrix/log"
"github.com/MatrixAINetwork/go-matrix/mc"
"github.com/MatrixAINetwork/go-matrix/params/manparams"
"github.com/MatrixAINetwork/go-matrix/txpoolCache"
"github.com/pkg/errors"
)
// processHeaderGen builds a common block from an AI-mining response message:
// it prepares the header, executes transactions and the state trie, finalizes
// the block, and broadcasts a local block-verification request.
func (p *Process) processHeaderGen(AIResult *mc.HD_V2_AIMiningRspMsg) error {
	log.Info(p.logExtraInfo(), "processHeaderGen", "start")
	defer log.Info(p.logExtraInfo(), "processHeaderGen", "end")
	// Guard: the broadcast-interval info and parent header must be known first.
	if p.bcInterval == nil {
		log.Error(p.logExtraInfo(), "区块生成阶段", "广播周期信息为空")
		return errors.New("广播周期信息为空")
	}
	if p.parentHeader == nil {
		log.Error(p.logExtraInfo(), "区块生成阶段", "父区块为nil")
		return errors.New("父区块为nil")
	}
	// Derive the block version to produce from the parent's version.
	version, err := p.pm.manblk.ProduceBlockVersion(p.number, string(p.parentHeader.Version))
	if err != nil {
		return err
	}
	originHeader, extraData, err := p.pm.manblk.Prepare(blkmanage.CommonBlk, version, p.number, p.bcInterval, p.parentHash, AIResult)
	if err != nil {
		log.Error(p.logExtraInfo(), "准备阶段失败", err)
		return err
	}
	// Prepare returns the online-consensus vote results as opaque extra data.
	onlineConsensusResults, ok := extraData.([]*mc.HD_OnlineConsensusVoteResultMsg)
	if !ok {
		log.Error(p.logExtraInfo(), "反射在线状态失败", "")
		return errors.New("反射在线状态失败")
	}
	txsCode, stateDB, receipts, originalTxs, finalTxs, _, err := p.pm.manblk.ProcessState(blkmanage.CommonBlk, version, originHeader, nil)
	if err != nil {
		log.Error(p.logExtraInfo(), "运行交易和状态树失败", err)
		return err
	}
	// After running the matrix state trie, generate the root.
	// (p.blockChain(), header, stateDB, nil, tsBlock.Currencies())
	block, _, err := p.pm.manblk.Finalize(blkmanage.CommonBlk, version, originHeader, stateDB, finalTxs, nil, receipts, nil)
	if err != nil {
		log.Error(p.logExtraInfo(), "Finalize失败", err)
		return err
	}
	reqMsg := &mc.HD_BlkConsensusReqMsg{
		Header:                 block.Header(),
		TxsCode:                txsCode,
		ConsensusTurn:          p.consensusTurn,
		OnlineConsensusResults: onlineConsensusResults,
		From:                   ca.GetSignAddress(),
	}
	//send to local block verify module
	if len(originalTxs) > 0 {
		txpoolCache.MakeStruck(types.GetTX(originalTxs), reqMsg.Header.HashNoSignsAndNonce(), p.number)
	}
	p.sendHeaderVerifyReq(&mc.LocalBlockVerifyConsensusReq{BlkVerifyConsensusReq: reqMsg, OriginalTxs: originalTxs, FinalTxs: finalTxs, Receipts: receipts, State: stateDB})
	return nil
}
// sendHeaderVerifyReq publishes the verify request to the local verification
// module and starts the network resend loop for the consensus request.
func (p *Process) sendHeaderVerifyReq(req *mc.LocalBlockVerifyConsensusReq) {
	log.Info(p.logExtraInfo(), "本地发送区块验证请求", req.BlkVerifyConsensusReq.Header.HashNoSignsAndNonce().TerminalString(), "高度", p.number)
	mc.PublishEvent(mc.BlockGenor_HeaderVerifyReq, req)
	p.startConsensusReqSender(req.BlkVerifyConsensusReq)
}
// startConsensusReqSender replaces the current message sender with a resend
// controller that periodically re-sends the block consensus request.
func (p *Process) startConsensusReqSender(req *mc.HD_BlkConsensusReqMsg) {
	p.closeMsgSender()
	sender, err := common.NewResendMsgCtrl(req, p.sendConsensusReqFunc, manparams.BlkPosReqSendInterval, manparams.BlkPosReqSendTimes)
	if err != nil {
		log.Error(p.logExtraInfo(), "创建req发送器", "失败", "err", err)
		return
	}
	p.msgSender = sender
}
// sendConsensusReqFunc is the resend callback that pushes the block-consensus
// request to validator nodes; times is the resend attempt counter.
func (p *Process) sendConsensusReqFunc(data interface{}, times uint32) {
	req, ok := data.(*mc.HD_BlkConsensusReqMsg)
	if !ok {
		log.Error(p.logExtraInfo(), "发出区块共识req", "反射消息失败", "次数", times)
		return
	}
	log.Info(p.logExtraInfo(), "!!!!网络发送区块验证请求, hash", req.Header.HashNoSignsAndNonce(), "tx数量", req.TxsCodeCount(), "次数", times)
	p.pm.hd.SendNodeMsg(mc.HD_BlkConsensusReq, req, common.RoleValidator, nil)
}
// processBroadcastBlockGen builds a broadcast block: prepares the header, runs
// transactions and state, finalizes, signs the header, and sends the result.
func (p *Process) processBroadcastBlockGen() error {
	log.Info(p.logExtraInfo(), "processBroadcastBlockGen", "start")
	defer log.Info(p.logExtraInfo(), "processBroadcastBlockGen", "end")
	// Guard: broadcast-interval info and parent header must be known first.
	if p.bcInterval == nil {
		log.Error(p.logExtraInfo(), "广播区块生成阶段", "广播周期信息为空")
		return errors.New("广播周期信息为空")
	}
	if p.parentHeader == nil {
		log.Error(p.logExtraInfo(), "广播区块生成阶段", "父区块为nil")
		return errors.New("父区块为nil")
	}
	version, err := p.pm.manblk.ProduceBlockVersion(p.number, string(p.parentHeader.Version))
	if err != nil {
		return err
	}
	originHeader, _, err := p.pm.manblk.Prepare(blkmanage.BroadcastBlk, version, p.number, p.bcInterval, p.parentHash)
	if err != nil {
		log.Error(p.logExtraInfo(), "广播区块生成阶段", "准备区块失败", err)
		return err
	}
	_, stateDB, receipts, _, finalTxs, _, err := p.pm.manblk.ProcessState(blkmanage.BroadcastBlk, version, originHeader, nil)
	if err != nil {
		log.Error(p.logExtraInfo(), "广播区块生成阶段, 运行交易和状态树失败", err)
		return err
	}
	// After running the matrix state trie, generate the root.
	block, _, err := p.pm.manblk.Finalize(blkmanage.BroadcastBlk, version, originHeader, stateDB, finalTxs, nil, receipts, nil)
	if err != nil {
		log.Error(p.logExtraInfo(), "Finalize失败", err)
		return err
	}
	finalHeader := block.Header()
	err = p.setSignatures(finalHeader)
	if err != nil {
		return err
	}
	p.sendBroadcastRspMsg(&mc.BlockData{Header: finalHeader, Txs: finalTxs})
	return nil
}
// setSignatures signs the header's no-signs/no-nonce hash with the node's
// deposit account and installs the signature as the header's only signature.
func (p *Process) setSignatures(header *types.Header) error {
	signHash := header.HashNoSignsAndNonce()
	sign, err := p.signHelper().SignHashWithValidateByAccount(signHash.Bytes(), true, ca.GetDepositAddress())
	if err != nil {
		log.Error(p.logExtraInfo(), "广播区块生成,签名错误", err)
		return err
	}
	//log.Debug(p.logExtraInfo(), "test log", "广播区块签名成功", "sign hash", signHash.TerminalString(), "sign account", ca.GetDepositAddress().Hex(), "version", string(header.Version))
	header.Signatures = make([]common.Signature, 0, 1)
	header.Signatures = append(header.Signatures, sign)
	return nil
}
// sendBroadcastRspMsg logs and starts the resend loop for the broadcast block result.
func (p *Process) sendBroadcastRspMsg(bcBlock *mc.BlockData) {
	log.Info(p.logExtraInfo(), "发送广播区块结果", bcBlock.Header.HashNoSigns().TerminalString(), "高度", p.number)
	p.startBroadcastRspSender(bcBlock)
}
// startBroadcastRspSender replaces the current message sender with a resend
// controller that periodically re-sends the broadcast block result.
func (p *Process) startBroadcastRspSender(bcBlock *mc.BlockData) {
	p.closeMsgSender()
	s, err := common.NewResendMsgCtrl(bcBlock, p.sendBroadcastRspFunc, manparams.BlkPosReqSendInterval, manparams.BlkPosReqSendTimes)
	if err != nil {
		log.Error(p.logExtraInfo(), "创建广播区块结果发送器", "失败", "err", err)
		return
	}
	p.msgSender = s
}
// sendBroadcastRspFunc is the resend callback that pushes the broadcast mining
// response to validator nodes; times is the resend attempt counter.
func (p *Process) sendBroadcastRspFunc(data interface{}, times uint32) {
	blk, ok := data.(*mc.BlockData)
	if !ok {
		log.Error(p.logExtraInfo(), "发出广播区块结果", "反射消息失败", "次数", times)
		return
	}
	msg := &mc.HD_BroadcastMiningRspMsg{
		BlockMainData: blk,
	}
	log.Trace(p.logExtraInfo(), "!!网络发送广播区块结果, hash", msg.BlockMainData.Header.HashNoSignsAndNonce(), "交易数量", len(types.GetTX(msg.BlockMainData.Txs)), "次数", times, "高度", msg.BlockMainData.Header.Number)
	p.pm.hd.SendNodeMsg(mc.HD_BroadcastMiningRsp, msg, common.RoleValidator, nil)
}
|
package wallet
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"log"
"github.com/FeiyangTan/golang-blockchain/util"
"golang.org/x/crypto/ripemd160"
)
const version = byte(0x00)
const addressChecksumLen = 4
// Wallet holds an ECDSA key pair: the private key plus the public key encoded
// as the concatenated X||Y coordinate bytes (see newWallet).
type Wallet struct {
	PrivateKey ecdsa.PrivateKey
	PublicKey  []byte
}
// newWallet creates a wallet with a fresh P-256 ECDSA key pair; the public key
// is stored as the raw concatenation of the X and Y coordinate bytes.
func newWallet() *Wallet {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Panic(err)
	}
	pub := append(priv.PublicKey.X.Bytes(), priv.PublicKey.Y.Bytes()...)
	return &Wallet{PrivateKey: *priv, PublicKey: pub}
}
// getAddress derives the Base58Check wallet address:
// Base58(version || RIPEMD160(SHA256(pubkey)) || first 4 bytes of double-SHA256 checksum).
func (w Wallet) getAddress() []byte {
	payload := append([]byte{version}, hashPubKey(w.PublicKey)...)
	// checksum = first addressChecksumLen bytes of SHA256(SHA256(payload))
	first := sha256.Sum256(payload)
	second := sha256.Sum256(first[:])
	full := append(payload, second[:addressChecksumLen]...)
	return util.Base58Encode(full)
}
// hashPubKey returns RIPEMD160(SHA256(pubKey)), the standard public-key hash.
func hashPubKey(pubKey []byte) []byte {
	sha := sha256.Sum256(pubKey)
	hasher := ripemd160.New()
	if _, err := hasher.Write(sha[:]); err != nil {
		log.Panic(err)
	}
	return hasher.Sum(nil)
}
// checksum returns the first addressChecksumLen bytes of SHA256(SHA256(payload)).
func checksum(payload []byte) []byte {
	round1 := sha256.Sum256(payload)
	round2 := sha256.Sum256(round1[:])
	return round2[:addressChecksumLen]
}
|
package main
import (
"fmt"
"math/big"
)
// main solves Project Euler 25: print the index of the first Fibonacci number
// containing 1000 decimal digits.
func main() {
	a := big.NewInt(1) // F(1)
	b := big.NewInt(1) // F(2)
	next := big.NewInt(0)
	for idx := 3; ; idx++ {
		next.Add(a, b)
		a.Set(b)
		b.Set(next)
		if len(b.Text(10)) >= 1000 {
			fmt.Println("c:", idx)
			return
		}
	}
}
|
package problem3
import (
"container/list"
"fmt"
"io/ioutil"
"math"
"strconv"
"strings"
)
// FuelSystemMatrix is a 2-D grid counting how many wire segments pass through
// each cell; dim is indexed [y][x].
type FuelSystemMatrix struct {
	dim [][]int
}
// Wire tracks a wire's current position and its starting position on the grid.
type Wire struct {
	xPos   int // current x
	yPos   int // current y
	startX int // starting x, used for distance calculations
	startY int // starting y
}
// NewWire creates a wire whose current and starting positions are both
// (initXpos, initYpos).
func NewWire(initXpos int, initYpos int) *Wire {
	return &Wire{
		xPos:   initXpos,
		yPos:   initYpos,
		startX: initXpos,
		startY: initYpos,
	}
}
// equals reports whether both wires occupy the same current position.
// Idiom fix: return the boolean expression directly instead of the
// if-cond-return-true/return-false pattern.
func (w *Wire) equals(w2 Wire) bool {
	return w.xPos == w2.xPos && w.yPos == w2.yPos
}
// getDistance returns the Manhattan distance from w's starting point to w2's
// current position.
func (w *Wire) getDistance(w2 Wire) float64 {
	dx := math.Abs(float64(w.startX - w2.xPos))
	dy := math.Abs(float64(w.startY - w2.yPos))
	return dx + dy
}
// New allocates a MaxUpSize x MaxRightSize zeroed crossing-count matrix.
func New(MaxRightSize int, MaxUpSize int) *FuelSystemMatrix {
	rows := make([][]int, MaxUpSize)
	for i := range rows {
		rows[i] = make([]int, MaxRightSize)
	}
	return &FuelSystemMatrix{dim: rows}
}
// WireRight moves the wire dist cells to the right, incrementing the crossing
// counter of every cell it passes through (excluding the starting cell).
func (m *FuelSystemMatrix) WireRight(dist int, w *Wire) {
	target := w.xPos + dist
	fmt.Printf("Moving wire right %v to position %v, %v\n", dist, target, w.yPos)
	row := m.dim[w.yPos]
	for x := w.xPos + 1; x <= target; x++ {
		row[x]++
	}
	w.xPos = target
}
// WireLeft moves the wire dist cells to the left, incrementing the crossing
// counter of every cell it passes through (excluding the starting cell).
func (m *FuelSystemMatrix) WireLeft(dist int, w *Wire) {
	target := w.xPos - dist
	fmt.Printf("Moving wire left %v to position %v, %v\n", dist, target, w.yPos)
	row := m.dim[w.yPos]
	for x := w.xPos - 1; x >= target; x-- {
		row[x]++
	}
	w.xPos = target
}
// WireUp moves the wire dist cells upward, incrementing the crossing counter
// of every cell it passes through (excluding the starting cell).
func (m *FuelSystemMatrix) WireUp(dist int, w *Wire) {
	target := w.yPos + dist
	fmt.Printf("Moving wire up %v to position %v, %v\n", dist, w.xPos, target)
	for y := w.yPos + 1; y <= target; y++ {
		m.dim[y][w.xPos]++
	}
	w.yPos = target
}
// WireDown moves the wire dist cells downward, incrementing the crossing
// counter of every cell it passes through (excluding the starting cell).
func (m *FuelSystemMatrix) WireDown(dist int, w *Wire) {
	target := w.yPos - dist
	fmt.Printf("Moving wire down %v to position %v, %v\n", dist, w.xPos, target)
	for y := w.yPos - 1; y >= target; y-- {
		m.dim[y][w.xPos]++
	}
	w.yPos = target
}
// FindCrossedWires lists every cell, outside row 0 and column 0, that more
// than one wire segment passed through.
func (m *FuelSystemMatrix) FindCrossedWires() *list.List {
	results := list.New()
	for y, row := range m.dim {
		for x, count := range row {
			if y == 0 || x == 0 || count <= 1 {
				continue
			}
			results.PushBack(Wire{xPos: x, yPos: y})
		}
	}
	return results
}
// ParseOp splits an instruction like "R75" into its direction letter and
// distance. A malformed distance is silently parsed as 0.
func ParseOp(op string) (string, int) {
	direction := op[:1]
	dist, _ := strconv.Atoi(op[1:])
	return direction, dist
}
// CalcMaxDimensions walks a wire's instruction list and returns
// (maxX, maxY, minX, minY): the extreme coordinates reached, where maxX and
// maxY are additionally raised to at least the magnitude of the corresponding
// minimum so a symmetric grid can be allocated.
func CalcMaxDimensions(wRep []string) (int, int, int, int) {
	var maxX, minX, maxY, minY int
	var x, y int
	for _, instr := range wRep {
		op, dist := ParseOp(instr)
		switch op {
		case "R":
			x += dist
		case "L":
			x -= dist
		case "U":
			y += dist
		case "D":
			y -= dist
		}
		if x > maxX {
			maxX = x
		}
		if x < minX {
			minX = x
		}
		if y > maxY {
			maxY = y
		}
		if y < minY {
			minY = y
		}
	}
	// Widen the positive extents to cover the negative excursions too.
	if abs := int(math.Abs(float64(minX))); abs > maxX {
		maxX = abs
	}
	if abs := int(math.Abs(float64(minY))); abs > maxY {
		maxY = abs
	}
	return maxX, maxY, minX, minY
}
// ParseOpAndApply parses a single instruction (e.g. "U12") and applies the
// corresponding movement to wire w on matrix m. Unknown direction letters
// are silently ignored.
func ParseOpAndApply(op string, m *FuelSystemMatrix, w *Wire) {
	op1, dist := ParseOp(op)
	switch op1 {
	case "R":
		m.WireRight(dist, w)
	case "L":
		m.WireLeft(dist, w)
	case "U":
		m.WireUp(dist, w)
	case "D":
		m.WireDown(dist, w)
	}
}
// ParseArrayFromProblemInput reads the puzzle input at path and returns one
// slice of comma-separated instruction tokens per line (a trailing newline
// is stripped first).
//
// On a read error it reports the problem and returns nil; the original
// silently ignored the error and produced [[""]], which masked a missing
// input file.
func ParseArrayFromProblemInput(path string) [][]string {
	dat, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Printf("could not read problem input %q: %v\n", path, err)
		return nil
	}
	lines := strings.Split(strings.TrimSuffix(string(dat), "\n"), "\n")
	dataSplit := make([][]string, 0, len(lines))
	for _, line := range lines {
		dataSplit = append(dataSplit, strings.Split(line, ","))
	}
	return dataSplit
}
// GetLowestDistance returns the smallest non-zero distance between w1 and any
// Wire stored in l, together with that closest Wire.
//
// Distances of exactly 0 are skipped — presumably to exclude the wire's own
// starting cell from the crossings list; confirm against the caller.
// For an empty list it returns (0, pointer-to-zero-Wire).
func GetLowestDistance(l *list.List, w1 *Wire) (float64, *Wire) {
	var w Wire
	lowestDistance := float64(0)
	current := l.Front()
	for i := 0; i < l.Len(); i++ {
		dist := w1.getDistance(current.Value.(Wire))
		// lowestDistance == 0 doubles as the "nothing found yet" sentinel.
		if lowestDistance == 0 || (dist < lowestDistance && dist != 0) {
			lowestDistance = dist
			w = current.Value.(Wire)
		}
		current = current.Next()
	}
	return lowestDistance, &w
}
// DoProblem3 solves the crossed-wires puzzle: it traces both wires over a
// shared grid, finds every cell both wires visited, and returns the distance
// from the starting point to the closest crossing.
func DoProblem3() float64 {
	parsed := ParseArrayFromProblemInput("./problem3/input")
	p1 := parsed[0]
	p2 := parsed[1]
	// Size the grid to the union of both wires' extents.
	var maxX, maxY, minX, minY int
	maxXp1, maxYp1, minXp1, minYp1 := CalcMaxDimensions(p1)
	maxXp2, maxYp2, minXp2, minYp2 := CalcMaxDimensions(p2)
	if maxXp1 > maxXp2 {
		maxX = maxXp1
	} else {
		maxX = maxXp2
	}
	if minXp1 < minXp2 {
		minX = minXp1
	} else {
		minX = minXp2
	}
	if maxYp1 > maxYp2 {
		maxY = maxYp1
	} else {
		maxY = maxYp2
	}
	if minYp1 < minYp2 {
		minY = minYp1
	} else {
		minY = minYp2
	}
	// Clamp minima to zero so the origin offset below never goes negative.
	if minX > 0 {
		minX = 0
	}
	if minY > 0 {
		minY = 0
	}
	fmt.Printf("X Ranges: %v - %v, Y Ranges: %v - %v\n", minX, maxX, minY, maxY)
	// +2 leaves a one-cell border on each side of the traced area.
	fm := New((maxX-minX)+2, (maxY-minY)+2)
	// Shift the origin so negative coordinates map into the grid.
	startingPointX := 1 - minX
	startingPointY := 1 - minY
	w1 := NewWire(startingPointX, startingPointY)
	w2 := NewWire(startingPointX, startingPointY)
	fmt.Printf("Wire 1: %v\n", w1)
	fmt.Printf("Wire 2: %v\n", w2)
	// Trace each wire, incrementing visit counts along the way.
	for _, x := range p1 {
		ParseOpAndApply(x, fm, w1)
	}
	for _, x := range p2 {
		ParseOpAndApply(x, fm, w2)
	}
	crossedWires := fm.FindCrossedWires()
	// w1 still points at the start? No — it has moved; getDistance is
	// measured from w1's final position. NOTE(review): the puzzle usually
	// wants distance from the ORIGIN; confirm w1 is the intended reference.
	dist, closest := GetLowestDistance(crossedWires, w1)
	fmt.Println(closest)
	return dist
}
|
package command
import (
"errors"
"golang.org/x/net/context"
"github.com/xozrc/cqrs/types"
)
var (
	// commandFactoryMap holds the registered command factories keyed by
	// command name. It is initialized in init() and populated through
	// RegisterCommand.
	commandFactoryMap map[string]CommandFactory
)
var (
	// CommandFactoryNoFound is returned by NewCommand when no factory is
	// registered under the requested key. The variable name keeps its
	// historical spelling for caller compatibility; only the message
	// grammar is corrected ("no found" -> "not found").
	CommandFactoryNoFound = errors.New("command factory not found")
)
// init prepares the factory registry so RegisterCommand can be called from
// other packages' init functions without a nil-map panic.
func init() {
	commandFactoryMap = make(map[string]CommandFactory)
}
// Command is a CQRS command identified by a globally unique id.
type Command interface {
	Id() types.Guid
}

// CommandFactory constructs Command instances for a given id.
type CommandFactory interface {
	NewCommand(id types.Guid) Command
}

// CommandFactoryFunc adapts an ordinary function to the CommandFactory
// interface.
type CommandFactoryFunc func(id types.Guid) Command

// NewCommand calls the underlying function.
func (veff CommandFactoryFunc) NewCommand(id types.Guid) Command {
	return veff(id)
}
// RegisterCommand registers cf under key, replacing any factory previously
// registered under the same key.
//
// NOTE(review): the registry map is not guarded by a lock — registration is
// presumably expected only during package init; confirm before calling it
// from concurrent code.
func RegisterCommand(key string, cf CommandFactory) {
	commandFactoryMap[key] = cf
}
// NewCommand looks up the factory registered under key and uses it to build
// a Command carrying the given id. It returns CommandFactoryNoFound when no
// factory has been registered for key.
func NewCommand(key string, id types.Guid) (cmd Command, err error) {
	factory, registered := commandFactoryMap[key]
	if !registered {
		return nil, CommandFactoryNoFound
	}
	return factory.NewCommand(id), nil
}
// CommandHandler processes a Command within the given context.
type CommandHandler interface {
	HandleCommand(ctx context.Context, c Command) error
}

// CommandHandlerFunc adapts an ordinary function to the CommandHandler
// interface.
type CommandHandlerFunc func(ctx context.Context, c Command) error

// HandleCommand calls the underlying function.
func (chf CommandHandlerFunc) HandleCommand(ctx context.Context, c Command) error {
	return chf(ctx, c)
}
|
package dockerconfig
import (
"github.com/Dynatrace/dynatrace-operator/src/logger"
)
var (
	// log is the package-wide logger, namespaced under "docker-config".
	log = logger.Factory.GetLogger("docker-config")
)
|
// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.
//go:build go1.18
// +build go1.18
package useragent_test
import (
"testing"
"storj.io/common/useragent"
)
// FuzzParseEntries fuzzes useragent.ParseEntries with a corpus covering
// empty input, storj-style product tokens (with and without build metadata),
// browser-style user agents, and a string built entirely of RFC 7230 token
// characters. The fuzz body only checks that parsing never panics; both
// return values are intentionally discarded.
func FuzzParseEntries(f *testing.F) {
	f.Add([]byte(""))
	f.Add([]byte("storj.io-common/v0.0.0-00010101000000-000000000000"))
	f.Add([]byte("storj.io-common/v0.0.0-00010101000000"))
	f.Add([]byte("storj.io-common/v9.0.0"))
	f.Add([]byte("Mozilla"))
	f.Add([]byte("Mozilla/5.0"))
	f.Add([]byte("Mozilla/5.0 (Linux; U; Android 4.4.3;)"))
	f.Add([]byte("storj.io-uplink/v0.0.1 storj.io-drpc/v5.0.0+123+123 Mozilla/5.0 (Linux; U; Android 4.4.3;) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30 Opera News/1.0"))
	f.Add([]byte("!#$%&'*+-.^_`|~/!#$%&'*+-.^_`|~"))
	f.Fuzz(func(t *testing.T, data []byte) {
		_, _ = useragent.ParseEntries(data)
	})
}
|
package weibo
// Comment is a user comment attached to a weibo post. The struct carries
// both JSON and database column tags so it can be used directly for API
// responses and persistence.
type Comment struct {
	ID        int64  `json:"id" db:"id"`                 // primary key
	UserID    int64  `json:"user_id" db:"user_id"`       // author of the comment
	WeiboID   int64  `json:"weibo_id" db:"weibo_id"`     // post being commented on
	Content   string `json:"content" db:"content"`       // comment body
	CreatedAt int64  `json:"created_at" db:"created_at"` // creation time; presumably a Unix timestamp — confirm against writers
}
|
package test
import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"
)
// post sends params to url as an x-www-form-urlencoded POST request and
// returns (error, HTTP status line, response body).
//
// Fixes over the original: the http.NewRequest error is now checked before
// the request is used (previously a malformed URL caused a nil-pointer panic
// on req.Header), and the client carries a timeout so a hung server cannot
// block the caller forever.
func post(url string, params []byte) (error, string, []byte) {
	fmt.Println("URL:>", url)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(params))
	if err != nil {
		return err, "", []byte("")
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err, "", []byte("")
	}
	defer resp.Body.Close()
	// Best effort: a read error while draining the body still yields the status.
	body, _ := ioutil.ReadAll(resp.Body)
	return nil, resp.Status, body
}
// test POSTs params to url via post and writes a human-readable report of
// the outcome (status line, response body, and the error when one occurred)
// to w, followed by a blank separator line.
func test(w http.ResponseWriter, url string, params []byte) {
	err, status, body := post(url, params)
	outcome := "OK"
	if err != nil {
		outcome = "Error"
	}
	fmt.Fprintln(w, "url: "+url+", method: post, status: "+outcome)
	fmt.Fprintln(w, "response Status: "+status)
	fmt.Fprintln(w, "response Body: "+string(body))
	if err != nil {
		fmt.Fprintln(w, "error: "+err.Error())
	}
	fmt.Fprintln(w, "")
}
// Test is an HTTP handler that exercises the local API endpoints. Most
// scenarios are currently disabled (commented out); only the chat-message
// POST is live. The url/params assignments for disabled scenarios are kept
// so they can be re-enabled by uncommenting the matching test(...) call.
func Test(w http.ResponseWriter, r *http.Request) {
	var url string
	var params []byte
	w.WriteHeader(http.StatusOK)
	// Scenario: user registration (disabled).
	url = "http://localhost:8080/api/v1/auth/registrar"
	params = []byte("nombres=nombres&apellidos=apellidos&" +
		"email=email@dominio.com&usuario=username&" +
		"passwd=123456&passwdConfirm=123456")
	//test(w, url, params)
	// Scenario: login with correct password (disabled).
	url = "http://localhost:8080/api/v1/auth/login"
	params = []byte("usuario=username&passwd=123456")
	//test(w, url, params)
	// Scenario: login with wrong password (disabled).
	url = "http://localhost:8080/api/v1/auth/login"
	params = []byte("usuario=username&passwd=12345678")
	//test(w, url, params)
	// Scenario: send a chat message (live).
	url = "http://localhost:8080/api/v1/chats/mensaje"
	params = []byte("tipo=usuario&usuario=dmonsalve&mensaje=hola+mundo")
	test(w, url, params)
}
|
package transaction
import (
"fmt"
"testing"
)
// TestFromRaw checks that a transaction survives a round trip through the
// raw encoding: ToRaw followed by FromRaw must reproduce the original value.
func TestFromRaw(t *testing.T) {
	tx1 := GetTestTransaction()
	raw := ToRaw(tx1)
	tx2 := FromRaw(raw)
	// Direct struct comparison; prints both values on mismatch for debugging.
	if tx1 != tx2 {
		fmt.Printf("Tx1: %x \n", tx1)
		fmt.Printf("Tx2: %x \n", tx2)
		t.Error("tx1 != tx2")
	}
}

// TestFromJSON checks the JSON round trip: serializing, parsing, and
// re-serializing a transaction must yield the identical JSON string.
func TestFromJSON(t *testing.T) {
	jsonStringA := ToJson(GetTestTransaction())
	tx := FromJSON(jsonStringA)
	jsonStringB := ToJson(tx)
	if jsonStringA != jsonStringB {
		fmt.Println("A: ", jsonStringA)
		fmt.Println("B: ", jsonStringB)
		t.Error("A != B")
	}
}

// TestCheckSignature verifies that the signature on the raw form of the
// test transaction validates.
func TestCheckSignature(t *testing.T) {
	tx := GetTestTransaction()
	raw := ToRaw(tx)
	if !CheckSignature(raw) {
		fmt.Printf("Signature: %x \n", raw.Signature)
		t.Error("signature is not correct")
	}
}
|
package models
import (
db "go_cnode/database"
"gopkg.in/mgo.v2/bson"
"time"
"log"
)
//"log"
// Message is a notification document stored in the "messages" collection:
// an "at" (mention) or "reply" event delivered to a user (Master_id).
//
// NOTE(review): several fields declare only a json tag, so mgo falls back to
// the lowercased field name as the bson key (e.g. Master_id -> master_id);
// the queries below rely on those derived keys. Confirm the stored documents
// actually use them before changing any tag.
type Message struct {
	Id        bson.ObjectId `bson:"_id"`
	Type      string        `json:"type"`       // "at" or "reply" (see Send* methods)
	Master_id bson.ObjectId `json:"master_id" ` // recipient of the notification
	Author_id bson.ObjectId `bson:"author_id" ` // user who triggered it
	Topic_id  bson.ObjectId `json:"topic_id" `
	Reply_id  bson.ObjectId `json:"reply_id" `
	Has_read  bool          `json:"has_read"`
	Create_at time.Time     `bson:"create_at"`
}

// MessageModel groups the data-access methods for Message documents.
type MessageModel struct{}
// GetMessagesCount returns the number of unread messages for the user with
// the given hex id.
// NOTE(review): bson.ObjectIdHex panics on an invalid hex string — callers
// presumably validate the id first; confirm.
func (p *MessageModel) GetMessagesCount(id string) (count int, err error) {
	mgodb:=db.Mgodb
	objectId := bson.ObjectIdHex(id)
	count, err = mgodb.C("messages").Find(bson.M{"master_id": objectId,"has_read": false}).Count()
	return count, err
}

// GetMessageById fetches a single message by its hex id.
func (p *MessageModel) GetMessageById(id string) (message Message, err error) {
	mgodb:=db.Mgodb
	objectId := bson.ObjectIdHex(id)
	err = mgodb.C("messages").Find(bson.M{"_id": objectId}).One(&message)
	return message, err
}

// GetMessagesByUserId returns the user's READ messages, newest first.
// NOTE(review): despite the generic name, this filters has_read: true —
// it complements GetUnreadMessagesByUserId; confirm callers expect that.
func (p *MessageModel) GetMessagesByUserId(id bson.ObjectId) (messages []Message, err error) {
	mgodb:=db.Mgodb
	err = mgodb.C("messages").Find(bson.M{"master_id": id, "has_read": true}).Sort("-create_at").All(&messages)
	return messages, err
}

// GetUnreadMessagesByUserId returns the user's unread messages, newest first.
func (p *MessageModel) GetUnreadMessagesByUserId(id bson.ObjectId) (messages []Message, err error) {
	mgodb:=db.Mgodb
	err = mgodb.C("messages").Find(bson.M{"master_id": id, "has_read": false}).Sort("-create_at").All(&messages)
	return messages, err
}
// UpdateOneMessageToRead marks the message with the given hex id as read.
func (p *MessageModel) UpdateOneMessageToRead(msgId string) (err error) {
	mgodb:=db.Mgodb
	objectId := bson.ObjectIdHex(msgId)
	err = mgodb.C("messages").Update(bson.M{"_id": objectId},
		bson.M{
			"$set": bson.M{"has_read": true},
		})
	return err
}

// UpdateMessagesToRead marks all of the given messages as read, scoped to
// the owning user so one user cannot flip another user's messages.
func (p *MessageModel) UpdateMessagesToRead(userId string, messages []Message) (err error) {
	mgodb:=db.Mgodb
	var ids []bson.ObjectId
	for _, message := range messages {
		ids = append(ids, message.Id)
	}
	// NOTE(review): leftover debug logging — consider removing in production.
	log.Println(ids)
	_,err = mgodb.C("messages").UpdateAll(
		bson.M{"master_id": bson.ObjectIdHex(userId),
			"_id":bson.M{"$in": ids}},
		bson.M{
			"$set": bson.M{"has_read": true},
		})
	return err
}
// SendAtMessage inserts an "at" (mention) notification for userId, recording
// who mentioned them (authorId), where (topicId), and the reply involved.
func (p *MessageModel) SendAtMessage(userId string, authorId string, topicId string, replyId bson.ObjectId) (err error) {
	mgodb:=db.Mgodb
	message := Message{
		Id:        bson.NewObjectId(),
		Type:      "at",
		Master_id: bson.ObjectIdHex(userId),
		Topic_id:  bson.ObjectIdHex(topicId),
		Author_id: bson.ObjectIdHex(authorId),
		Reply_id:  replyId,
		Create_at: time.Now(),
	}
	err = mgodb.C("messages").Insert(&message)
	return err
}

// SendReplyMessage inserts a "reply" notification for userId. It mirrors
// SendAtMessage except for the Type field; the new message is implicitly
// unread (Has_read defaults to false).
func (p *MessageModel) SendReplyMessage(userId string, authorId string, topicId string, replyId bson.ObjectId) (err error) {
	//mgodb := db.MogSession.DB("egg_cnode")
	mgodb:=db.Mgodb
	message := Message{
		Id:        bson.NewObjectId(),
		Type:      "reply",
		Master_id: bson.ObjectIdHex(userId),
		Topic_id:  bson.ObjectIdHex(topicId),
		Author_id: bson.ObjectIdHex(authorId),
		Reply_id:  replyId,
		Create_at: time.Now(),
	}
	err = mgodb.C("messages").Insert(&message)
	return err
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.package spec
package main
import (
"fmt"
_ "net/http/pprof"
"os"
"time"
"github.com/pingcap/tidb-operator/tests/pkg/apimachinery"
v1 "k8s.io/api/core/v1"
"github.com/golang/glog"
"github.com/jinzhu/copier"
"github.com/pingcap/tidb-operator/tests"
"github.com/pingcap/tidb-operator/tests/pkg/blockwriter"
"github.com/pingcap/tidb-operator/tests/pkg/client"
"k8s.io/apiserver/pkg/util/logs"
)
// main drives the tidb-operator end-to-end test sequence: deploy the
// operator, deploy three test clusters, then exercise upgrade, on-the-fly
// config changes, scaling (out and in), disaster tolerance, and
// backup/restore. Every failure is fatal; logs are dumped on exit.
func main() {
	logs.InitLogs()
	defer logs.FlushLogs()
	conf := tests.ParseConfigOrDie()
	conf.ManifestDir = "/manifests"
	cli, kubeCli := client.NewCliOrDie()
	oa := tests.NewOperatorActions(cli, kubeCli, 5*time.Second, conf, nil)
	// Operator deployment parameters, including the admission webhook wiring.
	operatorInfo := &tests.OperatorConfig{
		Namespace:      "pingcap",
		ReleaseName:    "operator",
		Image:          conf.OperatorImage,
		Tag:            conf.OperatorTag,
		SchedulerImage: "mirantis/hypokube",
		SchedulerTag:   "final",
		SchedulerFeatures: []string{
			"StableScheduling=true",
		},
		LogLevel:           "2",
		WebhookServiceName: "webhook-service",
		WebhookSecretName:  "webhook-secret",
		WebhookConfigName:  "webhook-config",
		ImagePullPolicy:    v1.PullIfNotPresent,
		TestMode:           true,
	}
	// Serve the validating admission webhook from this process.
	ns := os.Getenv("NAMESPACE")
	context, err := apimachinery.SetupServerCert(ns, tests.WebhookServiceName)
	if err != nil {
		panic(err)
	}
	go tests.StartValidatingAdmissionWebhookServerOrDie(context)
	initTidbVersion, err := conf.GetTiDBVersion()
	if err != nil {
		glog.Fatal(err)
	}
	// Three clusters: two full clusters plus a minimal single-PD cluster
	// (name3 shares name2's namespace and secrets).
	name1 := "e2e-cluster1"
	name2 := "e2e-cluster2"
	name3 := "e2e-pd-replicas-1"
	topologyKey := "rack"
	clusterInfos := []*tests.TidbClusterConfig{
		{
			Namespace:        name1,
			ClusterName:      name1,
			OperatorTag:      conf.OperatorTag,
			PDImage:          fmt.Sprintf("pingcap/pd:%s", initTidbVersion),
			TiKVImage:        fmt.Sprintf("pingcap/tikv:%s", initTidbVersion),
			TiDBImage:        fmt.Sprintf("pingcap/tidb:%s", initTidbVersion),
			StorageClassName: "local-storage",
			Password:         "",
			UserName:         "root",
			InitSecretName:   fmt.Sprintf("%s-set-secret", name1),
			BackupSecretName: fmt.Sprintf("%s-backup-secret", name1),
			BackupName:       "backup",
			Resources: map[string]string{
				"pd.resources.limits.cpu":        "1000m",
				"pd.resources.limits.memory":     "2Gi",
				"pd.resources.requests.cpu":      "200m",
				"pd.resources.requests.memory":   "1Gi",
				"tikv.resources.limits.cpu":      "2000m",
				"tikv.resources.limits.memory":   "4Gi",
				"tikv.resources.requests.cpu":    "200m",
				"tikv.resources.requests.memory": "1Gi",
				"tidb.resources.limits.cpu":      "2000m",
				"tidb.resources.limits.memory":   "4Gi",
				"tidb.resources.requests.cpu":    "200m",
				"tidb.resources.requests.memory": "1Gi",
				"discovery.image":                conf.OperatorImage,
			},
			Args:    map[string]string{},
			Monitor: true,
			BlockWriteConfig: blockwriter.Config{
				TableNum:    1,
				Concurrency: 1,
				BatchSize:   1,
				RawSize:     1,
			},
			TopologyKey:            topologyKey,
			EnableConfigMapRollout: true,
		},
		{
			Namespace:        name2,
			ClusterName:      name2,
			OperatorTag:      conf.OperatorTag,
			PDImage:          fmt.Sprintf("pingcap/pd:%s", initTidbVersion),
			TiKVImage:        fmt.Sprintf("pingcap/tikv:%s", initTidbVersion),
			TiDBImage:        fmt.Sprintf("pingcap/tidb:%s", initTidbVersion),
			StorageClassName: "local-storage",
			Password:         "admin",
			UserName:         "root",
			InitSecretName:   fmt.Sprintf("%s-set-secret", name2),
			BackupSecretName: fmt.Sprintf("%s-backup-secret", name2),
			BackupName:       "backup",
			Resources: map[string]string{
				"pd.resources.limits.cpu":        "1000m",
				"pd.resources.limits.memory":     "2Gi",
				"pd.resources.requests.cpu":      "200m",
				"pd.resources.requests.memory":   "1Gi",
				"tikv.resources.limits.cpu":      "2000m",
				"tikv.resources.limits.memory":   "4Gi",
				"tikv.resources.requests.cpu":    "200m",
				"tikv.resources.requests.memory": "1Gi",
				"tidb.resources.limits.cpu":      "2000m",
				"tidb.resources.limits.memory":   "4Gi",
				"tidb.resources.requests.cpu":    "200m",
				"tidb.resources.requests.memory": "1Gi",
				"discovery.image":                conf.OperatorImage,
			},
			Args:    map[string]string{},
			Monitor: true,
			BlockWriteConfig: blockwriter.Config{
				TableNum:    1,
				Concurrency: 1,
				BatchSize:   1,
				RawSize:     1,
			},
			TopologyKey:            topologyKey,
			EnableConfigMapRollout: false,
		},
		{
			Namespace:        name2,
			ClusterName:      name3,
			OperatorTag:      conf.OperatorTag,
			PDImage:          fmt.Sprintf("pingcap/pd:%s", initTidbVersion),
			TiKVImage:        fmt.Sprintf("pingcap/tikv:%s", initTidbVersion),
			TiDBImage:        fmt.Sprintf("pingcap/tidb:%s", initTidbVersion),
			StorageClassName: "local-storage",
			Password:         "admin",
			UserName:         "root",
			InitSecretName:   fmt.Sprintf("%s-set-secret", name2),
			BackupSecretName: fmt.Sprintf("%s-backup-secret", name2),
			Resources: map[string]string{
				"pd.replicas":     "1",
				"discovery.image": conf.OperatorImage,
			},
			TopologyKey: topologyKey,
		},
	}
	// Always dump logs, even when a step below calls glog.Fatal via defer-unwinding
	// (note: glog.Fatal exits immediately, so this fires only on panics/returns).
	defer func() {
		oa.DumpAllLogs(operatorInfo, clusterInfos)
	}()
	oa.LabelNodesOrDie()
	// deploy operator
	if err := oa.CleanOperator(operatorInfo); err != nil {
		oa.DumpAllLogs(operatorInfo, nil)
		glog.Fatal(err)
	}
	if err = oa.DeployOperator(operatorInfo); err != nil {
		oa.DumpAllLogs(operatorInfo, nil)
		glog.Fatal(err)
	}
	// deploy tidbclusters
	for _, clusterInfo := range clusterInfos {
		if err = oa.CleanTidbCluster(clusterInfo); err != nil {
			glog.Fatal(err)
		}
		if err = oa.DeployTidbCluster(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	for _, clusterInfo := range clusterInfos {
		if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	// check disaster tolerance
	for _, clusterInfo := range clusterInfos {
		oa.CheckDisasterToleranceOrDie(clusterInfo)
	}
	// Keep background write load running during the upgrade/scale phases.
	for _, clusterInfo := range clusterInfos {
		go oa.BeginInsertDataToOrDie(clusterInfo)
	}
	// before upgrade cluster, register webhook first
	oa.RegisterWebHookAndServiceOrDie(context, operatorInfo)
	// upgrade test
	upgradeTidbVersions := conf.GetUpgradeTidbVersions()
	for _, upgradeTidbVersion := range upgradeTidbVersions {
		// Record where TiDB members were scheduled so we can verify the
		// upgrade did not move them.
		oldTidbMembersAssignedNodes := map[string]map[string]string{}
		for _, clusterInfo := range clusterInfos {
			assignedNodes, err := oa.GetTidbMemberAssignedNodes(clusterInfo)
			if err != nil {
				glog.Fatal(err)
			}
			oldTidbMembersAssignedNodes[clusterInfo.ClusterName] = assignedNodes
			clusterInfo = clusterInfo.UpgradeAll(upgradeTidbVersion)
			if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
				glog.Fatal(err)
			}
		}
		// only check manual pause for 1 cluster
		if len(clusterInfos) >= 1 {
			oa.CheckManualPauseTiDBOrDie(clusterInfos[0])
		}
		for _, clusterInfo := range clusterInfos {
			if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
				glog.Fatal(err)
			}
			if err = oa.CheckTidbMemberAssignedNodes(clusterInfo, oldTidbMembersAssignedNodes[clusterInfo.ClusterName]); err != nil {
				glog.Fatal(err)
			}
		}
	}
	// update configuration on the fly
	for _, clusterInfo := range clusterInfos {
		clusterInfo = clusterInfo.
			UpdatePdMaxReplicas(conf.PDMaxReplicas).
			UpdatePDLogLevel("debug").
			UpdateTiKVGrpcConcurrency(conf.TiKVGrpcConcurrency).
			UpdateTiDBTokenLimit(conf.TiDBTokenLimit)
		if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
			glog.Fatal(err)
		}
		// NOTE(review): this status-check loop is nested INSIDE the per-cluster
		// update loop, so all clusters are re-checked after each update —
		// likely a misplaced brace (compare the scale sections below, where
		// update and check are sibling loops). Confirm before changing.
		for _, clusterInfo := range clusterInfos {
			if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
				glog.Fatal(err)
			}
		}
	}
	// after upgrade cluster, clean webhook
	oa.CleanWebHookAndService(operatorInfo)
	// Scale out: TiDB 3, TiKV 5, PD 5.
	for _, clusterInfo := range clusterInfos {
		clusterInfo = clusterInfo.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5)
		if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	for _, clusterInfo := range clusterInfos {
		if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	// Scale PD back in to 3.
	for _, clusterInfo := range clusterInfos {
		clusterInfo = clusterInfo.ScalePD(3)
		if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	for _, clusterInfo := range clusterInfos {
		if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	// Scale TiKV back in to 3.
	for _, clusterInfo := range clusterInfos {
		clusterInfo = clusterInfo.ScaleTiKV(3)
		if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	for _, clusterInfo := range clusterInfos {
		if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	// Scale TiDB in to 1.
	for _, clusterInfo := range clusterInfos {
		clusterInfo = clusterInfo.ScaleTiDB(1)
		if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	for _, clusterInfo := range clusterInfos {
		if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
			glog.Fatal(err)
		}
	}
	// check data regions disaster tolerance
	for _, clusterInfo := range clusterInfos {
		oa.CheckDataRegionDisasterToleranceOrDie(clusterInfo)
	}
	// backup and restore
	// The restore target is a copy of the backup cluster with a distinct
	// name and secrets.
	backupClusterInfo := clusterInfos[0]
	restoreClusterInfo := &tests.TidbClusterConfig{}
	copier.Copy(restoreClusterInfo, backupClusterInfo)
	restoreClusterInfo.ClusterName = restoreClusterInfo.ClusterName + "-other"
	restoreClusterInfo.InitSecretName = fmt.Sprintf("%s-set-secret", restoreClusterInfo.ClusterName)
	restoreClusterInfo.BackupSecretName = fmt.Sprintf("%s-backup-secret", restoreClusterInfo.ClusterName)
	if err = oa.CleanTidbCluster(restoreClusterInfo); err != nil {
		glog.Fatal(err)
	}
	if err = oa.DeployTidbCluster(restoreClusterInfo); err != nil {
		glog.Fatal(err)
	}
	if err = oa.CheckTidbClusterStatus(restoreClusterInfo); err != nil {
		glog.Fatal(err)
	}
	oa.BackupRestoreOrDie(backupClusterInfo, restoreClusterInfo)
	//clean temp dirs when e2e success
	err = conf.CleanTempDirs()
	if err != nil {
		glog.Errorf("failed to clean temp dirs, this error can be ignored.")
	}
	glog.Infof("\nFinished.")
}
|
package readwhilewrite
import (
"context"
"io"
"net/http"
"os"
)
// SendFileHTTP serves a file as a HTTP response while fw is writing to the same file.
//
// Once it gets an EOF, it waits more writes by the writer. If the ctx is done while
// waiting, SendFileHTTP returns. Typically you want to pass r.Context() as ctx for
// r *http.Request.
//
// If you set the Content-Length header before calling SendFileHTTP, the sendfile
// system call is used on Linux.
//
// Returns the total number of bytes copied to w and the first error
// encountered (the writer's own error is propagated once writing finishes).
func SendFileHTTP(ctx context.Context, w http.ResponseWriter, file *os.File, fw *Writer) (n int64, err error) {
	// Subscribe before the first copy so no write notification is missed.
	wroteC := fw.subscribe()
	defer fw.unsubscribe(wroteC)
	var n1 int64
	for {
		// Copy whatever is currently available; io.Copy returning nil means
		// it hit the current EOF of the growing file.
		n1, err = io.Copy(w, file)
		n += n1
		if err != nil && err != io.EOF {
			return
		}
		select {
		case _, ok := <-wroteC:
			if ok {
				// The writer appended more data; loop and copy again.
				continue
			}
			// Channel closed: the writer has finished (or failed).
			if fw.err != nil {
				err = fw.err
				return
			}
			// Final drain: pick up any bytes written between the last copy
			// and the close notification.
			n1, err = io.Copy(w, file)
			n += n1
			return
		case <-ctx.Done():
			err = ctx.Err()
			return
		}
	}
}
|
package main
import "fmt"
// sqr squares the integer pointed to by x in place.
func sqr(x *int) {
	*x *= *x
}
// swap exchanges the values stored at x and y.
func swap(x *int, y *int) {
	*x, *y = *y, *x
}
// main demonstrates pointer arguments: squaring through a pointer, then
// swapping two values — first via new()-allocated ints, then via the
// addresses of ordinary local variables.
func main() {
	x, y := new(int), new(int)
	*x, *y = 5, 10
	sqr(x)
	fmt.Println(*x, *y)
	swap(x, y)
	fmt.Println(*x, *y)

	v, u := 5, 10
	fmt.Println(v, u)
	swap(&u, &v)
	fmt.Println(v, u)
}
|
package filepathfilter_test
import (
"os"
"path/filepath"
"sync"
"testing"
"github.com/git-lfs/git-lfs/filepathfilter"
"github.com/git-lfs/git-lfs/tools"
)
// BenchmarkFilterSimplePath measures Filter.Allows with a bare path pattern.
func BenchmarkFilterSimplePath(b *testing.B) {
	files := benchmarkTree(b)
	filter := filepathfilter.New([]string{"lfs"}, nil)
	for i := 0; i < b.N; i++ {
		for _, f := range files {
			filter.Allows(f)
		}
	}
}

// BenchmarkPatternSimplePath measures Pattern.Match with the same bare path
// pattern, isolating the pattern cost from the filter wrapper above.
func BenchmarkPatternSimplePath(b *testing.B) {
	files := benchmarkTree(b)
	pattern := filepathfilter.NewPattern("lfs")
	for i := 0; i < b.N; i++ {
		for _, f := range files {
			pattern.Match(f)
		}
	}
}

// BenchmarkFilterSimpleExtension measures Filter.Allows with a single-suffix
// glob ("*.go").
func BenchmarkFilterSimpleExtension(b *testing.B) {
	files := benchmarkTree(b)
	filter := filepathfilter.New([]string{"*.go"}, nil)
	for i := 0; i < b.N; i++ {
		for _, f := range files {
			filter.Allows(f)
		}
	}
}

// BenchmarkPatternSimpleExtension is the Pattern.Match counterpart of the
// single-suffix glob benchmark.
func BenchmarkPatternSimpleExtension(b *testing.B) {
	files := benchmarkTree(b)
	pattern := filepathfilter.NewPattern("*.go")
	for i := 0; i < b.N; i++ {
		for _, f := range files {
			pattern.Match(f)
		}
	}
}

// BenchmarkFilterComplexExtension measures Filter.Allows with a multi-dot
// glob ("*.travis.yml").
func BenchmarkFilterComplexExtension(b *testing.B) {
	files := benchmarkTree(b)
	filter := filepathfilter.New([]string{"*.travis.yml"}, nil)
	for i := 0; i < b.N; i++ {
		for _, f := range files {
			filter.Allows(f)
		}
	}
}

// BenchmarkPatternComplexExtension is the Pattern.Match counterpart of the
// multi-dot glob benchmark.
func BenchmarkPatternComplexExtension(b *testing.B) {
	files := benchmarkTree(b)
	pattern := filepathfilter.NewPattern("*.travis.yml")
	for i := 0; i < b.N; i++ {
		for _, f := range files {
			pattern.Match(f)
		}
	}
}

// BenchmarkFilterDoubleAsterisk measures Filter.Allows with a recursive
// "**/" pattern.
func BenchmarkFilterDoubleAsterisk(b *testing.B) {
	files := benchmarkTree(b)
	filter := filepathfilter.New([]string{"**/README.md"}, nil)
	for i := 0; i < b.N; i++ {
		for _, f := range files {
			filter.Allows(f)
		}
	}
}

// BenchmarkPatternDoubleAsterisk is the Pattern.Match counterpart of the
// recursive "**/" benchmark.
func BenchmarkPatternDoubleAsterisk(b *testing.B) {
	files := benchmarkTree(b)
	pattern := filepathfilter.NewPattern("**/README.md")
	for i := 0; i < b.N; i++ {
		for _, f := range files {
			pattern.Match(f)
		}
	}
}
var (
	// benchmarkFiles caches the walked file list so the (expensive) repo walk
	// runs once and is shared by every benchmark in this file.
	benchmarkFiles []string
	benchmarkMu    sync.Mutex
)

// benchmarkTree returns the list of file paths under the repository root
// (the parent of the working directory), building and caching it on first
// use. Walk errors fail the calling benchmark.
//
// NOTE(review): the first benchmark to run pays the walk cost inside its
// timed region unless it resets the timer — confirm this is acceptable for
// the comparisons being made.
func benchmarkTree(b *testing.B) []string {
	benchmarkMu.Lock()
	defer benchmarkMu.Unlock()
	if benchmarkFiles != nil {
		return benchmarkFiles
	}
	wd, err := os.Getwd()
	if err != nil {
		b.Fatal(err)
	}
	hasErrors := false
	tools.FastWalkGitRepo(filepath.Dir(wd), func(parent string, info os.FileInfo, err error) {
		if err != nil {
			// Record and report, but keep walking so all errors surface.
			hasErrors = true
			b.Error(err)
			return
		}
		benchmarkFiles = append(benchmarkFiles, filepath.Join(parent, info.Name()))
	})
	if hasErrors {
		b.Fatal("has errors :(")
	}
	return benchmarkFiles
}
|
package main
// yplane is an infinite horizontal plane (constant y = Loc) scene object
// with a flat RGB color and a reflectiveness coefficient.
type yplane struct {
	Loc            float64 `json:"loc"`            // plane height: the y coordinate of every point on the plane
	Reflectiveness float64 `json:"reflectiveness"` // 0 = matte, 1 = perfect mirror (presumably — confirm against shader)
	Red            float64 `json:"red"`
	Green          float64 `json:"green"`
	Blue           float64 `json:"blue"`
}

// getReflectiveness returns the plane's reflectiveness coefficient.
func (y *yplane) getReflectiveness() float64 {
	return y.Reflectiveness
}

// getColorRaw returns the plane's base color as (r, g, b) components.
func (y *yplane) getColorRaw() (float64, float64, float64) {
	return y.Red, y.Green, y.Blue
}

// getUnitNormal returns the plane's surface normal. The point argument is
// ignored because a y-plane has the same (upward) normal everywhere.
func (y *yplane) getUnitNormal(point *vector) *vector {
	return &vector{0.0, 1.0, 0.0}
}

// intersected returns the ray parameter t at which c_ray hits the plane and
// whether a hit occurred. Rays parallel to the plane (direction.Y == 0) or
// hits at/behind the ray origin (t <= 0 after the in_buffer adjustment —
// presumably an epsilon clamp to avoid self-intersection; confirm) count
// as misses.
func (y *yplane) intersected(c_ray *ray) (float64, bool) {
	if c_ray.direction.Y == 0.0 {
		return 0.0, false
	}
	t := (y.Loc - c_ray.start.Y) / c_ray.direction.Y
	t = in_buffer(t)
	if t <= 0.0 {
		return 0.0, false
	}
	return t, true
}
|
// Code generated; DANGER ZONE FOR EDITS
package data
import (
"bytes"
"encoding/json"
"fmt"
"gopkg.in/yaml.v2"
)
// FactionDefinitionName is the canonical name of this definition table.
const FactionDefinitionName = "faction"

// FactionDefinitions maps definition keys (stringified hashes) to faction
// definitions. (Generated code — edit with care.)
type FactionDefinitions map[string]FactionDefinition

// Keys returns all definition keys. Order is nondeterministic (map iteration).
func (d FactionDefinitions) Keys() (out []string) {
	for k := range d {
		out = append(out, k)
	}
	return out
}

// Values returns all definitions. Order is nondeterministic (map iteration).
func (d FactionDefinitions) Values() (out []FactionDefinition) {
	for _, v := range d {
		out = append(out, v)
	}
	return out
}
// Find returns the FactionDefinition stored under the key matching id, or
// the zero FactionDefinition when absent.
//
// Uses a direct map lookup instead of the original O(n) scan that compared
// every key against fmt.Sprint(id); behavior is identical because accessing
// a missing map key already yields the zero value.
func (d FactionDefinitions) Find(id int) (out FactionDefinition) {
	return d[fmt.Sprint(id)]
}
// Name returns the definition table's canonical name.
func (d FactionDefinitions) Name() string {
	return FactionDefinitionName
}

// FactionDefinition mirrors one entry of the Destiny "faction" manifest
// table. (Generated code — edit with care.)
type FactionDefinition struct {
	Blacklisted       bool                   `json:"blacklisted" yaml:"blacklisted,omitempty"`
	DisplayProperties DisplayProperties      `json:"displayProperties" yaml:"displayProperties,omitempty"`
	Hash              int                    `json:"hash" yaml:"hash,omitempty"`
	Index             int                    `json:"index" yaml:"index,omitempty"`
	ProgressionHash   int                    `json:"progressionHash" yaml:"progressionHash,omitempty"`
	Redacted          bool                   `json:"redacted" yaml:"redacted,omitempty"`
	RewardItemHash    int64                  `json:"rewardItemHash" yaml:"rewardItemHash,omitempty"`
	RewardVendorHash  int64                  `json:"rewardVendorHash" yaml:"rewardVendorHash,omitempty"`
	TokenValues       map[string]interface{} `json:"tokenValues" yaml:"tokenValues,omitempty"`
	Vendors           []Vendors              `json:"vendors" yaml:"vendors,omitempty"`
}

// Vendors is one vendor reference embedded in a FactionDefinition.
type Vendors struct {
	BackgroundImagePath string `json:"backgroundImagePath" yaml:"backgroundImagePath,omitempty"`
	DestinationHash     int64  `json:"destinationHash" yaml:"destinationHash,omitempty"`
	VendorHash          int64  `json:"vendorHash" yaml:"vendorHash,omitempty"`
}

// Name returns the definition's canonical table name.
func (d FactionDefinition) Name() string {
	return FactionDefinitionName
}
// Json serializes the definition as compact JSON.
func (d FactionDefinition) Json() ([]byte, error) {
	return json.Marshal(d)
}

// PrettyJson serializes the definition as indented (4-space) JSON.
func (d FactionDefinition) PrettyJson() ([]byte, error) {
	jout, err := d.Json()
	if err != nil {
		return nil, err
	}
	var pretty bytes.Buffer
	if err := json.Indent(&pretty, jout, "", "    "); err != nil {
		return nil, err
	}
	return pretty.Bytes(), nil
}

// Yaml serializes the definition as YAML.
func (d FactionDefinition) Yaml() ([]byte, error) {
	return yaml.Marshal(d)
}
|
package zh
import (
"regexp"
"strconv"
"time"
"github.com/olebedev/when/rules"
)
/*
"上午 5点"
"上午 5 点"
"下午 3点"
"下午 3 点"
"下午 3点半"
"下午 3点30"
"下午 3:30"
"下午 3:30"
"下午 三点半"
*/
// HourMinute builds a rule that parses Chinese hour/minute expressions such
// as "上午 5点" (5 a.m.), "下午 3:30" (3:30 p.m.), or "下午 三点半" — the hour
// and minute may each be written as digits or Chinese numeral words, with an
// optional day-period prefix that decides a.m./p.m.
func HourMinute(s rules.Strategy) rules.Rule {
	return &rules.F{
		RegExp: regexp.MustCompile("(?i)" +
			// Optional day-period prefix (dawn/morning/afternoon/evening/tonight).
			"(?:(凌\\s*晨|早\\s*晨|早\\s*上|上\\s*午|下\\s*午|晚\\s*上|今晚)?\\s*)" +
			// Hour as digits (0-23)...
			"((?:[0-1]{0,1}[0-9])|(?:2[0-3]))?" + "(?:\\s*)" +
			// ...or as Chinese numeral words.
			"(" + INTEGER_WORDS_PATTERN[3:] + "?" +
			// Hour/minute separator, including the "点" (o'clock) marker.
			"(\\:|:|\\-|点)" +
			// Minutes as digits (00-59)...
			"((?:[0-5][0-9]))?" +
			// ...or as Chinese numeral words (e.g. 半 = half past).
			"(" + INTEGER_WORDS_PATTERN + "+)?" +
			"(?:\\W|$)"),
		Applier: func(m *rules.Match, c *rules.Context, o *rules.Options, ref time.Time) (bool, error) {
			// Respect an already-set time unless the strategy allows override.
			if (c.Hour != nil || c.Minute != nil) && s != rules.Override {
				return false, nil
			}
			hour, exist := INTEGER_WORDS[m.Captures[2]] // Chinese numeral word
			if !exist {
				// NOTE(review): the word form is read from Captures[2] but the
				// digit fallback reads Captures[1]; given the capture groups in
				// the regexp above these look like different groups — confirm
				// the indices line up with the intended groups.
				hour, _ = strconv.Atoi(m.Captures[1])
			}
			if hour > 24 {
				return false, nil
			}
			minutes, exist := INTEGER_WORDS[m.Captures[5]]
			if !exist {
				minutes, _ = strconv.Atoi(m.Captures[4])
			}
			if minutes > 59 {
				return false, nil
			}
			c.Minute = &minutes
			// Normalize the (possibly space-padded) day-period prefix.
			lower := compressStr(m.Captures[0])
			switch lower {
			case "上午", "凌晨", "早晨", "早上": // a.m. forms: keep the hour as given
				c.Hour = &hour
			case "下午", "晚上", "今晚": // p.m. forms: shift into 24h range
				if hour < 12 {
					hour += 12
				}
				c.Hour = &hour
			case "": // no prefix: accept only a valid 24h hour
				if hour > 23 {
					return false, nil
				}
				c.Hour = &hour
			}
			return true, nil
		},
	}
}
|
package scene
import (
bsplib "github.com/galaco/bsp"
"github.com/galaco/bsp/lumps"
"github.com/galaco/lambda-client/internal/config"
"github.com/galaco/lambda-client/scene/visibility"
"github.com/galaco/lambda-client/scene/world"
"github.com/galaco/lambda-core/entity"
"github.com/galaco/lambda-core/lib/util"
"github.com/galaco/lambda-core/loader"
entity2 "github.com/galaco/lambda-core/loader/entity"
"github.com/galaco/lambda-core/model"
entitylib "github.com/galaco/source-tools-common/entity"
"github.com/go-gl/mathgl/mgl32"
"github.com/galaco/filesystem"
)
// LoadFromFile reads a BSP map from fileName and populates the active scene:
// world geometry, entities, and a default camera. Unreadable files and BSP
// versions older than 19 are fatal (Panic).
func LoadFromFile(fileName string, fs *filesystem.FileSystem) {
	newScene := Get()
	bspData, err := bsplib.ReadFromFile(fileName)
	if err != nil {
		util.Logger().Panic(err)
	}
	if bspData.Header().Version < 19 {
		util.Logger().Panic("Unsupported BSP Version. Exiting...")
	}
	//Set pakfile for filesystem
	fs.RegisterPakFile(bspData.Lump(bsplib.LumpPakfile).(*lumps.Pakfile))
	loadWorld(newScene, bspData, fs)
	loadEntities(newScene, bspData.Lump(bsplib.LumpEntities).(*lumps.EntData), fs)
	loadCamera(newScene)
}
// loadWorld builds the renderable world from the BSP: it redistributes the
// flat face list into per-visibility-cluster leafs (with bounding boxes),
// assigns static props to their clusters, and installs the result plus the
// vis data into targetScene. Faces and props whose leaf has no cluster
// (Cluster == -1) fall into a catch-all default cluster.
func loadWorld(targetScene *Scene, file *bsplib.Bsp, fs *filesystem.FileSystem) {
	baseWorld := loader.LoadMap(fs, file)
	baseWorldBsp := baseWorld.Bsp()
	baseWorldBspFaces := baseWorldBsp.ClusterLeafs()[0].Faces
	baseWorldStaticProps := baseWorld.StaticProps()
	visData := visibility.NewVisFromBSP(file)
	bspClusters := make([]model.ClusterLeaf, visData.VisibilityLump.NumClusters)
	// 32767 is presumably a sentinel id for the "no cluster" leaf — confirm.
	defaultCluster := model.ClusterLeaf{
		Id: 32767,
	}
	// Distribute faces into their visibility clusters and grow each
	// cluster's bounds from its leafs.
	for _, bspLeaf := range visData.Leafs {
		for _, leafFace := range visData.LeafFaces[bspLeaf.FirstLeafFace : bspLeaf.FirstLeafFace+bspLeaf.NumLeafFaces] {
			if bspLeaf.Cluster == -1 {
				//defaultCluster.Faces = append(defaultCluster.Faces, bspFaces[leafFace])
				continue
			}
			bspClusters[bspLeaf.Cluster].Id = bspLeaf.Cluster
			bspClusters[bspLeaf.Cluster].Faces = append(bspClusters[bspLeaf.Cluster].Faces, baseWorldBspFaces[leafFace])
			// NOTE(review): Mins/Maxs are overwritten (not merged) for each
			// leaf in the cluster, so the final bounds reflect only the last
			// leaf seen — confirm whether a min/max merge was intended.
			bspClusters[bspLeaf.Cluster].Mins = mgl32.Vec3{
				float32(bspLeaf.Mins[0]),
				float32(bspLeaf.Mins[1]),
				float32(bspLeaf.Mins[2]),
			}
			bspClusters[bspLeaf.Cluster].Maxs = mgl32.Vec3{
				float32(bspLeaf.Maxs[0]),
				float32(bspLeaf.Maxs[1]),
				float32(bspLeaf.Maxs[2]),
			}
			bspClusters[bspLeaf.Cluster].Origin = bspClusters[bspLeaf.Cluster].Mins.Add(bspClusters[bspLeaf.Cluster].Maxs.Sub(bspClusters[bspLeaf.Cluster].Mins))
		}
	}
	// Assign staticprops to clusters
	for idx, prop := range baseWorld.StaticProps() {
		for _, leafId := range prop.LeafList() {
			clusterId := visData.Leafs[leafId].Cluster
			if clusterId == -1 {
				defaultCluster.StaticProps = append(defaultCluster.StaticProps, &baseWorldStaticProps[idx])
				continue
			}
			bspClusters[clusterId].StaticProps = append(bspClusters[clusterId].StaticProps, &baseWorldStaticProps[idx])
		}
	}
	// Displacement faces are never vis-culled; keep them in the default cluster.
	for _, idx := range baseWorldBsp.ClusterLeafs()[0].DispFaces {
		defaultCluster.Faces = append(defaultCluster.Faces, baseWorldBspFaces[idx])
	}
	baseWorldBsp.SetClusterLeafs(bspClusters)
	baseWorldBsp.SetDefaultCluster(defaultCluster)
	targetScene.SetWorld(world.NewWorld(*baseWorld.Bsp(), baseWorld.StaticProps(), visData))
}
// loadEntities parses the BSP entity lump, instantiates every entity into
// the scene, and — when both a sky_camera and worldspawn entity exist —
// builds the 3D skybox from the worldspawn's skyname.
func loadEntities(targetScene *Scene, entdata *lumps.EntData, fs *filesystem.FileSystem) {
	vmfEntityTree, err := entity2.ParseEntities(entdata.GetData())
	if err != nil {
		util.Logger().Panic(err)
	}
	entityList := entitylib.FromVmfNodeTree(vmfEntityTree.Unclassified)
	util.Logger().Notice("Found %d entities\n", entityList.Length())
	for i := 0; i < entityList.Length(); i++ {
		targetScene.AddEntity(entity2.CreateEntity(entityList.Get(i), fs))
	}
	// Skybox setup is optional: silently skip when either entity is absent.
	skyCamera := entityList.FindByKeyValue("classname", "sky_camera")
	if skyCamera == nil {
		return
	}
	worldSpawn := entityList.FindByKeyValue("classname", "worldspawn")
	if worldSpawn == nil {
		return
	}
	targetScene.world.BuildSkybox(
		loader.LoadSky(worldSpawn.ValueForKey("skyname"), fs),
		skyCamera.VectorForKey("origin"),
		float32(skyCamera.IntForKey("scale")))
}

// loadCamera adds the default player camera (70° FOV, aspect ratio taken
// from the configured video resolution) to the scene.
func loadCamera(targetScene *Scene) {
	targetScene.AddCamera(entity.NewCamera(mgl32.DegToRad(70), float32(config.Get().Video.Width)/float32(config.Get().Video.Height)))
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package patchpanel interacts with the patchpanel system daemon.
package patchpanel
import (
"context"
"os"
"github.com/godbus/dbus/v5"
"github.com/golang/protobuf/proto"
pp "chromiumos/system_api/patchpanel_proto"
"chromiumos/tast/errors"
"chromiumos/tast/local/dbusutil"
"chromiumos/tast/local/upstart"
)
const (
	// jobName is the upstart job that runs the patchpanel daemon.
	jobName = "patchpanel"
	// dbusName/dbusPath identify the daemon's D-Bus service and object.
	dbusName = "org.chromium.PatchPanel"
	dbusPath = "/org/chromium/PatchPanel"
	// Fully-qualified D-Bus method names exposed by the daemon.
	connectNamespaceMethod   = "org.chromium.PatchPanel.ConnectNamespace"
	getDevicesMethod         = "org.chromium.PatchPanel.GetDevices"
	getTrafficCountersMethod = "org.chromium.PatchPanel.GetTrafficCounters"
	terminaVMStartupMethod   = "org.chromium.PatchPanel.TerminaVmStartup"
	terminaVMShutdownMethod  = "org.chromium.PatchPanel.TerminaVmShutdown"
)
// Client is a wrapper around patchpanel DBus API.
type Client struct {
	conn *dbus.Conn     // shared system-bus connection
	obj  dbus.BusObject // the patchpanel service object at dbusPath
}

// New connects to the patchpanel daemon via D-Bus and returns a patchpanel Client object.
// It first ensures the patchpanel upstart job is running.
// TODO(crbug.com/1135106): Implement missing patchpanel D-Bus API methods.
func New(ctx context.Context) (*Client, error) {
	if err := upstart.EnsureJobRunning(ctx, jobName); err != nil {
		return nil, err
	}
	conn, obj, err := dbusutil.Connect(ctx, dbusName, dbusPath)
	if err != nil {
		return nil, err
	}
	return &Client{conn, obj}, nil
}
// ConnectNamespace sends a ConnectNamespaceRequest for the given namespace pid. Returns a pair with
// a open file descriptor and the ConnectNamespaceResponse proto message received if the request succeeded.
// Closing the file descriptor will teardown the veth and routing setup and free the allocated IPv4 subnet.
func (c *Client) ConnectNamespace(ctx context.Context, pid int32, outboundPhysicalDevice string,
	forwardUserTraffic bool) (local *os.File, response *pp.ConnectNamespaceResponse, retErr error) {
	request := &pp.ConnectNamespaceRequest{
		Pid:                    pid,
		OutboundPhysicalDevice: outboundPhysicalDevice,
		AllowUserTraffic:       forwardUserTraffic,
	}
	buf, err := proto.Marshal(request)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "failed marshaling %s request", connectNamespaceMethod)
	}
	// The write end ("remote") is handed to patchpanel as a method argument;
	// the read end ("local") is returned to the caller.
	local, remote, err := os.Pipe()
	if err != nil {
		return nil, nil, errors.Wrapf(err, "failed to open pipe for creating %s request arg", connectNamespaceMethod)
	}
	remoteFd := dbus.UnixFD(remote.Fd())
	// Our copy of the write end is no longer needed once D-Bus has sent it.
	defer remote.Close()
	// On any failure below, close the read end so it is not leaked to the caller.
	defer func() {
		if retErr != nil {
			local.Close()
		}
	}()
	// buf is reused to receive the serialized response bytes.
	if retErr = c.obj.CallWithContext(ctx, connectNamespaceMethod, 0, buf, remoteFd).Store(&buf); retErr != nil {
		return nil, nil, errors.Wrapf(retErr, "failed reading %s response", connectNamespaceMethod)
	}
	response = &pp.ConnectNamespaceResponse{}
	if retErr = proto.Unmarshal(buf, response); retErr != nil {
		return nil, nil, errors.Wrapf(retErr, "failed unmarshaling %s response", connectNamespaceMethod)
	}
	return local, response, nil
}
// NotifyTerminaVMStartup sends a TerminaVmStartupRequest for the given container id. The ID must be unique in the system.
func (c *Client) NotifyTerminaVMStartup(ctx context.Context, cid uint32) (response *pp.TerminaVmStartupResponse, retErr error) {
	request := &pp.TerminaVmStartupRequest{
		Cid: cid,
	}
	buf, err := proto.Marshal(request)
	if err != nil {
		return nil, errors.Wrapf(err, "failed marshaling %s request", terminaVMStartupMethod)
	}
	var state []uint8
	if retErr = c.obj.CallWithContext(ctx, terminaVMStartupMethod, 0, buf).Store(&state); retErr != nil {
		// Send a shutdown request as we cannot tell if it failed before or after patchpanel allocates a FD.
		// This is best-effort cleanup; its error is deliberately ignored so the
		// original call failure is the one reported.
		c.NotifyTerminaVMShutdown(ctx, cid)
		return nil, errors.Wrapf(retErr, "failed reading %s response", terminaVMStartupMethod)
	}
	response = &pp.TerminaVmStartupResponse{}
	if retErr = proto.Unmarshal(state, response); retErr != nil {
		return nil, errors.Wrapf(retErr, "failed unmarshaling %s response", terminaVMStartupMethod)
	}
	return response, nil
}
// NotifyTerminaVMShutdown sends a TerminaVmShutdownRequest for the given container id.
func (c *Client) NotifyTerminaVMShutdown(ctx context.Context, cid uint32) error {
	payload, err := proto.Marshal(&pp.TerminaVmShutdownRequest{Cid: cid})
	if err != nil {
		return errors.Wrapf(err, "failed marshaling %s request", terminaVMShutdownMethod)
	}
	var raw []uint8
	if err = c.obj.CallWithContext(ctx, terminaVMShutdownMethod, 0, payload).Store(&raw); err != nil {
		return errors.Wrapf(err, "failed reading %s response", terminaVMShutdownMethod)
	}
	// Decode the reply to confirm it is a well-formed response; its fields are unused.
	reply := &pp.TerminaVmShutdownResponse{}
	if err = proto.Unmarshal(raw, reply); err != nil {
		return errors.Wrapf(err, "failed unmarshaling %s response", terminaVMShutdownMethod)
	}
	return nil
}
// GetDevices gets all patchpanel managed devices information.
func (c *Client) GetDevices(ctx context.Context) (*pp.GetDevicesResponse, error) {
	payload, err := proto.Marshal(&pp.GetDevicesRequest{})
	if err != nil {
		return nil, errors.Wrapf(err, "failed marshaling %s request", getDevicesMethod)
	}
	var raw []uint8
	if err = c.obj.CallWithContext(ctx, getDevicesMethod, 0, payload).Store(&raw); err != nil {
		return nil, errors.Wrapf(err, "failed reading %s response", getDevicesMethod)
	}
	reply := &pp.GetDevicesResponse{}
	if err = proto.Unmarshal(raw, reply); err != nil {
		return nil, errors.Wrapf(err, "failed unmarshaling %s response", getDevicesMethod)
	}
	return reply, nil
}
// GetTrafficCounters retrieves the current traffic counters for the specified devices.
func (c *Client) GetTrafficCounters(ctx context.Context, devices []string) (*pp.TrafficCountersResponse, error) {
	payload, err := proto.Marshal(&pp.TrafficCountersRequest{Devices: devices})
	if err != nil {
		return nil, errors.Wrapf(err, "failed marshaling %s request", getTrafficCountersMethod)
	}
	var raw []uint8
	if err = c.obj.CallWithContext(ctx, getTrafficCountersMethod, 0, payload).Store(&raw); err != nil {
		return nil, errors.Wrapf(err, "failed reading %s response", getTrafficCountersMethod)
	}
	reply := &pp.TrafficCountersResponse{}
	if err = proto.Unmarshal(raw, reply); err != nil {
		return nil, errors.Wrapf(err, "failed unmarshaling %s response", getTrafficCountersMethod)
	}
	return reply, nil
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package security
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/shirou/gopsutil/v3/process"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/sysutil"
"chromiumos/tast/shutil"
"chromiumos/tast/testing"
)
// init registers the PtraceProcess test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: PtraceProcess,
		Desc: "Checks that the kernel restricts ptrace between processes",
		Contacts: []string{
			"jorgelo@chromium.org", // Security team
			"chromeos-security@google.com",
		},
		Attr: []string{"group:mainline"},
	})
}
// PtraceProcess exercises the Yama ptrace restrictions
// (/proc/sys/kernel/yama/ptrace_scope == 1): attaching to a direct child is
// allowed, attaching to unrelated ("cousin") processes or init is denied, and
// prctl(PR_SET_PTRACER, ...) opt-ins behave as expected, including from
// inside a PID namespace.
func PtraceProcess(ctx context.Context, s *testing.State) {
	const (
		sleeperPath = "/usr/local/libexec/tast/helpers/local/cros/security.PtraceProcess.sleeper"
		sleepTime   = 120 * time.Second
		unprivUser  = "chronos"
		unprivUID   = sysutil.ChronosUID
		unprivGID   = sysutil.ChronosGID
	)

	// The test's expectations only hold under Yama "restricted ptrace" mode (1).
	const sysctl = "/proc/sys/kernel/yama/ptrace_scope"
	b, err := ioutil.ReadFile(sysctl)
	if err != nil {
		s.Fatalf("Failed to read %v: %v", sysctl, err)
	}
	if str := strings.TrimSpace(string(b)); str != "1" {
		s.Fatalf("%v contains %q; want \"1\"", sysctl, str)
	}

	// userCmd returns a testexec.Cmd for running the supplied executable as an unprivileged user.
	userCmd := func(exe string, args ...string) *testexec.Cmd {
		cmd := testexec.CommandContext(ctx, exe, args...)
		cmd.Cred(syscall.Credential{Uid: unprivUID, Gid: unprivGID})
		return cmd
	}

	s.Log("Testing ptrace direct child")
	cmd := userCmd("gdb", "-ex", "run", "-ex", "quit", "--batch", sleeperPath)
	if out, err := cmd.CombinedOutput(testexec.DumpLogOnError); err != nil {
		s.Error("Using gdb to start direct child failed: ", err)
	} else if !strings.Contains(string(out), "Quit anyway") {
		s.Error("ptrace direct child disallowed")
	}

	// attachGDB attempts to run a gdb process that attaches to pid.
	// shouldAllow describes whether ptrace is expected to be allowed or disallowed.
	attachGDB := func(pid int, shouldAllow bool) error {
		testing.ContextLog(ctx, "Attaching gdb to ", pid)
		cmd := userCmd("gdb", "-ex", "attach "+strconv.Itoa(pid), "-ex", "quit", "--batch")
		out, err := cmd.CombinedOutput(testexec.DumpLogOnError)
		if err != nil {
			return errors.Wrap(err, "attaching gdb failed")
		}
		// After attaching, gdb prints a message like this at exit:
		//
		//	A debugging session is active.
		//
		//		Inferior 1 [process 26416] will be detached.
		//
		//	Quit anyway? (y or n)
		allowed := strings.Contains(string(out), "A debugging session is active.")
		if !allowed && !strings.Contains(string(out), "ptrace: Operation not permitted") {
			// Neither marker was found; save gdb's output for later debugging.
			fn := fmt.Sprintf("gdb-%d.txt", pid)
			ioutil.WriteFile(filepath.Join(s.OutDir(), fn), out, 0644)
			return errors.New("failed determining if ptrace was allowed; see " + fn)
		}
		if shouldAllow && !allowed {
			return errors.New("ptrace disallowed")
		}
		if !shouldAllow && allowed {
			return errors.New("ptrace allowed")
		}
		return nil
	}

	s.Log("Starting sleep process")
	sleepCmd := userCmd("sleep", strconv.Itoa(int(sleepTime.Seconds())))
	if err := sleepCmd.Start(); err != nil {
		s.Fatal("Failed to start sleep: ", err)
	}
	// Deferred calls run LIFO: Kill first, then Wait reaps the process.
	defer sleepCmd.Wait()
	defer sleepCmd.Kill()
	sleepPID := sleepCmd.Process.Pid

	s.Log("Testing ptrace cousin")
	if err := attachGDB(sleepPID, false); err != nil {
		s.Error("ptrace cousin: ", err)
	}

	s.Log("Testing cousin visibility in /proc")
	procPath := fmt.Sprintf("/proc/%d/exe", sleepPID)
	if err := userCmd("ls", "-la", procPath).Run(testexec.DumpLogOnError); err != nil {
		s.Error("Cousin not visible in /proc: ", err)
	} else {
		s.Log("Cousin visible in /proc (as expected)")
	}

	s.Log("Testing ptrace init")
	if err := attachGDB(1, false); err != nil {
		s.Error("ptrace init: ", err)
	}

	s.Log("Testing init visibility in /proc")
	if err := userCmd("ls", "-la", "/proc/1/exe").Run(); err != nil {
		s.Log("init not visible in /proc (as expected)")
	} else {
		s.Error("init visible in /proc")
	}

	// startSleeper starts the "sleeper" executable from the security_tests package as unprivUser.
	// The process calls prctl(PR_SET_PTRACER, tracerPID, ...).
	// If pidns is true, the process runs in a PID namespace; otherwise it is executed directly.
	// The returned command is started already; the caller must call its Kill and Wait methods.
	// It corresponds to the minijail0 process if pidns is true or the sleeper process otherwise.
	startSleeper := func(tracerPID int, pidns bool) (*testexec.Cmd, error) {
		args := []string{sleeperPath, strconv.Itoa(tracerPID), strconv.Itoa(int(sleepTime.Seconds()))}
		var cmd *testexec.Cmd
		if pidns {
			cmd = testexec.CommandContext(ctx, "minijail0", "-p", "--", "/bin/su", "-c",
				shutil.EscapeSlice(args), unprivUser)
		} else {
			cmd = userCmd(args[0], args[1:]...)
		}
		stdout, err := cmd.StdoutPipe()
		if err != nil {
			return nil, errors.Wrap(err, "failed to create sleeper stdout pipe")
		}
		testing.ContextLog(ctx, "Starting sleeper")
		if err := cmd.Start(); err != nil {
			return nil, errors.Wrap(err, "failed to start sleeper")
		}
		// Wait for the process to write "ready\n" to stdout to indicate that it's ready.
		ch := make(chan error, 1)
		go func() {
			const msg = "ready\n"
			b := make([]byte, len(msg))
			if _, err := io.ReadFull(stdout, b); err != nil {
				ch <- err
			} else if string(b) != msg {
				ch <- errors.Errorf("sleeper wrote %q", b)
			} else {
				ch <- nil
			}
		}()
		// Honor ctx cancellation while waiting for the ready message.
		select {
		case <-ctx.Done():
			err = ctx.Err()
		case err = <-ch:
		}
		if err != nil {
			cmd.Kill()
			cmd.Wait(testexec.DumpLogOnError)
			return nil, errors.Wrap(err, "failed waiting for sleeper to start")
		}
		return cmd, nil
	}

	// testSetPtracer starts the "sleeper" executable with the supplied tracerPID argument
	// and passes the process's PID and shouldAllow to attachGDB.
	testSetPtracer := func(tracerPID int, shouldAllow bool) error {
		sleeperCmd, err := startSleeper(tracerPID, false)
		if err != nil {
			return err
		}
		defer sleeperCmd.Wait()
		defer sleeperCmd.Kill()
		return attachGDB(sleeperCmd.Process.Pid, shouldAllow)
	}

	s.Log("Testing prctl(PR_SET_PTRACER, 0, ...)")
	if err := testSetPtracer(0, false); err != nil {
		s.Error("ptrace after prctl(PR_SET_PTRACER, 0, ...): ", err)
	}
	s.Log("Testing prctl(PR_SET_PTRACER, parent, ...)")
	if err := testSetPtracer(os.Getpid(), true); err != nil {
		s.Error("ptrace after prctl(PR_SET_PTRACER, parent, ...): ", err)
	}
	s.Log("Testing prctl(PR_SET_PTRACER, 1, ...)")
	if err := testSetPtracer(1, true); err != nil {
		s.Error("ptrace after prctl(PR_SET_PTRACER, 1, ...): ", err)
	}
	s.Log("Testing prctl(PR_SET_PTRACER, -1, ...)")
	if err := testSetPtracer(-1, true); err != nil {
		s.Error("ptrace after prctl(PR_SET_PTRACER, -1, ...): ", err)
	}

	// hasAncestor returns true if pid has the specified ancestor.
	hasAncestor := func(pid, ancestor int32) (bool, error) {
		// Walk up the parent chain until ancestor is found or pid 0 is reached.
		for {
			proc, err := process.NewProcess(pid)
			if err != nil {
				return false, err
			}
			ppid, err := proc.Ppid()
			if err != nil {
				return false, err
			}
			if ppid == 0 {
				return false, nil
			}
			if ppid == ancestor {
				return true, nil
			}
			pid = ppid
		}
	}

	// testSetPtracerPidns is similar to testSetPtracer, but runs the sleeper executable in a PID namespace.
	testSetPtracerPidns := func(tracerPID int, shouldAllow bool) error {
		minijailCmd, err := startSleeper(tracerPID, true)
		if err != nil {
			return err
		}
		defer minijailCmd.Wait()
		defer minijailCmd.Kill()
		// Find the sleeper process, which will be nested under minijail0 and su.
		sleeperPID := -1
		procs, err := process.Processes()
		if err != nil {
			return errors.Wrap(err, "failed listing procesess")
		}
		for _, proc := range procs {
			if exe, err := proc.Exe(); err != nil || exe != sleeperPath {
				continue
			}
			if ok, err := hasAncestor(proc.Pid, int32(minijailCmd.Process.Pid)); err != nil || !ok {
				continue
			}
			sleeperPID = int(proc.Pid)
			break
		}
		if sleeperPID == -1 {
			return errors.Errorf("didn't find sleeper process under minijail0 process %d", minijailCmd.Process.Pid)
		}
		return attachGDB(sleeperPID, shouldAllow)
	}

	s.Log("Testing prctl(PR_SET_PTRACER, 0, ...) across pidns")
	if err := testSetPtracerPidns(0, false); err != nil {
		s.Error("ptrace after prctl(PR_SET_PTRACER, 0, ...) across pidns: ", err)
	}
	s.Log("Testing prctl(PR_SET_PTRACER, -1, ...) across pidns")
	if err := testSetPtracerPidns(-1, true); err != nil {
		s.Error("ptrace after prctl(PR_SET_PTRACER, -1, ...) across pidns: ", err)
	}
}
|
package xin
import (
crand "crypto/rand"
"encoding/binary"
"math/rand"
)
// cryptoRandForm implements the interpreter's crypto-random builtin: it seeds
// a math/rand generator from 8 bytes of crypto/rand entropy and returns a
// fractional value in [0, 1). On entropy-read or decode failure it returns
// the zero value with no interpreter error (best-effort semantics).
func cryptoRandForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) {
	seedBytes := make([]byte, 8)
	if _, err := crand.Read(seedBytes); err != nil {
		return zeroValue, nil
	}
	seed, consumed := binary.Varint(seedBytes)
	if consumed <= 0 {
		return zeroValue, nil
	}
	rng := rand.New(rand.NewSource(seed))
	return FracValue(rng.Float64()), nil
}
|
/*
Copyright 2014 Huawei Technologies Co., Ltd. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handler
import (
"encoding/json"
"net/http"
"gopkg.in/macaron.v1"
)
// ImageGlobalSearchV1Handler handles the global (public-only) image search
// API. Currently a stub: it always answers 200 with an empty JSON object.
func ImageGlobalSearchV1Handler(ctx *macaron.Context) (int, []byte) {
	// Marshaling an empty map cannot fail, so the error is discarded.
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImageDiscoveryV1Handler handles the image discovery API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImageDiscoveryV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImageScopedSearchV1Handler handles the scoped image search API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImageScopedSearchV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImageGetListV1Handler handles the image list API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImageGetListV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImageGetFileV1Handler handles the image file download API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImageGetFileV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImageGetManifestsV1Handler handles the image manifests retrieval API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImageGetManifestsV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImagePostV1Handler handles the image creation API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImagePostV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImagePutFileV1Handler handles the image file upload API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImagePutFileV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImagePutManifestV1Handler handles the image manifest upload API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImagePutManifestV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImagePatchFileV1Handler handles the image file patch API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImagePatchFileV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
// ImageDeleteFileV1Handler handles the image file deletion API.
// Currently a stub: it always answers 200 with an empty JSON object.
func ImageDeleteFileV1Handler(ctx *macaron.Context) (int, []byte) {
	body, _ := json.Marshal(map[string]string{})
	return http.StatusOK, body
}
|
package usecase
import (
"marketplace/ads/domain"
"github.com/go-pg/pg/v10"
)
// SearchAdsCmd searches the ads whose title or description contains a keyword.
type SearchAdsCmd func (db *pg.DB, user *domain.Account, keyword string) ([]domain.Ads, error)

// SearchAds builds a SearchAdsCmd backed by go-pg that matches the keyword
// as a substring (SQL LIKE) of either the title or the description.
func SearchAds() SearchAdsCmd {
	return func (db *pg.DB, user *domain.Account, keyword string) ([]domain.Ads, error) {
		pattern := "%" + keyword + "%"
		var matches []domain.Ads
		query := db.Model(&matches).
			Where("ads.Title LIKE ?", pattern).
			WhereOr("ads.Description LIKE ?", pattern)
		if err := query.Select(); err != nil {
			return []domain.Ads{}, err
		}
		return matches, nil
	}
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package peer
import (
"bytes"
"container/list"
"fmt"
"math/rand"
"strings"
"sync"
"time"
"github.com/bitmark-inc/bitmarkd/block"
"github.com/bitmark-inc/bitmarkd/blockheader"
"github.com/bitmark-inc/bitmarkd/fault"
"github.com/bitmark-inc/bitmarkd/genesis"
"github.com/bitmark-inc/bitmarkd/messagebus"
"github.com/bitmark-inc/bitmarkd/mode"
"github.com/bitmark-inc/bitmarkd/peer/upstream"
"github.com/bitmark-inc/bitmarkd/peer/voting"
"github.com/bitmark-inc/bitmarkd/util"
"github.com/bitmark-inc/bitmarkd/zmqutil"
"github.com/bitmark-inc/logger"
)
// various timeouts and tuning constants
const (
	// pause to limit bandwidth
	cycleInterval = 15 * time.Second

	// time out for connections
	connectorTimeout = 60 * time.Second

	// number of cycles to be 1 block out of sync before resync
	samplingLimit = 6

	// number of blocks to fetch in one set
	fetchBlocksPerCycle = 200

	// refuse to fork if the height difference is greater than this
	forkProtection = 60

	// do not proceed unless this many clients are connected
	minimumClients = 5

	// total number of dynamic clients
	maximumDynamicClients = 25

	// a client must have responded within this interval to count as active
	activeTime = 60 * time.Second

	// fast-sync tuning: blocks fetched per cycle, forgery-sampling interval,
	// and how far below the remote height fast sync stops
	fastSyncFetchBlocksPerCycle = 2000
	fastSyncSkipPerBlocks       = 100
	fastSyncPivotBlocks         = 1000
)
// connector is the client side of block synchronisation: it owns the static
// and dynamic upstream connections and runs the resynchronisation state
// machine.
type connector struct {
	sync.RWMutex // guards mutation of the client lists

	log        *logger.L
	preferIPv6 bool

	staticClients  []upstream.Upstream
	dynamicClients list.List

	state connectorState

	theClient        upstream.Upstream // client used for fetching blocks
	startBlockNumber uint64            // block number where local chain forks
	height           uint64            // block number on best node
	samples          int               // counter to detect missed block broadcast
	votes            voting.Voting

	fastSyncEnabled bool   // fast sync mode enabled?
	blocksPerCycle  int    // number of blocks to fetch per cycle
	pivotPoint      uint64 // block number to stop fast syncing
}
// initialise the connector
//
// Connects all configured static upstreams concurrently, then pre-creates
// the sockets for the dynamic upstreams. privateKey/publicKey identify this
// node, connect lists the static peers, dynamicEnabled allows running with
// zero static peers, preferIPv6 selects the address preference and fastSync
// enables fast block synchronisation. On any error all clients created so
// far are destroyed and the error is returned.
func (conn *connector) initialise(
	privateKey []byte,
	publicKey []byte,
	connect []Connection,
	dynamicEnabled bool,
	preferIPv6 bool,
	fastSync bool,
) error {

	log := logger.New("connector")
	conn.log = log
	conn.preferIPv6 = preferIPv6
	conn.fastSyncEnabled = fastSync

	log.Info("initialising…")

	// allocate all sockets
	staticCount := len(connect) // can be zero
	if 0 == staticCount && !dynamicEnabled {
		log.Error("zero static connections and dynamic is disabled")
		return fault.NoConnectionsAvailable
	}
	conn.staticClients = make([]upstream.Upstream, staticCount)

	// initially connect all static sockets
	wg := sync.WaitGroup{}
	errCh := make(chan error, len(connect))

	conn.log.Debugf("static connection count: %d", len(connect))

	for i, c := range connect {
		wg.Add(1)

		// start new goroutine for each connection
		go func(conn *connector, c Connection, i int, wg *sync.WaitGroup, ch chan error) {

			// report an error and mark this connection attempt done
			errF := func(wg *sync.WaitGroup, ch chan error, e error) {
				ch <- e
				wg.Done()
			}

			// for canonicaling the error
			canonicalErrF := func(c Connection, e error) error {
				return fmt.Errorf("client: %q error: %s", c.Address, e)
			}

			address, err := util.NewConnection(c.Address)
			if nil != err {
				log.Errorf("client[%d]=address: %q error: %s", i, c.Address, err)
				errF(wg, ch, canonicalErrF(c, err))
				return
			}
			serverPublicKey, err := zmqutil.ReadPublicKey(c.PublicKey)
			if nil != err {
				log.Errorf("client[%d]=public: %q error: %s", i, c.PublicKey, err)
				errF(wg, ch, canonicalErrF(c, err))
				return
			}

			// prevent connection to self
			if bytes.Equal(publicKey, serverPublicKey) {
				err := fault.ConnectingToSelfForbidden
				log.Errorf("client[%d]=public: %q error: %s", i, c.PublicKey, err)
				errF(wg, ch, canonicalErrF(c, err))
				return
			}

			client, err := upstream.New(privateKey, publicKey, connectorTimeout)
			if nil != err {
				log.Errorf("client[%d]=%q error: %s", i, address, err)
				errF(wg, ch, canonicalErrF(c, err))
				return
			}

			// registration is shared state: guard with the connector lock
			conn.Lock()
			conn.staticClients[i] = client
			globalData.connectorClients = append(globalData.connectorClients, client)
			conn.Unlock()

			err = client.Connect(address, serverPublicKey)
			if nil != err {
				log.Errorf("connect[%d]=%q error: %s", i, address, err)
				errF(wg, ch, canonicalErrF(c, err))
				return
			}
			log.Infof("public key: %x at: %q", serverPublicKey, c.Address)
			wg.Done()
		}(conn, c, i, &wg, errCh)
	}

	conn.log.Debug("waiting for all static connections...")
	wg.Wait()

	// drain the error channel, collecting all errors
	errs := make([]error, 0)
	for len(errCh) > 0 {
		errs = append(errs, <-errCh)
	}

	// error code for goto fail
	err := error(nil)

	if len(errs) == 1 {
		err = errs[0]
		goto fail
	} else if len(errs) > 1 {
		err = compositeError(errs)
		goto fail
	}

	// just create sockets for dynamic clients
	for i := 0; i < maximumDynamicClients; i++ {
		client, e := upstream.New(privateKey, publicKey, connectorTimeout)
		// BUG FIX: this previously tested "err", which is always nil here,
		// so a failure from upstream.New was silently ignored and a nil
		// client was appended to the dynamic client list.
		if nil != e {
			log.Errorf("client[%d] error: %s", i, e)
			err = e
			goto fail
		}

		// create list of all dynamic clients
		conn.dynamicClients.PushBack(client)

		globalData.connectorClients = append(globalData.connectorClients, client)
	}

	conn.votes = voting.NewVoting()

	// start state machine
	conn.nextState(cStateConnecting)

	return nil

	// error handling: tear down everything created so far
fail:
	conn.destroy()

	return err
}
// compositeError combines multiple errors into one whose message lists every
// individual message, e.g. "composite error: [e1, e2]".
// Returns nil when the slice is nil or empty.
func compositeError(errs []error) error {
	if len(errs) == 0 {
		return nil
	}
	messages := make([]string, len(errs))
	for i, e := range errs {
		messages[i] = e.Error()
	}
	// Pass the joined text through a %s verb: the previous implementation
	// used it directly as the Errorf format string, which mangled any "%"
	// inside an error message. (Also renamed the parameter, which shadowed
	// the errors package, and the local "len", which shadowed the builtin.)
	return fmt.Errorf("composite error: [%s]", strings.Join(messages, ", "))
}
// allClients invokes f once for every present upstream client: first the
// static ones (with a nil list element), then the dynamic ones (with their
// owning list element).
func (conn *connector) allClients(
	f func(client upstream.Upstream, e *list.Element),
) {
	for _, c := range conn.staticClients {
		if nil == c {
			continue
		}
		f(c, nil)
	}
	for elem := conn.dynamicClients.Front(); elem != nil; elem = elem.Next() {
		if c := elem.Value.(upstream.Upstream); nil != c {
			f(c, elem)
		}
	}
}
// searchClients iterates over the static then dynamic upstream clients,
// stopping as soon as f returns true. For static clients e is nil; for
// dynamic clients e is the list element holding the client.
func (conn *connector) searchClients(
	f func(client upstream.Upstream, e *list.Element) bool,
) {
	for _, client := range conn.staticClients {
		// skip unset entries, for consistency with allClients, so f is
		// never handed a nil client
		if client == nil {
			continue
		}
		if f(client, nil) {
			return
		}
	}
	for e := conn.dynamicClients.Front(); nil != e; e = e.Next() {
		if f(e.Value.(upstream.Upstream), e) {
			return
		}
	}
}
// destroy tears down every static and dynamic upstream client.
func (conn *connector) destroy() {
	conn.allClients(func(c upstream.Upstream, _ *list.Element) {
		c.Destroy()
	})
}
// various RPC calls to upstream connections

// Run is the connector's background loop: it drives the state machine every
// cycleInterval and services connector commands from the message bus until
// shutdown is signalled, then destroys all clients.
func (conn *connector) Run(args interface{}, shutdown <-chan struct{}) {
	log := conn.log

	log.Info("starting…")

	queue := messagebus.Bus.Connector.Chan()

	timer := time.After(cycleInterval)

loop:
	for {
		// wait for shutdown
		log.Debug("waiting…")

		select {
		case <-shutdown:
			break loop

		case <-timer: // timer has priority over queue
			timer = time.After(cycleInterval)
			conn.process()

		case item := <-queue:
			c, _ := util.PackedConnection(item.Parameters[1]).Unpack()
			conn.log.Debugf(
				"received control: %s public key: %x connect: %x %q",
				item.Command,
				item.Parameters[0],
				item.Parameters[1],
				c,
			)

			switch item.Command {
			case "@D": // internal command: delete a peer
				conn.releaseServerKey(item.Parameters[0])
				conn.log.Infof(
					"connector receive server public key: %x",
					item.Parameters[0],
				)

			default:
				// any other command is treated as a connect request
				err := conn.connectUpstream(
					item.Command,
					item.Parameters[0],
					item.Parameters[1],
				)
				if nil != err {
					conn.log.Warnf("connect upstream error: %s", err)
				}
			}
		}
	}
	log.Info("shutting down…")
	conn.destroy()
	log.Info("stopped")
}
// process drives the state machine repeatedly until it asks to pause for I/O.
func (conn *connector) process() {
	for {
		if !conn.runStateMachine() {
			return
		}
	}
}
// run state machine
// return:
//	true if want more cycles
//	false to pause for I/O
//
// State flow: Connecting → HighestBlock → ForkDetect → FetchBlocks →
// Rebuild → Sampling, dropping back to earlier states on errors or when
// connections are lost.
func (conn *connector) runStateMachine() bool {
	log := conn.log

	log.Infof("current state: %s", conn.state)

	continueLooping := true

	switch conn.state {
	case cStateConnecting:
		// stay in resynchronise mode until enough clients are connected
		mode.Set(mode.Resynchronise)
		globalData.clientCount = conn.getConnectedClientCount()
		log.Infof("connections: %d", globalData.clientCount)
		if isConnectionEnough(globalData.clientCount) {
			conn.nextState(cStateHighestBlock)
		} else {
			log.Warnf("connections: %d below minimum client count: %d", globalData.clientCount, minimumClients)
			messagebus.Bus.Announce.Send("reconnect")
		}
		continueLooping = false

	case cStateHighestBlock:
		// elect the best upstream, then decide: fork-detect, rebuild, or wait
		if conn.updateHeightAndClient() {
			log.Infof("highest block number: %d client: %s", conn.height, conn.theClient.Name())
			if conn.hasBetterChain(blockheader.Height()) {
				log.Infof("new chain from %s, height %d, digest %s", conn.theClient.Name(), conn.height, conn.theClient.CachedRemoteDigestOfLocalHeight().String())
				log.Info("enter fork detect state")
				conn.nextState(cStateForkDetect)
			} else if conn.isSameChain() {
				log.Info("remote same chain")
				conn.nextState(cStateRebuild)
			} else {
				log.Info("remote chain invalid, stop looping for now")
				continueLooping = false
			}
		} else {
			log.Warn("highest block: connection lost")
			conn.nextState(cStateConnecting)
			continueLooping = false
		}

	case cStateForkDetect:
		height := blockheader.Height()
		if !conn.hasBetterChain(height) {
			log.Info("remote without better chain, enter state rebuild")
			conn.nextState(cStateRebuild)
		} else {
			// determine pivot point to stop fast sync
			if conn.height > fastSyncPivotBlocks {
				conn.pivotPoint = conn.height - fastSyncPivotBlocks
			} else {
				conn.pivotPoint = 0
			}
			log.Infof("Pivot point for fast sync: %d", conn.pivotPoint)

			// first block number
			conn.startBlockNumber = genesis.BlockNumber + 1
			conn.nextState(cStateFetchBlocks) // assume success
			log.Infof("local block number: %d", height)

			blockheader.ClearCache()

			// check digests of descending blocks (to detect a fork)
		check_digests:
			for h := height; h >= genesis.BlockNumber; h -= 1 {
				digest, err := blockheader.DigestForBlock(h)
				if nil != err {
					log.Infof("block number: %d local digest error: %s", h, err)
					conn.nextState(cStateHighestBlock) // retry
					break check_digests
				}
				d, err := conn.theClient.RemoteDigestOfHeight(h)
				if nil != err {
					log.Infof("block number: %d fetch digest error: %s", h, err)
					conn.nextState(cStateHighestBlock) // retry
					break check_digests
				} else if d == digest {
					// first matching digest walking downwards: the fork
					// begins just above h
					if height-h >= forkProtection {
						log.Errorf("fork protection at: %d - %d >= %d", height, h, forkProtection)
						conn.nextState(cStateHighestBlock)
						break check_digests
					}

					conn.startBlockNumber = h + 1
					log.Infof("fork from block number: %d", conn.startBlockNumber)

					// remove old blocks
					err := block.DeleteDownToBlock(conn.startBlockNumber)
					if nil != err {
						log.Errorf("delete down to block number: %d error: %s", conn.startBlockNumber, err)
						conn.nextState(cStateHighestBlock) // retry
					}
					break check_digests
				}
			}
		}

	case cStateFetchBlocks:
		continueLooping = false
		var packedBlock []byte
		var packedNextBlock []byte

		// Check fast sync state on each loop
		if conn.fastSyncEnabled && conn.pivotPoint >= conn.startBlockNumber+fastSyncFetchBlocksPerCycle {
			conn.blocksPerCycle = fastSyncFetchBlocksPerCycle
		} else {
			conn.blocksPerCycle = fetchBlocksPerCycle
		}

	fetch_blocks:
		for i := 0; i < conn.blocksPerCycle; i++ {
			if conn.startBlockNumber > conn.height {
				// just in case block height has changed
				log.Infof("height changed from: %d to: %d", conn.height, conn.startBlockNumber)
				conn.nextState(cStateHighestBlock)
				continueLooping = true
				break fetch_blocks
			}

			if conn.startBlockNumber%100 == 0 {
				log.Warnf("fetch block number: %d", conn.startBlockNumber)
			} else {
				log.Infof("fetch block number: %d", conn.startBlockNumber)
			}

			if packedNextBlock == nil {
				p, err := conn.theClient.GetBlockData(conn.startBlockNumber)
				if nil != err {
					log.Errorf("fetch block number: %d error: %s", conn.startBlockNumber, err)
					conn.nextState(cStateHighestBlock) // retry
					break fetch_blocks
				}
				packedBlock = p
			} else {
				// reuse the block prefetched on the previous iteration
				packedBlock = packedNextBlock
			}

			if conn.fastSyncEnabled {
				// test a random block for forgery
				if i > 0 && i%fastSyncSkipPerBlocks == 0 {
					h := conn.startBlockNumber - uint64(rand.Intn(fastSyncSkipPerBlocks))
					log.Debugf("select random block: %d to test for forgery", h)
					digest, err := blockheader.DigestForBlock(h)
					if nil != err {
						log.Infof("block number: %d local digest error: %s", h, err)
						conn.nextState(cStateHighestBlock) // retry
						break fetch_blocks
					}
					d, err := conn.theClient.RemoteDigestOfHeight(h)
					if nil != err {
						log.Infof("block number: %d fetch digest error: %s", h, err)
						conn.nextState(cStateHighestBlock) // retry
						break fetch_blocks
					}
					if d != digest {
						log.Warnf("potetial block forgery: %d", h)

						// remove old blocks
						startingPoint := conn.startBlockNumber - uint64(i)
						err := block.DeleteDownToBlock(startingPoint)
						if nil != err {
							log.Errorf("delete down to block number: %d error: %s", startingPoint, err)
						}

						// abandon fast sync and resync from the rollback point
						conn.fastSyncEnabled = false
						conn.nextState(cStateHighestBlock)
						conn.startBlockNumber = startingPoint
						break fetch_blocks
					}
				}

				// get next block:
				// packedNextBlock will be nil when local height is same as remote
				var err error
				packedNextBlock, err = conn.theClient.GetBlockData(conn.startBlockNumber + 1)
				if nil != err {
					log.Debugf("fetch next block number: %d error: %s", conn.startBlockNumber+1, err)
				}
			} else {
				packedNextBlock = nil
			}

			log.Debugf("store block number: %d", conn.startBlockNumber)
			err := block.StoreIncoming(packedBlock, packedNextBlock, block.NoRescanVerified)
			if nil != err {
				log.Errorf(
					"store block number: %d error: %s",
					conn.startBlockNumber,
					err,
				)
				conn.nextState(cStateHighestBlock) // retry
				break fetch_blocks
			}

			// next block
			conn.startBlockNumber++
		}

	case cStateRebuild:
		// return to normal operations
		conn.nextState(cStateSampling)
		conn.samples = 0 // zero out the counter
		mode.Set(mode.Normal)
		continueLooping = false

	case cStateSampling:
		// check peers
		globalData.clientCount = conn.getConnectedClientCount()
		if !isConnectionEnough(globalData.clientCount) {
			log.Warnf("connections: %d below minimum client count: %d", globalData.clientCount, minimumClients)
			continueLooping = true
			conn.nextState(cStateConnecting)
			return continueLooping
		}
		log.Infof("connections: %d", globalData.clientCount)

		continueLooping = false

		// check height
		if conn.updateHeightAndClient() {
			height := blockheader.Height()
			log.Infof("height remote: %d, local: %d", conn.height, height)

			if conn.hasBetterChain(height) {
				log.Warn("check height: better chain")
				conn.nextState(cStateForkDetect)
				continueLooping = true
			} else {
				conn.samples = 0
			}
		} else {
			// missed a sample; resync after too many consecutive misses
			conn.samples++
			if conn.samples > samplingLimit {
				log.Warn("check height: time to resync")
				conn.nextState(cStateForkDetect)
				continueLooping = true
			}
		}
	}
	return continueLooping
}
// isConnectionEnough reports whether count reaches the minimum number of
// connected clients required to proceed.
func isConnectionEnough(count int) bool {
	return count >= minimumClients
}
// isSameChain reports whether the elected client is on exactly the same
// chain as the local node: equal height and an identical digest at that
// height. Returns false when no client is elected or the local digest
// cannot be read.
func (conn *connector) isSameChain() bool {
	if nil == conn.theClient {
		conn.log.Debug("remote client empty")
		return false
	}
	localDigest, err := blockheader.DigestForBlock(blockheader.Height())
	if nil != err {
		return false
	}
	return conn.height == blockheader.Height() &&
		conn.theClient.CachedRemoteDigestOfLocalHeight() == localDigest
}
// hasBetterChain reports whether the elected client's chain should replace
// the local one: the remote must be strictly higher, or equal in height but
// winning the smaller-digest tie-break.
func (conn *connector) hasBetterChain(localHeight uint64) bool {
	if nil == conn.theClient {
		conn.log.Debug("remote client empty")
		return false
	}
	switch {
	case conn.height < localHeight:
		conn.log.Debugf("remote height %d is shorter than local height %d", conn.height, localHeight)
		return false
	case conn.height == localHeight && !conn.hasSmallerDigestThanLocal(localHeight):
		// equal height: only better if the remote digest wins the tie-break
		return false
	default:
		return true
	}
}
// different chain but with same height, possible fork exist
// choose the chain that has smaller digest
//
// hasSmallerDigestThanLocal compares the remote client's cached digest at
// localHeight against the local digest; the smaller digest wins the
// tie-break. Returns false when the client's recorded local height no
// longer matches (upstream updated mid-processing) or the local digest
// cannot be read.
func (conn *connector) hasSmallerDigestThanLocal(localHeight uint64) bool {
	remoteDigest := conn.theClient.CachedRemoteDigestOfLocalHeight()
	// if upstream update during processing
	if conn.theClient.LocalHeight() != localHeight {
		conn.log.Warnf("remote height %d is different than local height %d", conn.theClient.LocalHeight(), localHeight)
		return false
	}
	localDigest, err := blockheader.DigestForBlock(localHeight)
	if nil != err {
		conn.log.Warnf("local height: %d digest error: %s", localHeight, err)
		return false
	}
	return remoteDigest.SmallerDigestThan(localDigest)
}
// updateHeightAndClient runs a vote among the connected upstreams to
// elect a majority client and height.  On success it records the winner
// in conn.theClient / conn.height (and globalData.blockHeight) and
// returns true; on any failure it zeroes conn.height and returns false.
func (conn *connector) updateHeightAndClient() bool {
	conn.votes.Reset()
	conn.votes.SetMinHeight(blockheader.Height())
	conn.startElection()

	elected, height := conn.elected()
	// elected() returns (nil, 0) on any error; guard the client
	// explicitly BEFORE dereferencing it, instead of inferring its
	// validity from the height alone
	if elected == nil || height == 0 {
		conn.height = 0
		return false
	}

	winnerName := elected.Name()
	remoteAddr, err := elected.RemoteAddr()
	if err != nil {
		conn.log.Warnf("%s socket not connected", winnerName)
		conn.height = 0
		return false
	}

	conn.log.Debugf("winner %s majority height %d, connect to %s",
		winnerName,
		height,
		remoteAddr,
	)

	// height > 0 and elected != nil are guaranteed by the guard above,
	// so the majority height can be published unconditionally
	globalData.blockHeight = height
	conn.theClient = elected
	conn.height = height
	return true
}
// startElection casts one vote for every upstream client that is both
// connected and has been active within the recent activity window.
func (conn *connector) startElection() {
	castVote := func(client upstream.Upstream, _ *list.Element) {
		if !client.IsConnected() || !client.ActiveInThePast(activeTime) {
			return
		}
		conn.votes.VoteBy(client)
	}
	conn.allClients(castVote)
}
// elected returns the winning upstream and its majority height, or
// (nil, 0) when no candidate could be elected or the winner's remote
// address cannot be resolved.
func (conn *connector) elected() (upstream.Upstream, uint64) {
	winner, height, err := conn.votes.ElectedCandidate()
	if err != nil {
		conn.log.Warnf("get elected with error: %s", err)
		return nil, 0
	}

	addr, err := winner.RemoteAddr()
	if err != nil {
		conn.log.Errorf("get client string with error: %s", err)
		return nil, 0
	}

	winningDigest := winner.CachedRemoteDigestOfLocalHeight()
	conn.log.Infof(
		"digest: %s elected with %d votes, remote addr: %s, height: %d",
		winningDigest,
		conn.votes.NumVoteOfDigest(winningDigest),
		addr,
		height,
	)
	return winner, height
}
// connectUpstream connects this node to the given server.  If some
// upstream slot is already connected to that server the call is a
// no-op (a matching dynamic entry is merely refreshed); otherwise the
// oldest dynamic upstream slot is reconnected to the new server.
//
// priority is used only for logging; serverPublicKey identifies the
// remote node; addresses is a packed list of its IPv4/IPv6 endpoints.
func (conn *connector) connectUpstream(
	priority string,
	serverPublicKey []byte,
	addresses []byte,
) error {
	log := conn.log
	log.Debugf("connect: %s to: %x @ %x", priority, serverPublicKey, addresses)
	// extract the first valid address
	connV4, connV6 := util.PackedConnection(addresses).Unpack46()
	// need to know if this node has IPv6; prefer the IPv6 endpoint
	// only when configured to do so
	address := connV4
	if nil != connV6 && conn.preferIPv6 {
		address = connV6
	}
	if nil == address {
		log.Errorf(
			"reconnect: %x error: no suitable address found ipv6 allowed: %t",
			serverPublicKey,
			conn.preferIPv6,
		)
		return fault.AddressIsNil
	}
	log.Infof("connect: %s to: %x @ %s", priority, serverPublicKey, address)
	// see if already connected to this node; a nil list element marks a
	// static (configured) client, which is never rotated
	alreadyConnected := false
	conn.searchClients(func(client upstream.Upstream, e *list.Element) bool {
		if client.IsConnectedTo(serverPublicKey) {
			if nil == e {
				log.Debugf(
					"already have static connection to: %x @ %s",
					serverPublicKey,
					*address,
				)
			} else {
				// refresh the dynamic entry: move it to the back so it
				// becomes the most-recently-used slot
				log.Debugf("ignore change to: %x @ %s", serverPublicKey, *address)
				conn.dynamicClients.MoveToBack(e)
			}
			alreadyConnected = true
			return true // stop searching
		}
		return false
	})
	if alreadyConnected {
		return nil
	}
	// reconnect the oldest entry to new node
	// NOTE(review): assumes dynamicClients is non-empty — Front() returns
	// nil on an empty list and this would panic; confirm initialization
	// always pre-populates the dynamic client slots
	log.Infof("reconnect: %x @ %s", serverPublicKey, *address)
	client := conn.dynamicClients.Front().Value.(upstream.Upstream)
	err := client.Connect(address, serverPublicKey)
	if nil != err {
		log.Errorf("ConnectTo: %x @ %s error: %s", serverPublicKey, *address, err)
	} else {
		// success: rotate the reused slot to most-recently-used
		conn.dynamicClients.MoveToBack(conn.dynamicClients.Front())
	}
	return err
}
// releaseServerKey disconnects the dynamic upstream bound to the given
// server public key.  Static clients (signalled by a nil list element)
// are never released, only logged.  Always returns nil.
func (conn *connector) releaseServerKey(serverPublicKey []byte) error {
	conn.searchClients(func(client upstream.Upstream, e *list.Element) bool {
		if !bytes.Equal(serverPublicKey, client.ServerPublicKey()) {
			return false // not this one, keep searching
		}
		if nil == e {
			// static client: configured peers are never dropped
			conn.log.Infof("refuse to delete static peer: %x", serverPublicKey)
			return false
		}
		// dynamic client: drop the server binding and stop searching
		client.ResetServer()
		conn.log.Infof("peer: %x is released in upstream", serverPublicKey)
		return true
	})
	return nil
}
// nextState transitions the connector state machine to newState.
func (conn *connector) nextState(newState connectorState) {
	conn.state = newState
}
// getConnectedClientCount returns how many upstream clients currently
// hold a live connection.
func (conn *connector) getConnectedClientCount() int {
	connected := 0
	conn.allClients(func(client upstream.Upstream, _ *list.Element) {
		if client.IsConnected() {
			connected++
		}
	})
	return connected
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.