text
stringlengths 11
4.05M
|
|---|
// Based on: https://github.com/soupdiver/go-gitlab-webhook
// Gitea SDK: https://godoc.org/code.gitea.io/sdk/gitea
// Gitea webhooks: https://docs.gitea.io/en-us/webhooks
package main
import (
b64 "encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"strconv"
"syscall"
api "code.gitea.io/gitea/modules/structs"
)
// ConfigRepository represents one repository entry from the config file:
// the shared webhook secret, the repository identifier, and the commands
// to run when a push event for that repository arrives.
type ConfigRepository struct {
	Secret   string   // webhook secret; must equal the secret in the payload
	Name     string   // repository full name ("owner/repo") or its HTML URL
	Commands []string // commands executed (no arguments) on each push event
}
// Config represents the JSON config file: where to log, where to listen,
// and the set of repositories the hook server reacts to.
type Config struct {
	Logfile      string // path of the log file (opened append/create)
	Address      string // listen address (host or IP)
	Port         int64  // listen port
	Repositories []ConfigRepository
}
func panicIf(err error, what ...string) {
if err != nil {
if len(what) == 0 {
panic(err)
}
panic(errors.New(err.Error() + (" " + what[0])))
}
}
var config Config
var configFile string
// main wires up the webhook server: it loads the JSON config (path taken
// from the first CLI argument, defaulting to "config.json"), redirects
// logging to the configured log file, installs a SIGHUP handler that
// reloads the config at runtime, and serves webhooks on "/".
func main() {
	args := os.Args
	// Reload the configuration on SIGHUP without restarting the server.
	// NOTE(review): this goroutine reads the package-level configFile,
	// which is assigned a few lines below — a SIGHUP delivered in that
	// window would reload an empty path; confirm whether this matters.
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGHUP)
	go func() {
		<-sigc
		config = loadConfig(configFile)
		log.Println("config reloaded")
	}()
	//if we have a "real" argument we take this as conf path to the config file
	if len(args) > 1 {
		configFile = args[1]
	} else {
		configFile = "config.json"
	}
	//load config
	config = loadConfig(configFile)
	// Open the log file in append mode, creating it if missing.
	writer, err := os.OpenFile(config.Logfile, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)
	panicIf(err)
	//close logfile on exit
	defer func() {
		writer.Close()
	}()
	//setting logging output
	log.SetOutput(writer)
	//setting handler
	http.HandleFunc("/", hookHandler)
	address := config.Address + ":" + strconv.FormatInt(config.Port, 10)
	log.Println("Listening on " + address)
	// Blocks until the listener fails; the error is logged, not fatal.
	err = http.ListenAndServe(address, nil)
	if err != nil {
		log.Println(err)
	}
}
// loadConfig reads and parses the JSON configuration at configFile,
// panicking on any I/O or parse error. As before, the parsed result is
// also stored in the package-level config variable and returned.
func loadConfig(configFile string) Config {
	// Read the entire file. The previous implementation read only the
	// first 1024 bytes into a fixed buffer, silently truncating larger
	// configurations (and then panicking on the resulting JSON error).
	data, err := ioutil.ReadFile(configFile)
	panicIf(err)
	err = json.Unmarshal(data, &config)
	panicIf(err)
	return config
}
// hookHandler handles incoming Gogs/Gitea webhook requests on "/". Any
// panic raised by panicIf inside the handler is recovered and logged so
// a malformed request cannot crash the server.
func hookHandler(w http.ResponseWriter, r *http.Request) {
	defer func() {
		if r := recover(); r != nil {
			log.Println(r)
		}
	}()
	// Gogs and Gitea announce the event type in different headers.
	event := r.Header.Get("X-Gogs-Event")
	if len(event) == 0 {
		event = r.Header.Get("X-Gitea-Event")
	}
	//only push events are current supported
	if event != "push" {
		log.Printf("received unknown event \"%s\"\n", event)
		return
	}
	// Read and decode the JSON push payload.
	var data, err = ioutil.ReadAll(r.Body)
	panicIf(err, "while reading request body")
	//unmarshal request body
	var hook api.PushPayload
	err = json.Unmarshal(data, &hook)
	panicIf(err, fmt.Sprintf("while unmarshaling request base64(%s)", b64.StdEncoding.EncodeToString(data)))
	log.Printf("received webhook on %s", hook.Repo.FullName)
	// Run the configured commands for every matching repository entry;
	// an entry matches on either the full name or the HTML URL.
	for _, repo := range config.Repositories {
		if repo.Name == hook.Repo.FullName || repo.Name == hook.Repo.HTMLURL {
			// The configured secret must equal the payload's secret.
			// NOTE(review): a constant-time comparison would close a
			// (minor) timing side channel here — confirm before changing.
			if repo.Secret != hook.Secret {
				log.Printf("secret mismatch for repo %s\n", repo.Name)
				continue
			}
			//execute commands for repository
			for _, cmd := range repo.Commands {
				// Commands run directly (no shell, no arguments).
				var command = exec.Command(cmd)
				out, err := command.Output()
				if err != nil {
					log.Println(err)
				} else {
					log.Println("Executed: " + cmd)
					log.Println("Output: " + string(out))
				}
			}
		}
	}
}
|
package main
import (
"github.com/hashicorp/terraform-plugin-sdk/plugin"
"github.com/vadimDidenko/terraform-provider-upwork/upwork"
)
// main starts the Terraform plugin RPC server, exposing the Upwork
// provider to Terraform core.
func main() {
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: upwork.Provider})
}
|
package utility
import (
"Perekoter/models"
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/StefanSchroeder/Golang-Roman"
"github.com/parnurzeal/gorequest"
)
// Perekot creates the successor ("perekot") thread for the given thread:
// it builds the subject and opening post, uploads them with the cover
// image to the imageboard, then persists the new thread number and
// optionally posts follow-up notification / old-link messages.
func Perekot(thread models.Thread) error {
	config := Config.Get()
	db := models.DB()
	defer db.Close()
	oldThread := thread.CurrentThread
	threadID := strconv.Itoa(int(thread.ID))
	urlPath := config.Base + "/makaba/posting.fcgi?json=1"
	imgPath := "./covers/" + thread.Image
	// Build subject, post body, and read the cover image; abort if any
	// of the three fails.
	title, errTitle := createTitle(thread)
	post, errPost := generatePost(thread)
	file, errFile := ioutil.ReadFile(imgPath)
	if (errTitle != nil) || (errPost != nil) || (errFile != nil) {
		NewError("Failed to create content of thread " + threadID)
		return errors.New("Not created")
	}
	// Authenticate via the posting passcode cookie.
	cookie := http.Cookie{
		Name:  "passcode_auth",
		Value: CurrentUsercode.Usercode,
	}
	// thread=0 asks the board to create a brand-new thread.
	request := gorequest.New()
	_, body, errSend := request.Post(urlPath).
		Type("multipart").
		SendFile(file, thread.Image, "formimages[]").
		Send("json=1").
		Send("comment=" + post).
		Send("task=post").
		Send("board=" + thread.Board.Addr).
		Send("thread=0").
		Send("name=" + config.Botname).
		Send("subject=" + title).
		AddCookie(&cookie).
		End()
	if errSend != nil {
		NewError("Failed to send Perekot (thread " + threadID + ")")
		NewHistoryPoint("Failed to send Perekot (thread " + threadID + ")")
		return errors.New("Perekot not sended")
	}
	var responseBody PostResponse
	errFormate := json.Unmarshal([]byte(body), &responseBody)
	if errFormate != nil {
		NewError("Failed to send Perekot (thread " + threadID + ") - incorrect server response")
		NewHistoryPoint("Failed to send Perekot (thread " + threadID + ") - incorrect server response")
		return errors.New("Perekot not sended")
	}
	if responseBody.Error != 0 {
		NewError("Failed to send Perekot (thread " + threadID + ") - error " + responseBody.Reason)
		NewHistoryPoint("Failed to send Perekot (thread " + threadID + ") - error " + responseBody.Reason)
		return errors.New("Perekot not sended")
	}
	NewHistoryPoint("Perekot was created (thread " + threadID + ")")
	// Persist the new thread number and reset per-thread counters.
	targetNum := responseBody.Target
	updateThreadAddr(thread, targetNum)
	updateThreadLastData(thread)
	if thread.Numbering {
		threadIncrement(thread)
	}
	// Optional follow-up posts. Each waits a minute first — presumably
	// to respect the board's posting rate limit; TODO confirm.
	if config.Notification {
		time.Sleep(60 * time.Second)
		notification(thread, oldThread, targetNum)
	}
	if config.OldLink {
		time.Sleep(60 * time.Second)
		oldLink(thread, oldThread, targetNum)
	}
	return nil
}
// createTitle builds the subject line for the successor thread. When
// numbering is enabled the next thread number is appended after the
// numbering symbol, rendered as a Roman numeral if requested. The error
// result is always nil (kept for signature symmetry with generatePost).
func createTitle(thread models.Thread) (string, error) {
	if !thread.Numbering {
		return thread.Title, nil
	}
	next := thread.CurrentNum + 1
	suffix := strconv.Itoa(next)
	if thread.Roman {
		suffix = roman.Roman(next)
	}
	return thread.Title + " " + thread.NumberingSymbol + suffix, nil
}
// generatePost produces the body of the new thread's opening post. When
// HeaderLink is set, the template is fetched over HTTP from the URL in
// thread.Header; otherwise thread.Header itself is the template. In both
// cases the ${old_thread} placeholder is replaced with a reply link to
// the old thread.
func generatePost(thread models.Thread) (string, error) {
	oldRef := ">>" + strconv.Itoa(thread.CurrentThread)
	template := thread.Header
	if thread.HeaderLink {
		_, body, errSend := gorequest.New().Get(thread.Header).End()
		if errSend != nil {
			NewError("Failed to get the post header (thread " + strconv.Itoa(int(thread.ID)) + ")")
			return "", errors.New("Not created")
		}
		template = body
	}
	return strings.Replace(template, "${old_thread}", oldRef, -1), nil
}
// generateNotification builds the text posted into the old thread: the
// configured notification prefix followed by the new thread's number.
func generateNotification(newNum int) string {
	return Config.Get().NotificationText + strconv.Itoa(newNum)
}
// generateOldLink builds the text posted into the new thread: the
// configured old-link prefix followed by the given thread number.
func generateOldLink(newNum int) string {
	return Config.Get().OldLinkText + strconv.Itoa(newNum)
}
// notification posts a message into the OLD thread (oldNum) pointing its
// readers at the freshly created thread (newNum). Failures are recorded
// via NewError/NewHistoryPoint; the function itself returns nothing.
func notification(thread models.Thread, oldNum int, newNum int) {
	config := Config.Get()
	path := config.Base + "/makaba/posting.fcgi"
	notification := generateNotification(newNum)
	threadID := strconv.Itoa(int(thread.ID))
	// Authenticate via the posting passcode cookie.
	cookie := http.Cookie{
		Name:  "passcode_auth",
		Value: CurrentUsercode.Usercode,
	}
	request := gorequest.New()
	_, body, errSend := request.Post(path).
		Type("multipart").
		Send("task=post").
		Send("board=" + thread.Board.Addr).
		Send("thread=" + strconv.Itoa(oldNum)).
		Send("name=" + config.Botname).
		Send("subject=PEREKOT").
		Send("comment=" + notification).
		AddCookie(&cookie).
		End()
	if errSend != nil {
		NewError("Failed to send notification (thread " + threadID + ")")
		NewHistoryPoint("Failed to send notification (thread " + threadID + ")")
		return
	}
	// The board replies with JSON; a non-zero Error field means rejection.
	var responseBody PostResponse
	errFormate := json.Unmarshal([]byte(body), &responseBody)
	if errFormate != nil {
		NewError("Failed to convert server response to JSON (notification in thread " + threadID + ")")
		NewHistoryPoint("Failed to convert server response to JSON (notification in thread " + threadID + ")")
		return
	}
	if responseBody.Error != 0 {
		NewError("Failed to create notification (thread " + threadID + ") - error " + responseBody.Reason)
		NewHistoryPoint("Failed to create notification (thread " + threadID + ") - error " + responseBody.Reason)
	} else {
		NewHistoryPoint("Notification in thread \"" + thread.Title + "\" was created")
	}
}
// oldLink posts a message into the NEW thread (newNum) linking back to
// the previous thread (oldNum). Failures are recorded via
// NewError/NewHistoryPoint; the function itself returns nothing.
func oldLink(thread models.Thread, oldNum int, newNum int) {
	config := Config.Get()
	path := config.Base + "/makaba/posting.fcgi"
	// Note: the message text carries the OLD thread number, and is
	// posted into the NEW thread below.
	notification := generateOldLink(oldNum)
	threadID := strconv.Itoa(int(thread.ID))
	// Authenticate via the posting passcode cookie.
	cookie := http.Cookie{
		Name:  "passcode_auth",
		Value: CurrentUsercode.Usercode,
	}
	request := gorequest.New()
	_, body, errSend := request.Post(path).
		Type("multipart").
		Send("task=post").
		Send("board=" + thread.Board.Addr).
		Send("thread=" + strconv.Itoa(newNum)).
		Send("name=" + config.Botname).
		Send("subject=PEREKOT").
		Send("comment=" + notification).
		AddCookie(&cookie).
		End()
	if errSend != nil {
		NewError("Failed to send old link (thread " + threadID + ")")
		NewHistoryPoint("Failed to send old link (thread " + threadID + ")")
		return
	}
	// The board replies with JSON; a non-zero Error field means rejection.
	var responseBody PostResponse
	errFormate := json.Unmarshal([]byte(body), &responseBody)
	if errFormate != nil {
		NewError("Failed to convert server response to JSON (old link notification in thread " + threadID + ")")
		NewHistoryPoint("Failed to convert server response to JSON (old link notification in thread " + threadID + ")")
		return
	}
	if responseBody.Error != 0 {
		NewError("Failed to create old link notification (thread " + threadID + ") - error " + responseBody.Reason)
		NewHistoryPoint("Failed to create old link notification (thread " + threadID + ") - error " + responseBody.Reason)
	} else {
		NewHistoryPoint("Old link notification in thread \"" + thread.Title + "\" was created")
	}
}
// threadIncrement reloads the thread row by ID and increments its
// CurrentNum counter in the database.
func threadIncrement(oldThread models.Thread) {
	db := models.DB()
	defer db.Close()
	var current models.Thread
	db.First(&current, oldThread.ID)
	current.CurrentNum++
	db.Save(&current)
}
// updateThreadAddr reloads the thread row by ID and records newThread as
// its current thread number.
func updateThreadAddr(oldThread models.Thread, newThread int) {
	db := models.DB()
	defer db.Close()
	var current models.Thread
	db.First(&current, oldThread.ID)
	current.CurrentThread = newThread
	db.Save(&current)
}
// updateThreadLastData reloads the thread row by ID, resets its post
// counter to 1, and stamps the current Unix time as the last perekot.
func updateThreadLastData(oldThread models.Thread) {
	db := models.DB()
	defer db.Close()
	var current models.Thread
	db.First(&current, oldThread.ID)
	current.LastPosts = 1
	current.LastPerekot = int(time.Now().Unix())
	db.Save(&current)
}
|
package backtracking
import (
"fmt"
"testing"
)
// Test_getPermutation checks getPermutation2 for n=3, k=3: the 3rd
// permutation of "123" in lexicographic order is "213".
func Test_getPermutation(t *testing.T) {
	res := getPermutation2(3, 3)
	if res != "213" {
		// Include input and expectation so a failure is self-explanatory
		// (the original t.Error(res) printed only the bad value).
		t.Errorf("getPermutation2(3, 3) = %q, want %q", res, "213")
	}
	fmt.Println(res)
}
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
/*
// ErrConsistencyFail happens when a consistency check fails. Check the log to retrieve details on which element is failing
const ErrConsistencyFail = "consistency check fail at index %d"
type CCOptions struct {
singleiteration bool
iterationSleepTime time.Duration
frequencySleepTime time.Duration
}
type corruptionChecker struct {
options CCOptions
dbList DatabaseList
Logger logger.Logger
exit bool
muxit sync.Mutex
Trusted bool
mux sync.Mutex
currentDbIndex int
rg RandomGenerator
}
// CorruptionChecker corruption checker interface
type CorruptionChecker interface {
Start(context.Context) (err error)
Stop()
GetStatus() bool
}
// NewCorruptionChecker returns new trust checker service
func NewCorruptionChecker(opt CCOptions, d DatabaseList, l logger.Logger, rg RandomGenerator) CorruptionChecker {
return &corruptionChecker{
options: opt,
dbList: d,
Logger: l,
exit: false,
Trusted: true,
currentDbIndex: 0,
rg: rg,
}
}
// Start start the trust checker loop
func (s *corruptionChecker) Start(ctx context.Context) (err error) {
s.Logger.Debugf("Start scanning ...")
for {
s.mux.Lock()
err = s.checkLevel0(ctx)
s.mux.Unlock()
if err != nil || s.isTerminated() || s.options.singleiteration {
return err
}
time.Sleep(s.options.iterationSleepTime)
}
}
func (s *corruptionChecker) isTerminated() bool {
s.muxit.Lock()
defer s.muxit.Unlock()
return s.exit
}
// Stop stop the trust checker loop
func (s *corruptionChecker) Stop() {
s.muxit.Lock()
s.exit = true
s.muxit.Unlock()
s.Logger.Infof("Waiting for consistency checker to shut down")
s.mux.Lock()
}
func (s *corruptionChecker) checkLevel0(ctx context.Context) (err error) {
if s.currentDbIndex == s.dbList.Length() {
s.currentDbIndex = 0
}
db := s.dbList.GetByIndex(int64(s.currentDbIndex))
s.currentDbIndex++
var r *schema.Root
s.Logger.Debugf("Retrieving a fresh root ...")
if r, err = db.CurrentRoot(); err != nil {
s.Logger.Errorf("Error retrieving root: %s", err)
return
}
if r.GetRoot() == nil {
s.Logger.Debugf("Immudb is empty ...")
} else {
// create a shuffle range with all indexes presents in immudb
ids := s.rg.getList(0, r.GetIndex())
s.Logger.Debugf("Start scanning %d elements", len(ids))
for _, id := range ids {
if s.isTerminated() {
return
}
var item *schema.VerifiedTx
if item, err = db.BySafeIndex(&schema.SafeIndexOptions{
Index: id,
RootIndex: &schema.Index{
Index: r.GetIndex(),
},
}); err != nil {
if err == store.ErrInconsistentDigest {
auth.IsTampered = true
s.Logger.Errorf("insertion order index %d was tampered", id)
return
}
s.Logger.Errorf("Error retrieving element at index %d: %s", id, err)
return
}
//verified := item.Proof.Verify(item.Item.Value, *r)
verified := item != nil
s.Logger.Debugf("Item index %d, verified %t", item.Tx.Metadata.Id, verified)
if !verified {
s.Trusted = false
auth.IsTampered = true
s.Logger.Errorf(ErrConsistencyFail, item.Tx.Metadata.Id)
return
}
time.Sleep(s.options.frequencySleepTime)
}
}
return nil
}
// GetStatus return status of the trust checker. False means that a consistency checks was failed
func (s *corruptionChecker) GetStatus() bool {
return s.Trusted
}
*/
/*
type cryptoRandSource struct{}
func newCryptoRandSource() cryptoRandSource {
return cryptoRandSource{}
}
func (cryptoRandSource) Int63() int64 {
var b [8]byte
_, _ = rand.Read(b[:])
return int64(binary.LittleEndian.Uint64(b[:]) & (1<<63 - 1))
}
func (cryptoRandSource) Seed(_ int64) {}
*/
/*
type randomGenerator struct{}
type RandomGenerator interface {
getList(uint64, uint64) []uint64
}
func (rg randomGenerator) getList(start, end uint64) []uint64 {
ids := make([]uint64, end-start+1)
var i uint64
for i = start; i <= end; i++ {
ids[i] = i
}
rn := mrand.New(newCryptoRandSource())
// shuffle indexes
rn.Shuffle(len(ids), func(i, j int) { ids[i], ids[j] = ids[j], ids[i] })
return ids
}
*/
|
package bo
// Order describes a single sort rule: the column to sort by and the
// sort direction flag.
type Order struct {
	Column string `json:"column"`
	Asc    string `json:"asc"`
}

// MenuPermission carries a single menu permission identifier.
type MenuPermission struct {
	Permission string `json:"permission"`
}

// DeptCommon is the minimal department representation (id and name).
type DeptCommon struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}

// Job is the minimal job/position representation (id and name).
type Job struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}

// Role describes a role: its level, display name, and data scope.
type Role struct {
	ID        int    `json:"id"`
	Level     int    `json:"level"`
	Name      string `json:"name"`
	DataScope string `json:"dataScope"`
}

// paging holds the fields shared by paginated responses (a common
// "parent" embedded by pagination-aware types).
type paging struct {
	Current          int     `json:"current"`
	CountID          int     `json:"count_id"`
	MaxLimit         int     `json:"maxLimit"`
	Page             int     `json:"page"`
	SearchCount      bool    `json:"searchCount"`
	Size             int     `json:"size"`
	Total            int     `json:"total"`
	HitCount         bool    `json:"hitCount"`
	OptimizeCountSql bool    `json:"optimizeCountSql"`
	Orders           []Order `json:"orders"`
}
|
package main
import (
"github.com/jmhobbs/wordpress-scanner/client/cmd"
)
// main delegates to the scanner CLI's root command.
func main() {
	cmd.Execute()
}
|
// 32. Break HMAC-SHA1 with a slightly less artificial timing leak
package main
import (
"bytes"
"crypto/hmac"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"fmt"
"hash"
"io"
"log"
weak "math/rand"
"mime/multipart"
"net"
"net/http"
"os"
"sync"
"time"
)
// Seed the weak PRNG used only for the (non-security-sensitive) random
// key length.
func init() { weak.Seed(time.Now().UnixNano()) }

const (
	delay = 5 * time.Millisecond // artificial per-byte comparison delay (the leak)
	addr  = "localhost:9000"     // address the HMAC-checking server listens on
	path  = "/test"              // upload endpoint path
)
// main starts an HMAC-verifying upload server with a random key, waits
// until it accepts connections, then runs the timing attack against each
// input: stdin by default, or every file named on the command line.
func main() {
	key := RandomBytes(RandomInRange(8, 64))
	hm := hmac.New(sha1.New, key)
	go func() {
		log.Fatal(http.ListenAndServe(addr, NewHandler(hm)))
	}()
	// Wait for the server: a successful dial means it is accepting.
	if c, err := net.DialTimeout("tcp", addr, time.Second); err != nil {
		log.Fatal(err)
	} else {
		c.Close()
	}
	url := fmt.Sprintf("http://%s%s", addr, path)
	buf := new(bytes.Buffer)
	files := os.Args[1:]
	if len(files) == 0 {
		// No file arguments: attack the bytes read from stdin.
		io.Copy(buf, os.Stdin)
		err := breakHMAC(hm, url, buf.Bytes(), "user input")
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
		return
	}
	for _, file := range files {
		f, err := os.Open(file)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			continue
		}
		io.Copy(buf, f)
		err = breakHMAC(hm, url, buf.Bytes(), file)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
		// Reuse the buffer across files.
		buf.Reset()
		f.Close()
	}
}
// breakHMAC prints the true HMAC of buf (for comparison), recovers a
// server-accepted signature via the timing side channel, then verifies
// the recovery by uploading with the forged signature.
func breakHMAC(hm hash.Hash, url string, buf []byte, file string) error {
	hm.Reset()
	hm.Write(buf)
	fmt.Printf("attempting to upload %s...\n%x\n", file, hm.Sum([]byte{}))
	sig := hex.EncodeToString(breakServer(url, buf, file, hm.Size()))
	// NOTE(review): resp.Body is never closed — acceptable for a
	// one-shot tool, but it leaks a connection per call.
	resp, err := upload(url, buf, file, sig)
	if err != nil {
		return err
	}
	if resp.StatusCode == http.StatusOK {
		fmt.Printf("successfully uploaded %s\n", file)
	}
	return nil
}
// breakServer recovers a valid signature for buf one byte at a time:
// for each position it tries all 256 candidate values and keeps the one
// whose upload took longest — the server's per-byte comparison delay
// makes the correct prefix measurably slower to reject.
func breakServer(url string, buf []byte, file string, size int) []byte {
	res := make([]byte, size)
	// loop measures every candidate value for byte i and returns the
	// slowest (most likely correct) one.
	loop := func(i int) byte {
		var (
			b    byte
			best int64
		)
		for j := 0; j <= 0xff; j++ {
			res[i] = byte(j)
			sig := hex.EncodeToString(res)
			if n, err := timedUpload(url, buf, file, sig); err != nil {
				log.Fatal(err)
			} else if n > best {
				best = n
				b = byte(j)
			}
		}
		return b
	}
	// Double-check each byte to mitigate false positives: re-measure
	// until two consecutive runs agree.
	for i := range res {
		prev, b := loop(i), loop(i)
		for b != prev {
			prev = b
			b = loop(i)
		}
		res[i] = b
		fmt.Printf("%02x", b)
	}
	fmt.Println()
	return res
}
// timedUpload performs one upload and reports the elapsed wall-clock
// time in nanoseconds; the HTTP response itself is discarded.
func timedUpload(url string, buf []byte, file, sig string) (int64, error) {
	start := time.Now()
	_, err := upload(url, buf, file, sig)
	if err != nil {
		return 0, err
	}
	return time.Since(start).Nanoseconds(), nil
}
// upload posts buf as a multipart file upload, together with its
// hex-encoded signature field, to url and returns the raw HTTP response.
func upload(url string, buf []byte, file, sig string) (*http.Response, error) {
	tmp := new(bytes.Buffer)
	m := multipart.NewWriter(tmp)
	part, err := m.CreateFormFile("file", file)
	if err != nil {
		return nil, err
	}
	// The original ignored this error; a short write would silently
	// send a truncated file.
	if _, err = part.Write(buf); err != nil {
		return nil, err
	}
	if err = m.WriteField("signature", sig); err != nil {
		return nil, err
	}
	contentType := m.FormDataContentType()
	// Close writes the terminating multipart boundary; an error here
	// would produce an unparseable body.
	if err = m.Close(); err != nil {
		return nil, err
	}
	return http.Post(url, contentType, tmp)
}
// handler is an HTTP handler that verifies uploaded files against an
// HMAC; the embedded mutex serializes use of the shared hash state.
type handler struct {
	hash.Hash
	*sync.Mutex
}

// NewHandler takes a keyed HMAC hash and returns an HTTP handler that
// checks upload signatures against it.
func NewHandler(hm hash.Hash) http.Handler {
	return handler{hm, new(sync.Mutex)}
}
// ServeHTTP responds to upload requests with 200 OK if the file HMAC
// matches its signature, and 500 Internal Server Error otherwise. The
// check deliberately uses insecureCompare, leaking timing information.
func (x handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	f, _, err := req.FormFile("file")
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	sig, err := hex.DecodeString(req.FormValue("signature"))
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// Acquire a lock to prevent concurrent use of the shared hash state.
	x.Lock()
	x.Reset()
	io.Copy(x, f)
	sum := x.Sum([]byte{})
	x.Unlock()
	if !insecureCompare(sig, sum) {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// Falling through writes an implicit 200 OK.
}
// insecureCompare compares two buffers one byte at a time, returning
// false upon finding a mismatched pair of bytes. The deliberate sleep
// after every matching byte is the timing leak this exercise exploits:
// the longer the matching prefix, the longer the comparison takes.
func insecureCompare(b1, b2 []byte) bool {
	for len(b1) != 0 && len(b2) != 0 {
		if b1[0] != b2[0] {
			return false
		}
		b1, b2 = b1[1:], b2[1:]
		time.Sleep(delay)
	}
	// Equal only if both buffers were fully consumed (same length).
	return len(b1) == len(b2)
}
// RandomBytes returns n cryptographically random bytes, panicking if
// the system entropy source fails.
func RandomBytes(n int) []byte {
	out := make([]byte, n)
	_, err := rand.Read(out)
	if err != nil {
		panic(err)
	}
	return out
}
// RandomInRange returns a pseudo-random non-negative integer drawn from
// the inclusive range [lo, hi], panicking on an invalid range.
// The output should not be used in a security-sensitive context.
func RandomInRange(lo, hi int) int {
	if lo < 0 || hi < lo {
		panic("RandomInRange: invalid range")
	}
	span := hi - lo + 1
	return lo + weak.Intn(span)
}
|
package aliyun
import (
"encoding/json"
"fmt"
"sync"
"github.com/gogap/config"
"github.com/gogap/context"
"github.com/gogap/flow"
"github.com/sirupsen/logrus"
)
// init registers every RDS handler under its "devops.aliyun.rds.*"
// task name so the flow engine can dispatch to it by name.
func init() {
	flow.RegisterHandler("devops.aliyun.rds.instance.create", CreateRDSInstance)
	flow.RegisterHandler("devops.aliyun.rds.instance.attribute.describe", DescribeRDSInstanceAttr)
	flow.RegisterHandler("devops.aliyun.rds.instance.netinfo.describe", DescribeRDSInstanceNetInfo)
	flow.RegisterHandler("devops.aliyun.rds.instance.delete", DeleteRDSInstance)
	flow.RegisterHandler("devops.aliyun.rds.instance.running.wait", WaitForAllRDSRunning)
	flow.RegisterHandler("devops.aliyun.rds.instance.account.create", CreateRDSDbAccounts)
	flow.RegisterHandler("devops.aliyun.rds.instance.conn.public.alloc", AllocateInstancePublicConnection)
	flow.RegisterHandler("devops.aliyun.rds.instance.conn.public.release", ReleaseInstancePublicConnection)
}
// CreateRDSInstance creates the RDS instances described by conf and
// returns any creation error. (The returned instance list is unused by
// this flow handler.)
func CreateRDSInstance(ctx context.Context, conf config.Configuration) (err error) {
	aliyun := NewAliyun(ctx, conf)
	// The original had a redundant `if err != nil { return }` followed
	// by `return` — both branches are identical, so just return.
	_, err = aliyun.CreateRDSInstances()
	return
}
// CreateRDSDbAccounts creates the configured database accounts on the
// RDS instances described by conf.
func CreateRDSDbAccounts(ctx context.Context, conf config.Configuration) (err error) {
	return NewAliyun(ctx, conf).CreateRDSDbAccount()
}
// DeleteRDSInstance deletes the RDS instances described by conf.
func DeleteRDSInstance(ctx context.Context, conf config.Configuration) (err error) {
	return NewAliyun(ctx, conf).DeleteRDSInstances()
}
// AllocateInstancePublicConnection allocates public connection strings
// for the RDS instances described by conf.
func AllocateInstancePublicConnection(ctx context.Context, conf config.Configuration) (err error) {
	return NewAliyun(ctx, conf).AllocateInstancePublicConnection()
}
// ReleaseInstancePublicConnection releases the public connection strings
// of the RDS instances described by conf.
func ReleaseInstancePublicConnection(ctx context.Context, conf config.Configuration) (err error) {
	return NewAliyun(ctx, conf).ReleaseInstancePublicConnection()
}
// DescribeRDSInstanceAttr queries attribute information for the RDS
// instances of this flow, exports a host/port environment variable per
// instance, and appends the JSON-encoded result to the flow output,
// tagged with every instance name plus "aliyun"/"rds"/the flow code.
func DescribeRDSInstanceAttr(ctx context.Context, conf config.Configuration) (err error) {
	aliyun := NewAliyun(ctx, conf)
	insts, err := aliyun.DescribeRDSInstancesAttr()
	if err != nil || len(insts) == 0 {
		return
	}
	payload, err := json.Marshal(insts)
	if err != nil {
		return
	}
	var tags []string
	for _, inst := range insts {
		tags = append(tags, inst.Name)
		setENV(fmt.Sprintf("rds_db_%s_host", inst.Name), inst.ConnectionString)
		setENV(fmt.Sprintf("rds_db_%s_port", inst.Name), inst.Port)
	}
	tags = append(tags, "aliyun", "rds", aliyun.Code)
	flow.AppendOutput(ctx, flow.NameValue{Name: "ALIYUN_RDS_INSTANCES", Value: payload, Tags: tags})
	return
}
// DescribeRDSInstanceNetInfo queries the network information of every
// RDS instance, exports a host/port environment variable per connection
// type, and appends the JSON-encoded result to the flow output, tagged
// with every instance name plus "aliyun"/"rds"/the flow code.
func DescribeRDSInstanceNetInfo(ctx context.Context, conf config.Configuration) (err error) {
	aliyun := NewAliyun(ctx, conf)
	insts, err := aliyun.DescribeDBInstanceNetInfo()
	if err != nil || len(insts) == 0 {
		return
	}
	payload, err := json.Marshal(insts)
	if err != nil {
		return
	}
	var tags []string
	for _, inst := range insts {
		tags = append(tags, inst.InstanceName)
		for _, netinfo := range inst.NetInfo {
			setENV(fmt.Sprintf("rds_db_%s_%s_host", inst.InstanceName, netinfo.IPType), netinfo.ConnectionString)
			setENV(fmt.Sprintf("rds_db_%s_%s_port", inst.InstanceName, netinfo.IPType), netinfo.Port)
		}
	}
	tags = append(tags, "aliyun", "rds", aliyun.Code)
	flow.AppendOutput(ctx, flow.NameValue{Name: "ALIYUN_RDS_INSTANCES_NET_INFO", Value: payload, Tags: tags})
	return
}
// WaitForAllRDSRunning lists every RDS instance for this flow and blocks
// until each reports the "Running" state, polling all of them
// concurrently with a 20-minute (60*20 seconds) limit per instance.
func WaitForAllRDSRunning(ctx context.Context, conf config.Configuration) (err error) {
	aliyun := NewAliyun(ctx, conf)
	inst, err := aliyun.listRDSInstance(nil)
	if err != nil {
		return
	}
	wg := &sync.WaitGroup{}
	for _, v := range inst.Items.DBInstance {
		wg.Add(1)
		// The loop variable's fields are passed as arguments so each
		// goroutine sees its own instance (required pre-Go 1.22).
		// NOTE(review): WaitForDBInstance's result is discarded, so a
		// timed-out instance is not reported as an error — confirm
		// whether that is intended.
		go func(instId, name string) {
			defer wg.Done()
			logrus.WithField("CODE", aliyun.Code).WithField("RDS-DBINSTANCE-ID", instId).WithField("RDS-DBINSTANCE-NAME", name).Infoln("Waiting db instance")
			aliyun.WaitForDBInstance(instId, "Running", 60*20)
		}(v.DBInstanceId, v.DBInstanceDescription)
	}
	wg.Wait()
	return
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mydump_test
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/pingcap/tidb/br/pkg/lightning/config"
. "github.com/pingcap/tidb/br/pkg/lightning/mydump"
"github.com/pingcap/tidb/br/pkg/lightning/worker"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// var expectedTuplesCount = map[string]int64{
// "i": 1,
// "report_case_high_risk": 1,
// "tbl_autoid": 10000,
// "tbl_multi_index": 10000,
// }
/*
TODO : test with specified 'regionBlockSize' ...
*/
// TestTableRegion checks that MakeTableRegions divides each example
// table into regions whose total size equals the source files' total
// size, and whose offsets/row IDs are contiguous within each file.
func TestTableRegion(t *testing.T) {
	cfg := newConfigWithSourceDir("./examples")
	loader, _ := NewMyDumpLoader(context.Background(), cfg)
	dbMeta := loader.GetDatabases()[0]
	ioWorkers := worker.NewPool(context.Background(), 1, "io")
	for _, meta := range dbMeta.Tables {
		divideConfig := NewDataDivideConfig(cfg, 1, ioWorkers, loader.GetStore(), meta)
		regions, err := MakeTableRegions(context.Background(), divideConfig)
		require.NoError(t, err)
		// check - region-size vs file-size: summed region sizes must
		// account for every byte of every data file.
		var tolFileSize int64 = 0
		for _, file := range meta.DataFiles {
			tolFileSize += file.FileMeta.FileSize
		}
		var tolRegionSize int64 = 0
		for _, region := range regions {
			tolRegionSize += region.Size()
		}
		require.Equal(t, tolFileSize, tolRegionSize)
		// // check - rows num
		// var tolRows int64 = 0
		// for _, region := range regions {
		// tolRows += region.Rows()
		// }
		// c.Assert(tolRows, Equals, expectedTuplesCount[table])
		// check - range: consecutive regions of the same file must abut
		// in both byte offset and row ID; a new file restarts at
		// offset 0 / row ID 1.
		regionNum := len(regions)
		preReg := regions[0]
		for i := 1; i < regionNum; i++ {
			reg := regions[i]
			if preReg.FileMeta.Path == reg.FileMeta.Path {
				require.Equal(t, preReg.Offset()+preReg.Size(), reg.Offset())
				require.Equal(t, preReg.RowIDMin()+preReg.Rows(), reg.RowIDMin())
			} else {
				require.Equal(t, 0, reg.Offset())
				require.Equal(t, 1, reg.RowIDMin())
			}
			preReg = reg
		}
	}
}
// TestAllocateEngineIDs verifies how 700 unit-sized file regions are
// distributed across import engines for various batch sizes, batch-import
// ratios, and table-concurrency limits.
func TestAllocateEngineIDs(t *testing.T) {
	// 700 regions of size 1.0 each.
	dataFileSizes := make([]float64, 700)
	for i := range dataFileSizes {
		dataFileSizes[i] = 1.0
	}
	filesRegions := make([]*TableRegion, 0, len(dataFileSizes))
	for range dataFileSizes {
		filesRegions = append(filesRegions, new(TableRegion))
	}
	// checkEngineSizes asserts the exact engine-ID -> region-count map
	// produced by the most recent AllocateEngineIDs call.
	checkEngineSizes := func(what string, expected map[int32]int) {
		actual := make(map[int32]int)
		for _, region := range filesRegions {
			actual[region.EngineID]++
		}
		require.Equal(t, expected, actual, what)
	}
	// Batch size > Total size => Everything in the zero batch.
	AllocateEngineIDs(filesRegions, dataFileSizes, 1000, 0.5, 1000)
	checkEngineSizes("no batching", map[int32]int{
		0: 700,
	})
	// Allocate 3 engines.
	AllocateEngineIDs(filesRegions, dataFileSizes, 200, 0.5, 1000)
	checkEngineSizes("batch size = 200", map[int32]int{
		0: 170,
		1: 213,
		2: 317,
	})
	// Allocate 3 engines with an alternative ratio
	AllocateEngineIDs(filesRegions, dataFileSizes, 200, 0.6, 1000)
	checkEngineSizes("batch size = 200, ratio = 0.6", map[int32]int{
		0: 160,
		1: 208,
		2: 332,
	})
	// Allocate 5 engines.
	AllocateEngineIDs(filesRegions, dataFileSizes, 100, 0.5, 1000)
	checkEngineSizes("batch size = 100", map[int32]int{
		0: 93,
		1: 105,
		2: 122,
		3: 153,
		4: 227,
	})
	// Number of engines > table concurrency: later engines fall back to
	// a flat 50 regions each.
	AllocateEngineIDs(filesRegions, dataFileSizes, 50, 0.5, 4)
	checkEngineSizes("batch size = 50, limit table conc = 4", map[int32]int{
		0:  50,
		1:  59,
		2:  73,
		3:  110,
		4:  50,
		5:  50,
		6:  50,
		7:  50,
		8:  50,
		9:  50,
		10: 50,
		11: 50,
		12: 8,
	})
	// Zero ratio = Uniform distribution across engines.
	AllocateEngineIDs(filesRegions, dataFileSizes, 100, 0.0, 1000)
	checkEngineSizes("batch size = 100, ratio = 0", map[int32]int{
		0: 100,
		1: 100,
		2: 100,
		3: 100,
		4: 100,
		5: 100,
		6: 100,
	})
}
// TestMakeTableRegionsSplitLargeFile checks that a strict-format CSV with
// MaxRegionSize=1 is split into the expected byte ranges, that a gzip
// file is never split (single region to EOF), and that a canceled
// context does not cause a panic.
func TestMakeTableRegionsSplitLargeFile(t *testing.T) {
	cfg := &config.Config{
		Mydumper: config.MydumperRuntime{
			ReadBlockSize: config.ReadBlockSize,
			MaxRegionSize: 1,
			CSV: config.CSVConfig{
				Separator:         ",",
				Delimiter:         "",
				Header:            true,
				HeaderSchemaMatch: true,
				TrimLastSep:       false,
				NotNull:           false,
				Null:              []string{"NULL"},
				EscapedBy:         `\`,
			},
			StrictFormat: true,
			Filter:       []string{"*.*"},
		},
	}
	filePath := "./csv/split_large_file.csv"
	dataFileInfo, err := os.Stat(filePath)
	require.NoError(t, err)
	fileSize := dataFileInfo.Size()
	fileInfo := FileInfo{FileMeta: SourceFileMeta{Path: filePath, Type: SourceTypeCSV, FileSize: fileSize}}
	colCnt := 3
	columns := []string{"a", "b", "c"}
	meta := &MDTableMeta{
		DB:        "csv",
		Name:      "large_csv_file",
		DataFiles: []FileInfo{fileInfo},
	}
	ctx := context.Background()
	store, err := storage.NewLocalStorage(".")
	assert.NoError(t, err)
	// Uncompressed: the file splits at the expected offsets (the first
	// 6 bytes are the header row, hence the start at 6).
	meta.DataFiles[0].FileMeta.Compression = CompressionNone
	divideConfig := NewDataDivideConfig(cfg, colCnt, nil, store, meta)
	regions, err := MakeTableRegions(ctx, divideConfig)
	assert.NoError(t, err)
	offsets := [][]int64{{6, 12}, {12, 18}, {18, 24}, {24, 30}}
	assert.Len(t, regions, len(offsets))
	for i := range offsets {
		assert.Equal(t, offsets[i][0], regions[i].Chunk.Offset)
		assert.Equal(t, offsets[i][1], regions[i].Chunk.EndOffset)
		assert.Equal(t, columns, regions[i].Chunk.Columns)
	}
	// test - gzip compression: compressed files cannot be split, so a
	// single region spanning the whole file is expected.
	meta.DataFiles[0].FileMeta.Compression = CompressionGZ
	regions, err = MakeTableRegions(ctx, divideConfig)
	assert.NoError(t, err)
	assert.Len(t, regions, 1)
	assert.Equal(t, int64(0), regions[0].Chunk.Offset)
	assert.Equal(t, TableFileSizeINF, regions[0].Chunk.EndOffset)
	assert.Len(t, regions[0].Chunk.Columns, 0)
	// test canceled context will not panic
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	for i := 0; i < 20; i++ {
		_, _ = MakeTableRegions(ctx, divideConfig)
	}
}
// TestCompressedMakeSourceFileRegion checks that a zstd-compressed CSV
// becomes exactly one region covering the whole file, with the row-ID
// bound and reported size derived from the sampled real (uncompressed)
// size.
func TestCompressedMakeSourceFileRegion(t *testing.T) {
	meta := &MDTableMeta{
		DB:   "csv",
		Name: "large_csv_file",
	}
	filePath := "./csv/split_large_file.csv.zst"
	dataFileInfo, err := os.Stat(filePath)
	require.NoError(t, err)
	fileSize := dataFileInfo.Size()
	fileInfo := FileInfo{FileMeta: SourceFileMeta{
		Path:        filePath,
		Type:        SourceTypeCSV,
		Compression: CompressionZStd,
		FileSize:    fileSize,
	}}
	colCnt := 3
	ctx := context.Background()
	store, err := storage.NewLocalStorage(".")
	assert.NoError(t, err)
	// Estimate the uncompressed size by sampling the compress ratio.
	compressRatio, err := SampleFileCompressRatio(ctx, fileInfo.FileMeta, store)
	require.NoError(t, err)
	fileInfo.FileMeta.RealSize = int64(compressRatio * float64(fileInfo.FileMeta.FileSize))
	divideConfig := &DataDivideConfig{
		ColumnCnt: colCnt,
		TableMeta: meta,
	}
	regions, sizes, err := MakeSourceFileRegion(ctx, divideConfig, fileInfo)
	assert.NoError(t, err)
	// Compressed files are never split: one region to EOF.
	assert.Len(t, regions, 1)
	assert.Equal(t, int64(0), regions[0].Chunk.Offset)
	assert.Equal(t, int64(0), regions[0].Chunk.RealOffset)
	assert.Equal(t, TableFileSizeINF, regions[0].Chunk.EndOffset)
	rowIDMax := fileInfo.FileMeta.RealSize * CompressSizeFactor / int64(colCnt)
	assert.Equal(t, rowIDMax, regions[0].Chunk.RowIDMax)
	assert.Len(t, regions[0].Chunk.Columns, 0)
	assert.Equal(t, fileInfo.FileMeta.RealSize, int64(sizes[0]))
}
// TestSplitLargeFile checks that SplitLargeCSV divides a header-bearing CSV
// fixture into chunks whose byte offsets respect MaxChunkSize, across a range
// of region-size settings. All expected chunks start at offset 6, i.e. just
// after the header line, and every chunk carries the header's column names.
func TestSplitLargeFile(t *testing.T) {
	meta := &MDTableMeta{
		DB:   "csv",
		Name: "large_csv_file",
	}
	cfg := &config.Config{
		Mydumper: config.MydumperRuntime{
			ReadBlockSize: config.ReadBlockSize,
			CSV: config.CSVConfig{
				Separator:         ",",
				Delimiter:         "",
				Header:            true,
				HeaderSchemaMatch: true,
				TrimLastSep:       false,
				NotNull:           false,
				Null:              []string{"NULL"},
				EscapedBy:         `\`,
			},
			// StrictFormat is required for splitting a file into chunks.
			StrictFormat: true,
			Filter:       []string{"*.*"},
		},
	}
	filePath := "./csv/split_large_file.csv"
	dataFileInfo, err := os.Stat(filePath)
	require.NoError(t, err)
	fileSize := dataFileInfo.Size()
	fileInfo := FileInfo{FileMeta: SourceFileMeta{Path: filePath, Type: SourceTypeCSV, FileSize: fileSize}}
	ioWorker := worker.NewPool(context.Background(), 4, "io")
	store, err := storage.NewLocalStorage(".")
	assert.NoError(t, err)
	divideConfig := NewDataDivideConfig(cfg, 3, ioWorker, store, meta)
	columns := []string{"a", "b", "c"}
	// Each case pairs a max region size with the expected chunk boundaries;
	// smaller limits produce more, narrower chunks.
	for _, tc := range []struct {
		maxRegionSize config.ByteSize
		offsets       [][]int64
	}{
		{1, [][]int64{{6, 12}, {12, 18}, {18, 24}, {24, 30}}},
		{6, [][]int64{{6, 18}, {18, 30}}},
		{8, [][]int64{{6, 18}, {18, 30}}},
		{12, [][]int64{{6, 24}, {24, 30}}},
		{13, [][]int64{{6, 24}, {24, 30}}},
		{18, [][]int64{{6, 30}}},
		{19, [][]int64{{6, 30}}},
	} {
		divideConfig.MaxChunkSize = int64(tc.maxRegionSize)
		regions, _, err := SplitLargeCSV(context.Background(), divideConfig, fileInfo)
		assert.NoError(t, err)
		assert.Len(t, regions, len(tc.offsets))
		for i := range tc.offsets {
			assert.Equal(t, tc.offsets[i][0], regions[i].Chunk.Offset)
			assert.Equal(t, tc.offsets[i][1], regions[i].Chunk.EndOffset)
			assert.Equal(t, columns, regions[i].Chunk.Columns)
		}
	}
}
// TestSplitLargeFileNoNewLineAtEOF checks that a CSV file whose final row is
// not terminated by a newline still splits cleanly: the last chunk must end
// exactly at EOF (byte 21) rather than being dropped or over-read.
func TestSplitLargeFileNoNewLineAtEOF(t *testing.T) {
	meta := &MDTableMeta{
		DB:   "csv",
		Name: "large_csv_file",
	}
	cfg := &config.Config{
		Mydumper: config.MydumperRuntime{
			ReadBlockSize: config.ReadBlockSize,
			CSV: config.CSVConfig{
				Separator:         ",",
				Delimiter:         "",
				Header:            true,
				HeaderSchemaMatch: true,
				TrimLastSep:       false,
				NotNull:           false,
				Null:              []string{"NULL"},
				EscapedBy:         `\`,
			},
			StrictFormat: true,
			Filter:       []string{"*.*"},
			// Force a split after (almost) every row.
			MaxRegionSize: 1,
		},
	}
	dir := t.TempDir()
	fileName := "test.csv"
	filePath := filepath.Join(dir, fileName)
	// Note: CRLF line endings and no trailing newline after "789,101".
	content := []byte("a,b\r\n123,456\r\n789,101")
	err := os.WriteFile(filePath, content, 0o644)
	require.NoError(t, err)
	dataFileInfo, err := os.Stat(filePath)
	require.NoError(t, err)
	fileSize := dataFileInfo.Size()
	fileInfo := FileInfo{FileMeta: SourceFileMeta{Path: fileName, Type: SourceTypeCSV, FileSize: fileSize}}
	ioWorker := worker.NewPool(context.Background(), 4, "io")
	store, err := storage.NewLocalStorage(dir)
	require.NoError(t, err)
	divideConfig := NewDataDivideConfig(cfg, 2, ioWorker, store, meta)
	columns := []string{"a", "b"}
	// First chunk starts after the 4-byte header+CRLF; last ends at EOF.
	offsets := [][]int64{{4, 13}, {13, 21}}
	regions, _, err := SplitLargeCSV(context.Background(), divideConfig, fileInfo)
	require.NoError(t, err)
	require.Len(t, regions, len(offsets))
	for i := range offsets {
		require.Equal(t, offsets[i][0], regions[i].Chunk.Offset)
		require.Equal(t, offsets[i][1], regions[i].Chunk.EndOffset)
		require.Equal(t, columns, regions[i].Chunk.Columns)
	}
}
// TestSplitLargeFileWithCustomTerminator checks that SplitLargeCSV honors a
// multi-byte custom row terminator ("|+|\n") when computing chunk boundaries,
// so embedded bare newlines inside a field do not cause incorrect splits.
func TestSplitLargeFileWithCustomTerminator(t *testing.T) {
	tblMeta := &MDTableMeta{
		DB:   "csv",
		Name: "large_csv_with_custom_terminator",
	}
	cfg := &config.Config{
		Mydumper: config.MydumperRuntime{
			ReadBlockSize: config.ReadBlockSize,
			CSV: config.CSVConfig{
				Separator:  "|+|",
				Terminator: "|+|\n",
			},
			StrictFormat:  true,
			Filter:        []string{"*.*"},
			MaxRegionSize: 1,
		},
	}

	dir := t.TempDir()
	const fileName = "test2.csv"
	target := filepath.Join(dir, fileName)
	// Rows contain raw '\n' characters that must NOT be treated as row ends.
	payload := []byte("5|+|abc\ndef\nghi|+|6|+|\n7|+|xyz|+|8|+|\n9|+||+|10")
	require.NoError(t, os.WriteFile(target, payload, 0o644))

	stat, err := os.Stat(target)
	require.NoError(t, err)
	info := FileInfo{FileMeta: SourceFileMeta{Path: fileName, Type: SourceTypeCSV, FileSize: stat.Size()}}

	pool := worker.NewPool(context.Background(), 4, "io")
	store, err := storage.NewLocalStorage(dir)
	require.NoError(t, err)
	divideCfg := NewDataDivideConfig(cfg, 3, pool, store, tblMeta)

	want := [][]int64{{0, 23}, {23, 38}, {38, 47}}
	regions, _, err := SplitLargeCSV(context.Background(), divideCfg, info)
	require.NoError(t, err)
	require.Len(t, regions, len(want))
	for i, bounds := range want {
		require.Equal(t, bounds[0], regions[i].Chunk.Offset)
		require.Equal(t, bounds[1], regions[i].Chunk.EndOffset)
	}
}
// TestSplitLargeFileOnlyOneChunk checks the degenerate case where the whole
// data section fits inside a single region: exactly one chunk is produced,
// starting right after the header (byte 14) and ending at EOF (byte 24).
func TestSplitLargeFileOnlyOneChunk(t *testing.T) {
	meta := &MDTableMeta{
		DB:   "csv",
		Name: "large_csv_file",
	}
	cfg := &config.Config{
		Mydumper: config.MydumperRuntime{
			ReadBlockSize: config.ReadBlockSize,
			CSV: config.CSVConfig{
				Separator:         ",",
				Delimiter:         "",
				Header:            true,
				HeaderSchemaMatch: true,
				TrimLastSep:       false,
				NotNull:           false,
				Null:              []string{"NULL"},
				EscapedBy:         `\`,
			},
			StrictFormat: true,
			Filter:       []string{"*.*"},
			// Larger than the 10-byte data section, so no further split.
			MaxRegionSize: 15,
		},
	}
	dir := t.TempDir()
	fileName := "test.csv"
	filePath := filepath.Join(dir, fileName)
	content := []byte("field1,field2\r\n123,456\r\n")
	err := os.WriteFile(filePath, content, 0o644)
	require.NoError(t, err)
	dataFileInfo, err := os.Stat(filePath)
	require.NoError(t, err)
	fileSize := dataFileInfo.Size()
	fileInfo := FileInfo{FileMeta: SourceFileMeta{Path: fileName, Type: SourceTypeCSV, FileSize: fileSize}}
	columns := []string{"field1", "field2"}
	ioWorker := worker.NewPool(context.Background(), 4, "io")
	store, err := storage.NewLocalStorage(dir)
	require.NoError(t, err)
	divideConfig := NewDataDivideConfig(cfg, 2, ioWorker, store, meta)
	offsets := [][]int64{{14, 24}}
	regions, _, err := SplitLargeCSV(context.Background(), divideConfig, fileInfo)
	require.NoError(t, err)
	require.Len(t, regions, len(offsets))
	for i := range offsets {
		require.Equal(t, offsets[i][0], regions[i].Chunk.Offset)
		require.Equal(t, offsets[i][1], regions[i].Chunk.EndOffset)
		require.Equal(t, columns, regions[i].Chunk.Columns)
	}
}
|
package fileutil
import (
"fmt"
"os"
)
// This module exists to check that only the necessary modules (the ones that are actually imported) are downloaded from the repository, rather than the whole repository.
// Experiment is a canary function that must never be called in real code:
// it prints a warning and terminates the process with a failure status.
// It exists only so the module-download behaviour described above can be
// exercised.
func Experiment() int {
	fmt.Println("You Should not use me")
	// os.Exit status codes should be in [0, 125] for portability; the
	// previous -1 was truncated to 255 on Unix. Use the conventional
	// failure code 1 instead.
	os.Exit(1)
	return -1 // unreachable; satisfies the compiler's return requirement
}
|
package catm
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00300101 is the XML document wrapper for the ISO 20022
// catm.003.001.01 message, carrying a single AcceptorConfigurationUpdateV01
// payload under the "AccptrCfgtnUpd" element.
type Document00300101 struct {
	XMLName xml.Name                        `xml:"urn:iso:std:iso:20022:tech:xsd:catm.003.001.01 Document"`
	Message *AcceptorConfigurationUpdateV01 `xml:"AccptrCfgtnUpd"`
}
// AddMessage allocates the document's AcceptorConfigurationUpdateV01 payload
// and returns it so the caller can populate its fields.
func (d *Document00300101) AddMessage() *AcceptorConfigurationUpdateV01 {
	msg := new(AcceptorConfigurationUpdateV01)
	d.Message = msg
	return msg
}
// Scope
// The AcceptorConfigurationUpdate message is sent by the master terminal manager or delegated terminal manager to the acceptor system for the update of acquirer parameters, merchant parameters, vendor parameters or cryptographic keys of the acquirer.
// Usage
// The AcceptorConfigurationUpdate message may embed the information required by the acceptor system for the configuration of:
// - the application parameters necessary for software applications processed by the POI system,
// - the acquirer protocol parameters for the message content and message exchange behaviour of the acquirer protocol supported by the POI system,
// - the host communication parameters to define the addresses of the connected acquirer hosts, and
// - the merchant parameters needed for the retailer protocol settings of the POI system.
// AcceptorConfigurationUpdateV01 is the catm.003.001.01 message body; see the
// scope/usage description above for the business semantics.
type AcceptorConfigurationUpdateV01 struct {

	// Set of characteristics related to the transfer of the acceptor parameters.
	Header *iso20022.Header4 `xml:"Hdr"`

	// Acceptor configuration to be downloaded from the terminal management system.
	AcceptorConfiguration *iso20022.AcceptorConfiguration1 `xml:"AccptrCfgtn"`

	// Trailer of the message containing a MAC or a digital signature.
	SecurityTrailer *iso20022.ContentInformationType1 `xml:"SctyTrlr"`
}
// AddHeader allocates the message header and returns it for population.
func (a *AcceptorConfigurationUpdateV01) AddHeader() *iso20022.Header4 {
	hdr := new(iso20022.Header4)
	a.Header = hdr
	return hdr
}
// AddAcceptorConfiguration allocates the acceptor configuration block and
// returns it for population.
func (a *AcceptorConfigurationUpdateV01) AddAcceptorConfiguration() *iso20022.AcceptorConfiguration1 {
	cfgBlock := new(iso20022.AcceptorConfiguration1)
	a.AcceptorConfiguration = cfgBlock
	return cfgBlock
}
// AddSecurityTrailer allocates the security trailer (MAC or digital
// signature container) and returns it for population.
func (a *AcceptorConfigurationUpdateV01) AddSecurityTrailer() *iso20022.ContentInformationType1 {
	trailer := new(iso20022.ContentInformationType1)
	a.SecurityTrailer = trailer
	return trailer
}
|
/*
Write a program that outputs the string representation of numbers from 1 to n.
But for multiples of three it should output “Fizz” instead of the number and
for the multiples of five output “Buzz”. For numbers which are multiples of
both three and five output “FizzBuzz”.
*/
package main
import "fmt"
// fizzBuzz returns the FizzBuzz word for i: "FizzBuzz" for multiples of both
// 3 and 5, "Fizz" for multiples of 3, "Buzz" for multiples of 5, and the
// decimal representation of i otherwise.
func fizzBuzz(i int) string {
	switch {
	case i%3 == 0 && i%5 == 0:
		return "FizzBuzz"
	case i%3 == 0:
		return "Fizz"
	case i%5 == 0:
		return "Buzz"
	default:
		return fmt.Sprint(i)
	}
}

// main prints the FizzBuzz sequence for 1..15.
// Bug fix: the original printed the misspelled words "Fiz" and "FizBuzz",
// contradicting the specification in the file header.
func main() {
	for i := 1; i <= 15; i++ {
		fmt.Println(fizzBuzz(i))
	}
}
|
package processAvaatechSpe
import ()
// Tests with bAxil on spectra from both instruments indicate that offset is
// always << 20 eV, which is < 1 channel width. Thus, it can be
// approximated as 0 eV for all fitting in this module.
// Gain varies from ~20.0 eV/ch to ~20.4 eV/ch, which leads to channel differences of
// ~2 at Si Ka and ~7 out at Fe Kb. Thus, gain needs to be a variable. Min and Max
// allowable gain values will be passed through when calling "Process"
// AssignPrimaryLines matches detected peaks to primary emission lines and
// stores the result in spect.Lines, replacing any previous map.
//
// Offset is approximated as 0 (see the note above), so a line's feasible
// channel window is bounded purely by the allowed gain range via
// channel = energy / gain: the maximum gain yields the minimum channel and
// vice versa. The inner search resumes from just past the last matched line
// (startSearch), so each line is claimed by at most one peak — this assumes
// both spect.Peaks and primary_lineList are ordered by increasing
// channel/energy (NOTE(review): confirm that ordering invariant).
func (spect *Spectrum) AssignPrimaryLines(gainMinKeV float64, gainMaxKeV float64) {
	var lineMap map[string]*Peak
	var peak *Peak
	var startSearch int
	var maxLineCh, minLineCh float64
	lineMap = make(map[string]*Peak)
	nPeaks := len(spect.Peaks)
	nLines := len(primary_lineList)
	startSearch = 0
	for i := 0; i < nPeaks; i++ {
		peak = spect.Peaks[i]
		for j := startSearch; j < nLines; j++ {
			// Channel window for line j under the allowed gain range.
			minLineCh = primary_lineList[j].Energy / gainMaxKeV // maxGain => minCh
			maxLineCh = primary_lineList[j].Energy / gainMinKeV
			if (peak.Channel >= minLineCh) && (peak.Channel <= maxLineCh) {
				lineMap[primary_lineList[j].Name] = peak
				// Never re-match this or any earlier line.
				startSearch = j + 1
				break
			}
		}
	}
	spect.Lines = lineMap
}
// AssignSecondaryLines adds an entry to spect.Lines for every secondary
// emission line: each line's channel is computed from the calibrated gain
// and offset, and the peak height/total are sampled at that channel.
func (spect *Spectrum) AssignSecondaryLines() {
	for i := range secondary_lineList {
		ch := keVtoChannel(secondary_lineList[i].Energy, spect.Gain, spect.Offset)
		idx := int(ch)
		entry := new(Peak)
		entry.Channel = ch
		entry.Height = spect.Signal[idx]
		entry.Total = spect.SPE.Counts[idx]
		spect.Lines[secondary_lineList[i].Name] = entry
	}
}
|
package shared
import (
"context"
"database/sql"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/gomatrixserverlib"
)
// roomRecentEventsUpdater holds the state snapshotted while updating the
// latest events of a single room. It embeds transaction so all queries run
// inside the transaction captured at construction (or outside any
// transaction when it was constructed with useTxns == false, in which case
// the embedded txn is nil).
type roomRecentEventsUpdater struct {
	transaction
	d                       *Database
	roomNID                 types.RoomNID
	latestEvents            []types.StateAtEventAndReference
	lastEventIDSent         string
	currentStateSnapshotNID types.StateSnapshotNID
}
// NewRoomRecentEventsUpdater begins a database transaction, snapshots the
// room's latest event NIDs (selected for update so concurrent updaters for
// the same room serialize), the last event sent to the output log, and the
// current state snapshot NID, and returns an updater operating on them.
//
// When useTxns is false the transaction is committed immediately and the
// updater performs subsequent queries outside any transaction (txn == nil).
// NOTE(review): the Commit error in that path is deliberately ignored
// (nolint) — confirm that is acceptable for the backing database.
func NewRoomRecentEventsUpdater(d *Database, ctx context.Context, roomNID types.RoomNID, useTxns bool) (types.RoomRecentEventsUpdater, error) {
	txn, err := d.DB.Begin()
	if err != nil {
		return nil, err
	}
	eventNIDs, lastEventNIDSent, currentStateSnapshotNID, err :=
		d.RoomsTable.SelectLatestEventsNIDsForUpdate(ctx, txn, roomNID)
	if err != nil {
		txn.Rollback() // nolint: errcheck
		return nil, err
	}
	stateAndRefs, err := d.EventsTable.BulkSelectStateAtEventAndReference(ctx, txn, eventNIDs)
	if err != nil {
		txn.Rollback() // nolint: errcheck
		return nil, err
	}
	// Resolve the last-sent event NID to its string event ID; a zero NID
	// means nothing has been sent yet, so the ID stays empty.
	var lastEventIDSent string
	if lastEventNIDSent != 0 {
		lastEventIDSent, err = d.EventsTable.SelectEventID(ctx, txn, lastEventNIDSent)
		if err != nil {
			txn.Rollback() // nolint: errcheck
			return nil, err
		}
	}
	if !useTxns {
		txn.Commit() // nolint: errcheck
		txn = nil
	}
	return &roomRecentEventsUpdater{
		transaction{ctx, txn}, d, roomNID, stateAndRefs, lastEventIDSent, currentStateSnapshotNID,
	}, nil
}
// RoomVersion implements types.RoomRecentEventsUpdater.
// A lookup failure leaves the zero-value room version, matching the
// original best-effort behaviour.
func (u *roomRecentEventsUpdater) RoomVersion() (version gomatrixserverlib.RoomVersion) {
	v, _ := u.d.GetRoomVersionForRoomNID(u.ctx, u.roomNID)
	return v
}
// LatestEvents implements types.RoomRecentEventsUpdater.
// It returns the room's latest (forward extremity) events as loaded at
// construction time; the internal slice is returned directly, so callers
// must not mutate it.
func (u *roomRecentEventsUpdater) LatestEvents() []types.StateAtEventAndReference {
	return u.latestEvents
}
// LastEventIDSent implements types.RoomRecentEventsUpdater.
// It returns the event ID of the last event written to the output log for
// this room, or the empty string if nothing has been sent yet.
func (u *roomRecentEventsUpdater) LastEventIDSent() string {
	return u.lastEventIDSent
}
// CurrentStateSnapshotNID implements types.RoomRecentEventsUpdater.
// It returns the room's current state snapshot NID as loaded at
// construction time.
func (u *roomRecentEventsUpdater) CurrentStateSnapshotNID() types.StateSnapshotNID {
	return u.currentStateSnapshotNID
}
// StorePreviousEvents implements types.RoomRecentEventsUpdater.
// It records eventNID as a successor of each referenced previous event,
// stopping at the first insertion error.
func (u *roomRecentEventsUpdater) StorePreviousEvents(eventNID types.EventNID, previousEventReferences []gomatrixserverlib.EventReference) error {
	for _, prev := range previousEventReferences {
		err := u.d.PrevEventsTable.InsertPreviousEvent(u.ctx, u.txn, prev.EventID, prev.EventSHA256, eventNID)
		if err != nil {
			return err
		}
	}
	return nil
}
// IsReferenced implements types.RoomRecentEventsUpdater.
// It reports whether any stored event lists the given reference among its
// previous events: a successful lookup means referenced, sql.ErrNoRows
// means not referenced, and any other error is returned to the caller.
func (u *roomRecentEventsUpdater) IsReferenced(eventReference gomatrixserverlib.EventReference) (bool, error) {
	err := u.d.PrevEventsTable.SelectPreviousEventExists(u.ctx, u.txn, eventReference.EventID, eventReference.EventSHA256)
	switch err {
	case nil:
		return true, nil
	case sql.ErrNoRows:
		return false, nil
	default:
		return false, err
	}
}
// SetLatestEvents implements types.RoomRecentEventsUpdater.
// It replaces the room's latest-event NID list (plus the last-sent event
// and current state snapshot) with the values derived from latest.
func (u *roomRecentEventsUpdater) SetLatestEvents(
	roomNID types.RoomNID, latest []types.StateAtEventAndReference, lastEventNIDSent types.EventNID,
	currentStateSnapshotNID types.StateSnapshotNID,
) error {
	nids := make([]types.EventNID, 0, len(latest))
	for _, entry := range latest {
		nids = append(nids, entry.EventNID)
	}
	return u.d.RoomsTable.UpdateLatestEventNIDs(u.ctx, u.txn, roomNID, nids, lastEventNIDSent, currentStateSnapshotNID)
}
// HasEventBeenSent implements types.RoomRecentEventsUpdater.
// It reports whether the given event has already been written to the
// output log, delegating to the events table.
func (u *roomRecentEventsUpdater) HasEventBeenSent(eventNID types.EventNID) (bool, error) {
	return u.d.EventsTable.SelectEventSentToOutput(u.ctx, u.txn, eventNID)
}
// MarkEventAsSent implements types.RoomRecentEventsUpdater.
// It flags the given event as written to the output log, delegating to the
// events table.
func (u *roomRecentEventsUpdater) MarkEventAsSent(eventNID types.EventNID) error {
	return u.d.EventsTable.UpdateEventSentToOutput(u.ctx, u.txn, eventNID)
}
// MembershipUpdater returns a membership updater for the target user in this
// room, sharing this updater's context and transaction (which may be nil
// when the updater was created with useTxns == false).
func (u *roomRecentEventsUpdater) MembershipUpdater(targetUserNID types.EventStateKeyNID, targetLocal bool) (types.MembershipUpdater, error) {
	return u.d.membershipUpdaterTxn(u.ctx, u.txn, u.roomNID, targetUserNID, targetLocal)
}
|
package main
import . "../middleware"
// main is a pub/sub demo: it connects to a local broker over TCP, creates
// two topics ("perfumaria" and "eletronicos"), publishes a few messages,
// and then blocks while goroutines listen for notifications.
func main() {
	conn := Connection{}
	conn.CreateConnection("127.0.0.1", "8082", "tcp")
	conn.Start()
	session := TopicSession{}
	session.CreateSession(conn)
	topic := session.CreateTopic("perfumaria")
	topic2 := session.CreateTopic("eletronicos")
	publisher := session.CreateTopicPublisher(topic)
	publisher2 := session.CreateTopicPublisher(topic2)
	//publisher2 := session.CreateTopicPublisher(topic)
	subscriber1 := session.CreateTopicSubscriber(topic)
	subscriber2 := session.CreateTopicSubscriber(topic2)
	subscriber3 := session.CreateTopicSubscriber(topic)
	// NOTE(review): subscriber3 is immediately overwritten with a topic2
	// subscription, discarding the handle to the "perfumaria" subscription
	// created on the previous line — confirm whether both subscriptions
	// are meant to be kept.
	subscriber3 = session.CreateTopicSubscriber(topic2)
	// subscriber2 := session.CreateTopicSubscriber(topic)
	// subscriber3 := session.CreateTopicSubscriber(topic)
	subscriber1.GetTopic()
	subscriber2.GetTopic()
	subscriber3.GetTopic()
	publisher.Send(session.CreateMessage("Pau que nasce torto\nhttp://google.com.br", "perfumaria",1,"m1"))
	publisher.Send(session.CreateMessage("Menina quando...\nhttp://google.com.br", "perfumaria",1,"m2"))
	publisher2.Send(session.CreateMessage("novo macbook\nhttp://google.com.br", "eletronicos",1,"m3"))
	//publisher2.Send(session.CreateMessage("Nunca de endireita","arborismo", 5,"m2"))
	// Block until readNotifications signals completion on this channel.
	block := make(chan int)
	go notificationListener(&subscriber3)
	go readNotifications(&publisher2, &session, "eletronicos", block)
	<-block
}
|
// Package geocoder provides an easy way to use the Google Geocoding API
package geocoder
import (
"encoding/json"
"errors"
"log"
"net/http"
"strconv"
"strings"
"github.com/kelvins/geocoder/structs"
)
// ApiKey holds the API key provided by Google. The user should set it
// before issuing requests; when empty, requests are sent without a key.
var ApiKey string

// geocodeApiUrl is the base URL of the Google Geocoding web service.
const (
	geocodeApiUrl = "https://maps.googleapis.com/maps/api/geocode/json?"
)
// Address structure used in the Geocoding and GeocodingReverse functions.
//
// Note: the FormattedAddress field should be used only for GeocodingReverse,
// to receive the formatted address from the Google Geocoding API. It is not
// used by the Geocoding function.
type Address struct {
	Street           string
	Number           int
	Neighborhood     string
	District         string
	City             string
	County           string
	State            string
	Country          string
	PostalCode       string
	FormattedAddress string
	// Types holds the first "types" entry of the geocoding result.
	Types string
}
// Location is a latitude/longitude pair used as the output of Geocoding and
// the input of GeocodingReverse.
type Location struct {
	Latitude  float64
	Longitude float64
}
// FormatAddress assembles the non-empty Address fields into a single
// comma-separated string, in the fixed order: number, street, neighborhood,
// district, postal code, city, county, state, country. Empty fields are
// skipped; a zero or negative Number is omitted entirely.
func (address *Address) FormatAddress() string {
	var parts []string
	if address.Number > 0 {
		parts = append(parts, strconv.Itoa(address.Number))
	}
	for _, field := range []string{
		address.Street,
		address.Neighborhood,
		address.District,
		address.PostalCode,
		address.City,
		address.County,
		address.State,
		address.Country,
	} {
		if field != "" {
			parts = append(parts, field)
		}
	}
	return strings.Join(parts, ", ")
}
// httpRequest sends a GET request to the given URL, decodes the JSON
// response into a structs.Results, and maps any non-"OK" Google status
// value to a descriptive error.
//
// Fixes: removed the redundant `break` statements (Go switch cases never
// fall through) and hoisted the repeated strings.ToUpper call.
// NOTE(review): the client has no timeout; consider setting http.Client.Timeout.
func httpRequest(url string) (structs.Results, error) {
	var results structs.Results

	// Build the request.
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return results, err
	}

	// Send the request via a client.
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return results, err
	}
	// Callers must close resp.Body when done reading from it.
	defer resp.Body.Close()

	// Decode the JSON stream directly from the body.
	if err = json.NewDecoder(resp.Body).Decode(&results); err != nil {
		return results, err
	}

	// "OK" indicates the address was analyzed and at least one geographic
	// code was returned; every other known status becomes an error.
	switch strings.ToUpper(results.Status) {
	case "OK":
		// No error.
	case "ZERO_RESULTS":
		err = errors.New("No results found.")
	case "OVER_QUERY_LIMIT":
		err = errors.New("You are over your quota.")
	case "REQUEST_DENIED":
		err = errors.New("Your request was denied.")
	case "INVALID_REQUEST":
		err = errors.New("Probably the query is missing.")
	case "UNKNOWN_ERROR":
		err = errors.New("Server error. Please, try again.")
	}
	return results, err
}
// Geocoding converts an Address structure to a Location (latitude and
// longitude) using the Google Geocoding API.
//
// Fix: guard against an empty result set before indexing Results[0], which
// previously could panic if the API returned "OK" with no results.
func Geocoding(address Address) (Location, error) {
	var location Location

	// Google expects '+'-separated terms in the address query.
	formattedAddress := address.FormatAddress()
	formattedAddress = strings.Replace(formattedAddress, " ", "+", -1)

	// Create the URL based on the formatted address.
	url := geocodeApiUrl + "address=" + formattedAddress
	// Use the API Key if it was set.
	if ApiKey != "" {
		url += "&key=" + ApiKey
	}

	// Send the HTTP request and get the results.
	results, err := httpRequest(url)
	if err != nil {
		log.Println(err)
		return location, err
	}
	// Robustness: never index an empty result slice.
	if len(results.Results) == 0 {
		err = errors.New("No results found.")
		log.Println(err)
		return location, err
	}

	// Extract latitude and longitude from the first result.
	location.Latitude = results.Results[0].Geometry.Location.Lat
	location.Longitude = results.Results[0].Geometry.Location.Lng
	return location, nil
}
// convertResultsToAddress maps each geocoding result in results to an
// Address structure, translating Google's address components into fields.
//
// Fixes: removed redundant `break` statements (Go switch cases never fall
// through), merged duplicate cases that set the same field, and guarded the
// Types[0] access, which previously could panic on a result with no types.
func convertResultsToAddress(results structs.Results) (addresses []Address) {
	for index := 0; index < len(results.Results); index++ {
		var address Address
		// Map each address component to the matching Address field.
		for _, component := range results.Results[index].AddressComponents {
			// A component may carry several type labels; check them all.
			for _, componentType := range component.Types {
				switch componentType {
				case "route":
					address.Street = component.LongName
				case "street_number":
					address.Number, _ = strconv.Atoi(component.LongName)
				case "neighborhood":
					address.Neighborhood = component.LongName
				case "sublocality", "sublocality_level_1":
					address.District = component.LongName
				case "locality", "administrative_area_level_3":
					address.City = component.LongName
				case "administrative_area_level_2":
					address.County = component.LongName
				case "administrative_area_level_1":
					address.State = component.LongName
				case "country":
					address.Country = component.LongName
				case "postal_code":
					address.PostalCode = component.LongName
				}
			}
		}
		address.FormattedAddress = results.Results[index].FormattedAddress
		// Robustness: a result with no type entries must not panic.
		if len(results.Results[index].Types) > 0 {
			address.Types = results.Results[index].Types[0]
		}
		addresses = append(addresses, address)
	}
	return
}
// GeocodingReverse converts a Location (latitude/longitude) into the list
// of matching Address structures returned by the Google Geocoding API.
func GeocodingReverse(location Location) ([]Address, error) {
	var addresses []Address
	// Build the request URL (no language override) and query the API.
	results, err := httpRequest(getURLGeocodingReverse(location, ""))
	if err != nil {
		log.Println(err)
		return addresses, err
	}
	addresses = convertResultsToAddress(results)
	return addresses, nil
}
// GeocodingReverseIntl behaves like GeocodingReverse but asks the API to
// localize the response into the given language code.
func GeocodingReverseIntl(location Location, language string) ([]Address, error) {
	var addresses []Address
	results, err := httpRequest(getURLGeocodingReverse(location, language))
	if err != nil {
		log.Println(err)
		return addresses, err
	}
	addresses = convertResultsToAddress(results)
	return addresses, nil
}
// getURLGeocodingReverse builds the reverse-geocoding request URL for the
// given location, optionally appending the API key and a response language.
func getURLGeocodingReverse(location Location, language string) string {
	// Render both coordinates with 8 decimal places.
	lat := strconv.FormatFloat(location.Latitude, 'f', 8, 64)
	lng := strconv.FormatFloat(location.Longitude, 'f', 8, 64)
	url := geocodeApiUrl + "latlng=" + lat + "," + lng
	if ApiKey != "" {
		url += "&key=" + ApiKey
	}
	if language != "" {
		url += "&language=" + language
	}
	return url
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/categorizationstatus"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/memorystatus"
)
// ModelSizeStats type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/ml/_types/Model.ts#L56-L78
//
// NOTE(review): this type is generated from the elasticsearch-specification
// (see the "DO NOT EDIT" header above); changes belong upstream.
type ModelSizeStats struct {
	AssignmentMemoryBasis         *string                                    `json:"assignment_memory_basis,omitempty"`
	BucketAllocationFailuresCount int64                                      `json:"bucket_allocation_failures_count"`
	CategorizationStatus          categorizationstatus.CategorizationStatus  `json:"categorization_status"`
	CategorizedDocCount           int                                        `json:"categorized_doc_count"`
	DeadCategoryCount             int                                        `json:"dead_category_count"`
	FailedCategoryCount           int                                        `json:"failed_category_count"`
	FrequentCategoryCount         int                                        `json:"frequent_category_count"`
	JobId                         string                                     `json:"job_id"`
	LogTime                       DateTime                                   `json:"log_time"`
	MemoryStatus                  memorystatus.MemoryStatus                  `json:"memory_status"`
	ModelBytes                    ByteSize                                   `json:"model_bytes"`
	ModelBytesExceeded            ByteSize                                   `json:"model_bytes_exceeded,omitempty"`
	ModelBytesMemoryLimit         ByteSize                                   `json:"model_bytes_memory_limit,omitempty"`
	PeakModelBytes                ByteSize                                   `json:"peak_model_bytes,omitempty"`
	RareCategoryCount             int                                        `json:"rare_category_count"`
	ResultType                    string                                     `json:"result_type"`
	Timestamp                     *int64                                     `json:"timestamp,omitempty"`
	TotalByFieldCount             int64                                      `json:"total_by_field_count"`
	TotalCategoryCount            int                                        `json:"total_category_count"`
	TotalOverFieldCount           int64                                      `json:"total_over_field_count"`
	TotalPartitionFieldCount      int64                                      `json:"total_partition_field_count"`
}
// UnmarshalJSON implements json.Unmarshaler for ModelSizeStats.
// It walks the JSON tokens one property at a time and dispatches on the
// property name; numeric fields accept either a JSON number or a quoted
// numeric string. NOTE(review): generated code — the dec.Decode errors for
// the interface{}-typed numeric fields are ignored throughout.
func (s *ModelSizeStats) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		// Next top-level token: a property name, structural punctuation,
		// or EOF when the document is exhausted.
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		switch t {
		case "assignment_memory_basis":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			// Unquote string values; if the raw value is not a quoted
			// string, keep the raw bytes as-is.
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.AssignmentMemoryBasis = &o
		case "bucket_allocation_failures_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.BucketAllocationFailuresCount = value
			case float64:
				f := int64(v)
				s.BucketAllocationFailuresCount = f
			}
		case "categorization_status":
			if err := dec.Decode(&s.CategorizationStatus); err != nil {
				return err
			}
		case "categorized_doc_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.CategorizedDocCount = value
			case float64:
				f := int(v)
				s.CategorizedDocCount = f
			}
		case "dead_category_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.DeadCategoryCount = value
			case float64:
				f := int(v)
				s.DeadCategoryCount = f
			}
		case "failed_category_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.FailedCategoryCount = value
			case float64:
				f := int(v)
				s.FailedCategoryCount = f
			}
		case "frequent_category_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.FrequentCategoryCount = value
			case float64:
				f := int(v)
				s.FrequentCategoryCount = f
			}
		case "job_id":
			if err := dec.Decode(&s.JobId); err != nil {
				return err
			}
		case "log_time":
			if err := dec.Decode(&s.LogTime); err != nil {
				return err
			}
		case "memory_status":
			if err := dec.Decode(&s.MemoryStatus); err != nil {
				return err
			}
		case "model_bytes":
			if err := dec.Decode(&s.ModelBytes); err != nil {
				return err
			}
		case "model_bytes_exceeded":
			if err := dec.Decode(&s.ModelBytesExceeded); err != nil {
				return err
			}
		case "model_bytes_memory_limit":
			if err := dec.Decode(&s.ModelBytesMemoryLimit); err != nil {
				return err
			}
		case "peak_model_bytes":
			if err := dec.Decode(&s.PeakModelBytes); err != nil {
				return err
			}
		case "rare_category_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.RareCategoryCount = value
			case float64:
				f := int(v)
				s.RareCategoryCount = f
			}
		case "result_type":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			// Same unquote-or-keep-raw handling as assignment_memory_basis.
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.ResultType = o
		case "timestamp":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.Timestamp = &value
			case float64:
				f := int64(v)
				s.Timestamp = &f
			}
		case "total_by_field_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.TotalByFieldCount = value
			case float64:
				f := int64(v)
				s.TotalByFieldCount = f
			}
		case "total_category_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.TotalCategoryCount = value
			case float64:
				f := int(v)
				s.TotalCategoryCount = f
			}
		case "total_over_field_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.TotalOverFieldCount = value
			case float64:
				f := int64(v)
				s.TotalOverFieldCount = f
			}
		case "total_partition_field_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.TotalPartitionFieldCount = value
			case float64:
				f := int64(v)
				s.TotalPartitionFieldCount = f
			}
		}
	}
	return nil
}
// NewModelSizeStats returns a zero-valued ModelSizeStats ready for use.
func NewModelSizeStats() *ModelSizeStats {
	r := &ModelSizeStats{}

	return r
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/BurntSushi/toml"
"github.com/fsnotify/fsnotify"
"learn_go/logger"
"log"
"os"
"reflect"
"strconv"
"sync"
"time"
//"github.com/filecoin-project/go-state-types/dline"
"github.com/spf13/viper"
)
// Log file name fragments used when composing sector/windowpost log files.
// NOTE(review): each value starts with a space — presumably a prefix (e.g.
// miner ID or date) is prepended by the caller; confirm the leading space
// is intentional before changing it.
var (
	faultLogFileName      = " fault_sectors_"
	skipLogFileName       = " skip_sectors_"
	recoverLogFileName    = " recovered_sectors_"
	windowPostLogFileName = " windowpost_"
)
// Package-level scheduling state. Each taskPair tracks the remaining task
// slots per worker hostname; the "*Record" set holds the currently applied
// limits and the "*New" set holds freshly loaded configuration. The two
// lookup maps index these by short task name ("miner", "ap", "pc1", ...).
var (
	minerWorkerRecord taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	apWorkerRecord    taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	pc1WorkerRecord   taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	pc2WorkerRecord   taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	c1WorkerRecord    taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	c2WorkerRecord    taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	minerWorkerNew    taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	apWorkerNew       taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	pc1WorkerNew      taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	pc2WorkerNew      taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	c1WorkerNew       taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	c2WorkerNew       taskPair = taskPair{workerRemainingMap: make(map[string]int, 50)}
	// SchedLogPath is the directory where scheduler log files are written.
	SchedLogPath = "./logs"
	taskWorkerRecord = map[string]taskPair{"miner": minerWorkerRecord, "ap": apWorkerRecord, "pc1": pc1WorkerRecord, "pc2": pc2WorkerRecord, "c1": c1WorkerRecord, "c2": c2WorkerRecord}
	taskNewRecord    = map[string]taskPair{"miner": minerWorkerNew, "ap": apWorkerNew, "pc1": pc1WorkerNew, "pc2": pc2WorkerNew, "c1": c1WorkerNew, "c2": c2WorkerNew}
	scheduleTaskMaps = make(map[string]sync.Map, 1000)
	// taskNameMapping translates short task names to sealing task identifiers.
	taskNameMapping = map[string]string{"miner": "miner", "ap": "seal/v0/addpiece", "pc1": "seal/v0/precommit/1", "pc2": "seal/v0/precommit/2", "c1": "seal/v0/commit/1", "c2": "seal/v0/commit/2"}
	// Locks guards concurrent access to the shared scheduling state above.
	Locks sync.RWMutex
)
// tomlConfig mirrors the TOML configuration file layout: one section per
// sealing stage (miner, addpiece, precommit 1/2, commit 1/2).
type tomlConfig struct {
	Miner miner
	AP    ap
	PC1   pc1
	PC2   pc2
	C1    c1
	C2    c2
}
// Each section type below has the same shape: Hostnames lists the workers,
// Value gives a per-host limit aligned by index with Hostnames, and Default,
// when >= 0, overrides every per-host value (see parseConfigTomlNew).
// TaskName keys the section into the task maps.

// miner holds the scheduling limits for the miner task.
type miner struct {
	Hostnames []string
	Value     []int
	Default   int
	TaskName  string
}

// ap holds the scheduling limits for the addpiece task.
type ap struct {
	Hostnames []string
	Value     []int
	Default   int
	TaskName  string
}

// pc1 holds the scheduling limits for the precommit/1 task.
type pc1 struct {
	Hostnames []string
	Value     []int
	Default   int
	TaskName  string
}

// pc2 holds the scheduling limits for the precommit/2 task.
type pc2 struct {
	Hostnames []string
	Value     []int
	Default   int
	TaskName  string
}

// c1 holds the scheduling limits for the commit/1 task.
type c1 struct {
	Hostnames []string
	Value     []int
	Default   int
	TaskName  string
}

// c2 holds the scheduling limits for the commit/2 task.
type c2 struct {
	Hostnames []string
	Value     []int
	Default   int
	TaskName  string
}
// parseConfigTomlNew copies the parsed TOML config into the given record map
// (temp must be a map[string]taskPair, e.g. taskWorkerRecord/taskNewRecord).
// For every section: each hostname gets its matching Value entry when
// Default is negative, otherwise the shared Default.
//
// Panics (as before) if temp is not a map[string]taskPair or if a section's
// Value slice is shorter than its Hostnames slice while Default < 0.
func parseConfigTomlNew(config *tomlConfig, temp interface{}) {
	mapData := temp.(map[string]taskPair)
	// The six section types are structurally identical, so one helper
	// replaces six copy-pasted loops.
	applyTaskLimits(mapData, config.Miner.TaskName, config.Miner.Hostnames, config.Miner.Value, config.Miner.Default)
	applyTaskLimits(mapData, config.AP.TaskName, config.AP.Hostnames, config.AP.Value, config.AP.Default)
	applyTaskLimits(mapData, config.PC1.TaskName, config.PC1.Hostnames, config.PC1.Value, config.PC1.Default)
	applyTaskLimits(mapData, config.PC2.TaskName, config.PC2.Hostnames, config.PC2.Value, config.PC2.Default)
	applyTaskLimits(mapData, config.C1.TaskName, config.C1.Hostnames, config.C1.Value, config.C1.Default)
	applyTaskLimits(mapData, config.C2.TaskName, config.C2.Hostnames, config.C2.Value, config.C2.Default)
}

// applyTaskLimits fills mapData[taskName].workerRemainingMap for one config
// section: per-host values[i] when def < 0, otherwise def for every host.
func applyTaskLimits(mapData map[string]taskPair, taskName string, hostnames []string, values []int, def int) {
	for i, host := range hostnames {
		if def < 0 {
			mapData[taskName].workerRemainingMap[host] = values[i]
		} else {
			mapData[taskName].workerRemainingMap[host] = def
		}
	}
}
// loadTomlConfig decodes ./worker_task_config.toml and merges the result
// into the given record map (a map[string]taskPair). A decode failure is
// fatal: the process exits via log.Fatal.
func loadTomlConfig(temp interface{}) {
	var cfg tomlConfig
	_, err := toml.DecodeFile("./worker_task_config.toml", &cfg)
	if err != nil {
		log.Fatal(err.Error())
	}
	parseConfigTomlNew(&cfg, temp)
}
// taskPair holds, keyed by worker hostname, how many more tasks of a single
// task type that worker is allowed to run.
type taskPair struct {
	workerRemainingMap map[string]int
}
// LOTUS_MINER_PATH config path
// const LOTUS_MINER_PATH = "/home/qh/zhou_project"
// sectorLog appends one sector-status record to a per-day, per-deadline log
// file under SchedLogPath.
//
// logLevel selects the target file (one of the *LogFileName variables; any
// other value logs a window-post success line). sectors is the affected
// sector list, index the deadline index embedded in the file name, and err
// is only used for window-post failure records.
func sectorLog(logLevel string, sectors []int, index uint64, err error) {
	// One sub-directory per day, e.g. ./logs/2006-01-02.
	sectorStatusLogPath := SchedLogPath + "/" + time.Now().Format("2006-01-02")
	indexstr := strconv.Itoa(int(index)) + ".log"
	timePrefix := time.Now().Format("2006-01-02 15:04:05")
	switch logLevel {
	case faultLogFileName:
		// FIX: "%V" is not a valid fmt verb; use "%v".
		logger.DebugWithFilePath(sectorStatusLogPath+"/"+timePrefix+faultLogFileName+indexstr, "len is %v data is %v \n", len(sectors), sectors)
	case skipLogFileName:
		logger.DebugWithFilePath(sectorStatusLogPath+"/"+timePrefix+skipLogFileName+indexstr, "len is %v data is %v \n", len(sectors), sectors)
	case recoverLogFileName:
		logger.DebugWithFilePath(sectorStatusLogPath+"/"+timePrefix+recoverLogFileName+indexstr, "len is %v data is %v \n", len(sectors), sectors)
	case windowPostLogFileName:
		// FIX: window-post failures were previously written to the
		// recovered-sectors file; route them to the windowpost file.
		logger.DebugWithFilePath(sectorStatusLogPath+"/"+timePrefix+windowPostLogFileName+indexstr, "submitPost failed: deadline is %v and err is %+v \n", index, err)
	default:
		logger.DebugWithFilePath(sectorStatusLogPath+"/"+timePrefix+windowPostLogFileName+indexstr, "window post submit successfully %v \n", "")
	}
}
// getSysPathEnv returns the lotus miner base path taken from the
// LOTUS_MINER_PATH environment variable ("" when unset).
func getSysPathEnv() string {
	return os.Getenv("LOTUS_MINER_PATH")
}
// loadTaskConfigOld reads ./worker_task_config.toml through viper and fills
// taskWorkerRecord with per-worker task limits for every task type.
// Read errors are logged but not fatal.
func loadTaskConfigOld() {
	logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "Loading worker_task_config.toml %v\n", "")
	v := viper.New()
	//v.SetConfigFile(getSysPathEnv() + "./worker_task_config.toml")
	v.SetConfigFile("./worker_task_config.toml")
	if err := v.ReadInConfig(); err != nil { // locate and read the configuration data
		// TODO: handle the config-file read error instead of only logging it
		logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "Fatal error config file: %s \n", err)
	}else{
		for taskName, taskType := range taskWorkerRecord {
			// taskType is a copy of the map value, but its workerRemainingMap
			// header shares the same underlying map, so writes made by
			// parseConfigToml are visible through taskWorkerRecord.
			parseConfigToml(taskName, v, &taskType)
		}
		logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "config content: %v\n", taskWorkerRecord)
	}
	logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "Loading worker_task_config.toml successful! %v\n", "")
}
// notifyConfigChange watches the worker task config file and prints a
// message whenever it changes on disk.
func notifyConfigChange() {
	v := viper.New()
	// FIX: viper must know which config file to watch; the fresh instance
	// previously had no file set, so WatchConfig watched nothing.
	v.SetConfigFile("./worker_task_config.toml")
	v.WatchConfig()
	v.OnConfigChange(func(e fsnotify.Event) {
		fmt.Println("config file changed:", e.Name)
	})
}
// parseConfigToml reads the [key] section of the viper config and fills
// taskType.workerRemainingMap: each hostname gets its matching "value" entry
// when "default" is negative, otherwise the shared default.
func parseConfigToml(key string, v *viper.Viper, taskType *taskPair) {
	section := v.Sub(key)
	if section == nil {
		// FIX: v.Sub returns nil for a missing section; dereferencing it
		// previously panicked.
		return
	}
	hostsRaw := section.Get("hostnames")
	valuesRaw := section.Get("value")
	defaultValue := section.GetInt("default")
	if hostsRaw == nil {
		// FIX: reflect.TypeOf(nil) panics; a section without "hostnames"
		// simply contributes nothing.
		return
	}
	switch reflect.TypeOf(hostsRaw).Kind() {
	case reflect.Slice, reflect.Array:
		hosts := reflect.ValueOf(hostsRaw)
		values := reflect.ValueOf(valuesRaw)
		for i := 0; i < hosts.Len(); i++ {
			hostname := fmt.Sprintf("%s", hosts.Index(i))
			if defaultValue < 0 {
				// Best-effort per-host value: a malformed entry parses to 0,
				// matching the original behavior of ignoring Atoi errors.
				numberTmp := fmt.Sprintf("%v", values.Index(i))
				number, _ := strconv.Atoi(numberTmp)
				taskType.workerRemainingMap[hostname] = number
			} else {
				taskType.workerRemainingMap[hostname] = defaultValue
			}
		}
	}
}
// removeWorkerFromTaskWorkerRemaining deletes hostname from the remaining
// counter map of every sealing task type in scheduleTaskMaps.
// NOTE(review): `securityMap = scheduleTaskMaps[...]` copies the sync.Map by
// value (go vet: copylocks), so Delete runs on a copy — verify the entry is
// actually removed from the shared map; storing *sync.Map would avoid this.
func removeWorkerFromTaskWorkerRemaining(hostname string) {
	logger.DebugWithFilePath(SchedLogPath+"/new_schedule_remove.log", "removing worker from map: %v\n", hostname)
	taskTypeList := []string{"seal/v0/addpiece", "seal/v0/precommit/1", "seal/v0/precommit/2", "seal/v0/commit/1", "seal/v0/commit/2"}
	for i := 0; i < len(taskTypeList); i++ {
		var securityMap sync.Map
		securityMap = scheduleTaskMaps[taskTypeList[i]]
		securityMap.Delete(hostname)
	}
	logger.DebugWithFilePath(SchedLogPath+"/new_schedule_remove.log", "removing worker from map done: %v\n", hostname)
	fmt.Println("delete completed")
}
// checkWorkerExistence reports whether hostname has a non-zero remaining
// count under any task type in taskWorkerRecord, logging the outcome.
func checkWorkerExistence(hostname string) bool {
	logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "checking worker existence: %v\n", hostname)
	exist := false
	for _, pair := range taskWorkerRecord {
		if pair.workerRemainingMap[hostname] != 0 {
			exist = true
		}
	}
	msg := "This worker does not exist: %v\n"
	if exist {
		msg = "This worker exists: %v\n"
	}
	logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", msg, hostname)
	return exist
}
// addWorkerToTaskWorkerRemaining registers hostname in scheduleTaskMaps,
// seeding each task type's remaining counter from taskWorkerRecord. When the
// worker is unknown, the config file is re-read (at most 6 attempts) before
// giving up and logging to check_config.log.
func addWorkerToTaskWorkerRemaining(hostname string) {
	i := 0
	for !checkWorkerExistence(hostname) {
		i = i + 1
		logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "%v is not in current record, re-initializing record from config file...\n", hostname)
		//loadTomlConfig(taskWorkerRecord)
		loadTaskConfigOld()
		logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "loading config done! %v\n", "")
		if i > 5 {
			logger.DebugWithFilePath(SchedLogPath+"/check_config.log", "Check config file for : %v\n", hostname)
			break
		}
	}
	logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "trying to add new worker to map: %v\n", hostname)
	for tasktype, taskWorkerPair := range taskWorkerRecord {
		//logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "\t Inside for loop: %v\n", "")
		srcValue, _ := taskWorkerPair.workerRemainingMap[hostname]
		//logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "\t source value: %v\n", srcValue)
		_, ok := scheduleTaskMaps[taskNameMapping[tasktype]]
		if !ok {
			// First sighting of this task type: create its sync.Map entry.
			// NOTE(review): scheduleTaskMaps stores sync.Map by value, so
			// this stores a copy (go vet: copylocks); the dummy 123/456
			// store-then-delete looks like a workaround — confirm that state
			// is actually shared as intended.
			var tmpSyncMap sync.Map
			tmpSyncMap.Store(123, 456)
			scheduleTaskMaps[taskNameMapping[tasktype]] = tmpSyncMap
			tmpSyncMap.Delete(123)
		}
		curSyncMap, ok := scheduleTaskMaps[taskNameMapping[tasktype]]
		if !ok {
			logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "\t Can not get curSyncMap for %v: %v\n", tasktype, scheduleTaskMaps)
			return
		}
		// Only record workers with a non-zero allowance for this task type.
		if srcValue != 0 {
			curSyncMap.Store(hostname, srcValue)
		}
	}
	logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "adding new worker to map done: %v\n", hostname)
	for tasktype, securityMap := range scheduleTaskMaps {
		logger.DebugWithFilePath(SchedLogPath+"/new_schedule.log", "Current remaining map for %v: %v\n", tasktype, stringfySyncMap(&securityMap))
	}
}
func stringfySyncMap(amap *sync.Map) string {
m := map[string]interface{}{}
amap.Range(func(key, value interface{}) bool {
m[fmt.Sprint(key)] = value
return true
})
b, err := json.MarshalIndent(m, "", " ")
if err != nil {
panic(err)
}
return string(b)
}
// 根据主机名 - hostname,返回还可以做任务的数目
// 如果是mer,返回-10000
// 如果是未分类的任务,返回-20000
func parseSyncMap(task, hostname string) int {
var securityMap sync.Map
securityMap, ok := scheduleTaskMaps[task]
if !ok {
// unclassifed task marker as -20000
return -20000
}
hostname = "hello"
if temptask, ok := securityMap.Load(hostname); ok {
fmt.Println(temptask,reflect.TypeOf(temptask))
currentVaule := temptask.(int)
return currentVaule
}
return 0
}
// testLog exercises sectorLog's default (window-post success) branch with an
// empty sector list and deadline index 1.
func testLog() {
	var sectors []int
	sectorLog("", sectors, uint64(1), nil)
}
// UpdateRecordConfig re-reads the task config (after a 60s delay) into
// taskNewRecord and applies the per-worker delta (new limit - old limit) to
// the live counters in scheduleTaskMaps, under Locks.
// NOTE(review): the inner `for true` loop spins forever if the task type
// never appears in scheduleTaskMaps, and the fmt.Println calls look like
// debug output — confirm both before production use.
func UpdateRecordConfig() {
	fmt.Println("=================")
	time.Sleep(time.Second*60)
	loadTomlConfig(taskNewRecord)
	fmt.Println(taskNewRecord)
	for taskType, taskWorkerPair := range taskNewRecord {
		for hostname, _ := range taskWorkerPair.workerRemainingMap {
			Locks.Lock()
			currentValue := parseSyncMap(taskNameMapping[taskType], hostname)
			fmt.Println(currentValue,taskNewRecord[taskType].workerRemainingMap[hostname],taskWorkerRecord[taskType].workerRemainingMap[hostname])
			// Delta between the freshly loaded limit and the active limit.
			newNumber := taskNewRecord[taskType].workerRemainingMap[hostname] - taskWorkerRecord[taskType].workerRemainingMap[hostname]
			fmt.Println(newNumber)
			for true{
				securityMap,ok := scheduleTaskMaps[taskNameMapping[taskType]]
				fmt.Println("test ok: ",ok)
				if ok {
					securityMap.Store(hostname, currentValue + newNumber)
					break
				}
			}
			fmt.Println("!!!! should be: ", currentValue + newNumber)
			Locks.Unlock()
		}
	}
	// TODO x'
	//taskWorkerRecord, taskNewRecord = taskNewRecord, taskWorkerRecord
	fmt.Println("======================================================")
	for taskType, securityMap := range scheduleTaskMaps {
		logger.DebugWithFilePath(SchedLogPath+"/reload_task_config.log", "Current remaining map for %v: %v\n", taskType, stringfySyncMap(&securityMap))
	}
	logger.DebugWithFilePath(SchedLogPath+"/reload_task_config.log", "\n\n Previous config map is: %v \n\n Current config map is %v\n\n", taskNewRecord, taskWorkerRecord)
}
// main drives the scheduler bookkeeping demo: registers a worker, applies a
// config reload delta, then exercises element access on a map of sync.Map.
func main() {
	//loadTomlConfig(taskWorkerRecord)
	addWorkerToTaskWorkerRemaining("xiaohong")
	UpdateRecordConfig()
	// Demonstrate reading a value back out of a map[string]sync.Map.
	ss := make(map[string]sync.Map, 100)
	var s1 sync.Map
	s1.Store("hello", 0)
	ss["ap"] = s1
	s2 := ss["ap"]
	if val, ok := s2.Load("hello"); ok {
		if val != nil {
			fmt.Println(val, reflect.TypeOf(val))
		} else {
			fmt.Println(nil)
		}
	}
	// FIX: the function previously ended with a bare `panic()`, which does
	// not compile (panic requires an argument) and appears to have been
	// debug scaffolding; it has been removed.
}
|
package virtual_security
import (
"reflect"
"testing"
"time"
)
// testStockContractComponent is a hand-rolled stub of iStockContractComponent
// for tests: each overridden method returns the corresponding canned field.
type testStockContractComponent struct {
	iStockContractComponent
	isContractableTime1         bool
	confirmStockOrderContract1  *confirmContractResult
	confirmMarginOrderContract1 *confirmContractResult
}
// isContractableTime returns the canned isContractableTime1 value.
func (t *testStockContractComponent) isContractableTime(StockExecutionCondition, time.Time) bool {
	return t.isContractableTime1
}

// confirmStockOrderContract returns the canned confirmStockOrderContract1 value.
func (t *testStockContractComponent) confirmStockOrderContract(*stockOrder, *symbolPrice, time.Time) *confirmContractResult {
	return t.confirmStockOrderContract1
}

// confirmMarginOrderContract returns the canned confirmMarginOrderContract1 value.
func (t *testStockContractComponent) confirmMarginOrderContract(*marginOrder, *symbolPrice, time.Time) *confirmContractResult {
	return t.confirmMarginOrderContract1
}
func Test_newStockContractComponent(t *testing.T) {
t.Parallel()
want := &stockContractComponent{}
got := newStockContractComponent()
if !reflect.DeepEqual(want, got) {
t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), want, got)
}
}
// Test_stockContractComponent_isContractableTime checks, per execution
// condition and clock time, whether an order can contract in the current
// session (morning / afternoon).
// FIX: three case names contradicted their data or expected result; the
// names were corrected to match the asserted behavior (the want values are
// unchanged).
func Test_stockContractComponent_isContractableTime(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name       string
		stockOrder *stockOrder
		arg1       StockExecutionCondition
		arg2       time.Time
		want       bool
	}{
		{name: "場が前場でザラバで約定する注文であればtrue",
			arg1: StockExecutionConditionMO,
			arg2: time.Date(0, 1, 1, 10, 0, 0, 0, time.Local),
			want: true},
		{name: "場が前場で前場の寄りで約定する注文であればtrue",
			stockOrder: &stockOrder{ExecutionCondition: StockExecutionConditionMOMO},
			arg1:       StockExecutionConditionMOMO,
			arg2:       time.Date(0, 1, 1, 10, 0, 0, 0, time.Local),
			want:       true},
		{name: "場が前場で前場の引けで約定する注文であればtrue",
			arg1: StockExecutionConditionMOMC,
			arg2: time.Date(0, 1, 1, 11, 30, 0, 0, time.Local),
			want: true},
		{name: "場が前場で後場の寄りで約定する注文であればfalse",
			arg1: StockExecutionConditionMOAO,
			arg2: time.Date(0, 1, 1, 10, 0, 0, 0, time.Local),
			want: false},
		// FIX: name said 前場の引け, but MOAC is the afternoon close.
		{name: "場が前場で後場の引けで約定する注文であればfalse",
			arg1: StockExecutionConditionMOAC,
			arg2: time.Date(0, 1, 1, 10, 0, 0, 0, time.Local),
			want: false},
		{name: "場が後場でザラバで約定する注文であればtrue",
			arg1: StockExecutionConditionMO,
			arg2: time.Date(0, 1, 1, 14, 0, 0, 0, time.Local),
			want: true},
		{name: "場が後場で前場の寄りで約定する注文であればfalse",
			arg1: StockExecutionConditionMOMO,
			arg2: time.Date(0, 1, 1, 14, 0, 0, 0, time.Local),
			want: false},
		// FIX: name claimed true, but the expected result is false.
		{name: "場が後場で前場の引けで約定する注文であればfalse",
			arg1: StockExecutionConditionMOMC,
			arg2: time.Date(0, 1, 1, 14, 0, 0, 0, time.Local),
			want: false},
		// FIX: name claimed false, but the expected result is true.
		{name: "場が後場で後場の寄りで約定する注文であればtrue",
			arg1: StockExecutionConditionMOAO,
			arg2: time.Date(0, 1, 1, 14, 0, 0, 0, time.Local),
			want: true},
		{name: "場が後場で後場の引けで約定する注文であればtrue",
			arg1: StockExecutionConditionMOAC,
			arg2: time.Date(0, 1, 1, 15, 0, 0, 0, time.Local),
			want: true},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			component := &stockContractComponent{}
			got := component.isContractableTime(test.arg1, test.arg2)
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
// Test_stockContractComponent_confirmContractItayoseMO covers market-order
// contract confirmation during itayose (auction matching around open/close):
// contract on the opposing quote when there is no current price, or on a
// current price no older than 5s.
func Test_stockContractComponent_confirmContractItayoseMO(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name  string
		side  Side
		price *symbolPrice
		now   time.Time
		want  *confirmContractResult
	}{
		{name: "引数がnilなら約定しない",
			side:  SideBuy,
			price: nil,
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: false}},
		{name: "現値がなく、買い注文なら、売り気配値で約定する",
			side:  SideBuy,
			price: &symbolPrice{Ask: 1000},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "現値がなく、買い注文でも、売り気配値がなければ約定しない",
			side:  SideBuy,
			price: &symbolPrice{},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: false}},
		{name: "現値がなく、売り注文なら、買い気配値で約定する",
			side:  SideSell,
			price: &symbolPrice{Bid: 900},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        900,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "現値がなく、売り注文でも、買い気配値がなければ約定しない",
			side:  SideSell,
			price: &symbolPrice{},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: false}},
		{name: "現値があっても、現値時刻が5s以内でなければ約定しない",
			side:  SideSell,
			price: &symbolPrice{Price: 1100, PriceTime: time.Date(2021, 5, 12, 10, 59, 55, 0, time.Local)},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: false}},
		{name: "現値があって、現値時刻が5s以内なら、現値で約定する",
			side:  SideSell,
			price: &symbolPrice{Price: 1100, PriceTime: time.Date(2021, 5, 12, 10, 59, 56, 0, time.Local)},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1100,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			component := &stockContractComponent{}
			got := component.confirmContractItayoseMO(tt.side, tt.price, tt.now)
			if !reflect.DeepEqual(tt.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), tt.want, got)
			}
		})
	}
}
// Test_stockContractComponent_confirmContractAuctionMO covers market-order
// contract confirmation during continuous trading (zaraba): buy contracts at
// the ask, sell at the bid, and never without the opposing quote.
func Test_stockContractComponent_confirmContractAuctionMO(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name  string
		side  Side
		price *symbolPrice
		now   time.Time
		want  *confirmContractResult
	}{
		{name: "引数がnilなら約定しない",
			side:  SideBuy,
			price: nil,
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: false}},
		{name: "買い注文なら、売り気配値で約定する",
			side:  SideBuy,
			price: &symbolPrice{Ask: 1000},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "買い注文でも、売り気配値がなければ約定しない",
			side:  SideBuy,
			price: &symbolPrice{},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: false}},
		{name: "売り注文なら、買い気配値で約定する",
			side:  SideSell,
			price: &symbolPrice{Bid: 900},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        900,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "売り注文でも、買い気配値がなければ約定しない",
			side:  SideSell,
			price: &symbolPrice{},
			now:   time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: false}},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			component := &stockContractComponent{}
			got := component.confirmContractAuctionMO(tt.side, tt.price, tt.now)
			if !reflect.DeepEqual(tt.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), tt.want, got)
			}
		})
	}
}
// Test_stockContractComponent_confirmContractItayoseLO covers limit-order
// contract confirmation during itayose: without a current price, a buy
// contracts at the ask when limit >= ask and a sell at the bid when
// limit <= bid; with a current price no older than 5s, the order contracts
// at that price when the limit allows it.
// arg1 = side, arg2 = limit price, arg3 = price snapshot, arg4 = now.
func Test_stockContractComponent_confirmContractItayoseLO(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		arg1 Side
		arg2 float64
		arg3 *symbolPrice
		arg4 time.Time
		want *confirmContractResult
	}{
		{name: "引数がnilなら約定しない",
			arg1: SideBuy,
			arg2: 1001,
			arg3: nil,
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "現値がなく、買い注文で、売り気配値があり、指値が売り気配値より高いなら、売り気配値で約定する",
			arg1: SideBuy,
			arg2: 1001,
			arg3: &symbolPrice{Ask: 1000},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "現値がなく、買い注文で、売り気配値があり、指値が売り気配値と同じなら、売り気配値で約定する",
			arg1: SideBuy,
			arg2: 1000,
			arg3: &symbolPrice{Ask: 1000},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "現値がなく、買い注文で、売り気配値があり、指値が売り気配値より安いなら、約定しない",
			arg1: SideBuy,
			arg2: 999,
			arg3: &symbolPrice{Ask: 1000},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "現値がなく、買い注文で、売り気配値がなければ、約定しない",
			arg1: SideBuy,
			arg2: 999,
			arg3: &symbolPrice{},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "現値がなく、売り注文で、買い気配値があり、指値が買い気配値より高いなら、約定しない",
			arg1: SideSell,
			arg2: 1001,
			arg3: &symbolPrice{Bid: 1000},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "現値がなく、売り注文で、買い気配値があり、指値が買い気配値と同じなら、買い気配値で約定する",
			arg1: SideSell,
			arg2: 1000,
			arg3: &symbolPrice{Bid: 1000},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "現値がなく、売り注文で、買い気配値があり、指値が買い気配値より安いなら、買い気配値で約定する",
			arg1: SideSell,
			arg2: 999,
			arg3: &symbolPrice{Bid: 1000},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "現値がなく、売り注文で、買い気配値がなければ、約定しない",
			arg1: SideSell,
			arg2: 999,
			arg3: &symbolPrice{},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "現値があり、現値時刻が5s前なら、約定しない",
			arg1: SideBuy,
			arg2: 1000,
			arg3: &symbolPrice{Price: 1000, PriceTime: time.Date(2021, 5, 12, 10, 59, 55, 0, time.Local)},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "現値があり、現値時刻が5s以内で、買い注文で、指値が現値より高いなら、現値で約定する",
			arg1: SideBuy,
			arg2: 1001,
			arg3: &symbolPrice{Price: 1000, PriceTime: time.Date(2021, 5, 12, 10, 59, 56, 0, time.Local)},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "現値があり、現値時刻が5s以内で、買い注文で、指値が現値と同じなら、現値で約定する",
			arg1: SideBuy,
			arg2: 1000,
			arg3: &symbolPrice{Price: 1000, PriceTime: time.Date(2021, 5, 12, 10, 59, 56, 0, time.Local)},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "現値があり、現値時刻が5s以内で、買い注文で、指値が現値より安いなら、約定しない",
			arg1: SideBuy,
			arg2: 999,
			arg3: &symbolPrice{Price: 1000, PriceTime: time.Date(2021, 5, 12, 10, 59, 56, 0, time.Local)},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "現値があり、現値時刻が5s以内で、売り注文で、指値が現値より高いなら、約定しない",
			arg1: SideSell,
			arg2: 1001,
			arg3: &symbolPrice{Price: 1000, PriceTime: time.Date(2021, 5, 12, 10, 59, 56, 0, time.Local)},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "現値があり、現値時刻が5s以内で、売り注文で、指値が現値と同じなら、現値で約定する",
			arg1: SideSell,
			arg2: 1000,
			arg3: &symbolPrice{Price: 1000, PriceTime: time.Date(2021, 5, 12, 10, 59, 56, 0, time.Local)},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "現値があり、現値時刻が5s以内で、売り注文で、指値が現値より安いなら、現値で約定する",
			arg1: SideSell,
			arg2: 999,
			arg3: &symbolPrice{Price: 1000, PriceTime: time.Date(2021, 5, 12, 10, 59, 56, 0, time.Local)},
			arg4: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			component := &stockContractComponent{}
			got := component.confirmContractItayoseLO(test.arg1, test.arg2, test.arg3, test.arg4)
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
// Test_stockContractComponent_confirmContractAuctionLO covers limit-order
// contract confirmation during continuous trading: a buy contracts only when
// its limit is strictly above the ask, a sell only when strictly below the
// bid. On the first confirmation (arg3 == false) the contract price is the
// quote; afterwards (arg3 == true) it is the limit price itself.
// arg1 = side, arg2 = limit price, arg3 = already-confirmed-once flag,
// arg4 = price snapshot, arg5 = now.
func Test_stockContractComponent_confirmContractAuctionLO(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		arg1 Side
		arg2 float64
		arg3 bool
		arg4 *symbolPrice
		arg5 time.Time
		want *confirmContractResult
	}{
		{name: "引数がnilなら約定しない",
			arg1: SideBuy,
			arg2: 1001,
			arg3: true,
			arg4: nil,
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "買い注文で、売り気配値があり、指値が売り気配値より高いなら、指値で約定する",
			arg1: SideBuy,
			arg2: 1001,
			arg3: true,
			arg4: &symbolPrice{Ask: 1000},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1001,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "買い注文で、売り気配値があり、指値が売り気配値より高く、初回約定確認なら、気配値で約定する",
			arg1: SideBuy,
			arg2: 1001,
			arg3: false,
			arg4: &symbolPrice{Ask: 1000},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "買い注文で、売り気配値があり、指値が売り気配値と同じなら、約定しない",
			arg1: SideBuy,
			arg2: 1000,
			arg3: true,
			arg4: &symbolPrice{Ask: 1000},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "買い注文で、売り気配値があり、指値が売り気配値より安いなら、約定しない",
			arg1: SideBuy,
			arg2: 999,
			arg3: true,
			arg4: &symbolPrice{Ask: 1000},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "買い注文で、売り気配値がなければ、約定しない",
			arg1: SideBuy,
			arg2: 999,
			arg3: true,
			arg4: &symbolPrice{},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "売り注文で、買い気配値があり、指値が買い気配値より高いなら、約定しない",
			arg1: SideSell,
			arg2: 1001,
			arg3: true,
			arg4: &symbolPrice{Bid: 1000},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "売り注文で、買い気配値があり、指値が買い気配値と同じなら、約定しない",
			arg1: SideSell,
			arg2: 1000,
			arg3: true,
			arg4: &symbolPrice{Bid: 1000},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "売り注文で、買い気配値があり、指値が買い気配値より安いなら、指値で約定する",
			arg1: SideSell,
			arg2: 999,
			arg3: true,
			arg4: &symbolPrice{Bid: 1000},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        999,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "売り注文で、買い気配値があり、指値が買い気配値でより安く、初回約定確認なら、板で約定する",
			arg1: SideSell,
			arg2: 999,
			arg3: false,
			arg4: &symbolPrice{Bid: 1000},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{
				isContracted: true,
				price:        1000,
				contractedAt: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			}},
		{name: "売り注文で、買い気配値がなければ、約定しない",
			arg1: SideSell,
			arg2: 999,
			arg3: true,
			arg4: &symbolPrice{},
			arg5: time.Date(2021, 5, 12, 11, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			component := &stockContractComponent{}
			got := component.confirmContractAuctionLO(test.arg1, test.arg2, test.arg3, test.arg4, test.arg5)
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
// Test_stockContractComponent_confirmOrderContract is a table-driven test for
// confirmOrderContract, covering market orders (MO), session-open/close market
// orders, IOC variants, limit orders (LO) and their session variants, and
// funari (limit-then-market-at-close) conditions.
func Test_stockContractComponent_confirmOrderContract(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		arg1 StockExecutionCondition // execution condition under test
		arg2 Side                    // order side
		arg3 float64                 // limit price (0 for market orders)
		arg4 bool                    // true when the order has already been checked at least once
		arg5 *symbolPrice            // current price snapshot (nil allowed)
		arg6 time.Time               // "now" passed to the confirmation
		want *confirmContractResult
	}{
		{name: "引数がnilなら約定しない",
			arg5: nil,
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "成行が寄り価格で約定する",
			arg1: StockExecutionConditionMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "成行が引け価格で約定する",
			arg1: StockExecutionConditionMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 2, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local)}},
		{name: "成行がザラバで約定する",
			arg1: StockExecutionConditionMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 1000, kind: PriceKindRegular},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "成行がタイミング不明なら約定しない",
			arg1: StockExecutionConditionMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 1000, kind: PriceKindUnspecified},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "寄成前場が寄り価格で約定する",
			arg1: StockExecutionConditionMOMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "寄成前場が2回目以降の確認では約定しない",
			arg1: StockExecutionConditionMOMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		// NOTE(review): arg4=true already prevents contracting, so this case
		// does not actually exercise the "afternoon session" path (the price
		// time is still 9:00). Consider arg4=false with an afternoon time.
		{name: "寄成前場が後場では約定しない",
			arg1: StockExecutionConditionMOMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "寄成後場が寄り価格で約定する",
			arg1: StockExecutionConditionMOAO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local)}},
		{name: "寄成後場が2回目以降の確認では約定しない",
			arg1: StockExecutionConditionMOAO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		// NOTE(review): arg4=true also masks the session check here; with
		// arg4=false this case would genuinely verify the morning-session
		// rejection.
		{name: "寄成後場が前場では約定しない",
			arg1: StockExecutionConditionMOAO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "引成前場が引け価格で約定する",
			arg1: StockExecutionConditionMOMC,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 11, 30, 0, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 11, 30, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 11, 30, 3, 0, time.Local)}},
		{name: "引成前場が2回目以降の確認では約定しない",
			arg1: StockExecutionConditionMOMC,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 12, 30, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		// NOTE(review): arg4=true masks the session check here as well.
		{name: "引成前場が後場では約定しない",
			arg1: StockExecutionConditionMOMC,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 12, 30, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "引成後場が引け価格で約定する",
			arg1: StockExecutionConditionMOAC,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local)}},
		{name: "引成後場が2回目以降の確認では約定しない",
			arg1: StockExecutionConditionMOAC,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "引成後場が前場では約定しない",
			arg1: StockExecutionConditionMOAC,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: false},
		},
		{name: "IOC成行が寄り価格で約定する",
			arg1: StockExecutionConditionIOCMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "IOC成行が引け価格で約定する",
			arg1: StockExecutionConditionIOCMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 2, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local)}},
		{name: "IOC成行がザラバで約定する",
			arg1: StockExecutionConditionIOCMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 1000, kind: PriceKindRegular},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "IOC成行がタイミング不明なら約定しない",
			arg1: StockExecutionConditionIOCMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 1000, kind: PriceKindUnspecified},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "IOC成行が1度でも約定確認されていたら約定しない",
			arg1: StockExecutionConditionIOCMO,
			arg2: SideBuy,
			arg3: 0.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 1000, kind: PriceKindRegular},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "指値が寄り価格で約定する",
			arg1: StockExecutionConditionLO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "指値が引け価格で約定する",
			arg1: StockExecutionConditionLO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 2, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local)}},
		{name: "指値がザラバで約定する",
			arg1: StockExecutionConditionLO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 990, kind: PriceKindRegular},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "指値がタイミング不明なら約定しない",
			arg1: StockExecutionConditionLO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 1000, kind: PriceKindUnspecified},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "寄指前場が寄り価格で約定する",
			arg1: StockExecutionConditionLOMO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "寄指前場が2回目以降の確認では約定しない",
			arg1: StockExecutionConditionLOMO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "寄指前場が後場では約定しない",
			arg1: StockExecutionConditionLOMO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 13, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 13, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "寄指後場が寄り価格で約定する",
			arg1: StockExecutionConditionLOAO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local)}},
		// Fixed: this case is about 寄指後場 (afternoon-open limit) but used
		// StockExecutionConditionLOMO; it now uses LOAO to match its name.
		{name: "寄指値後場が2回目以降の確認では約定しない",
			arg1: StockExecutionConditionLOAO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "寄指後場が前場では約定しない",
			arg1: StockExecutionConditionLOAO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "引指前場が引け価格で約定する",
			arg1: StockExecutionConditionLOMC,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 11, 30, 0, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 11, 30, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 11, 30, 3, 0, time.Local)}},
		{name: "引指前場が2回目以降の確認では約定しない",
			arg1: StockExecutionConditionLOMC,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 12, 30, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "引指前場が後場では約定しない",
			arg1: StockExecutionConditionLOMC,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 12, 30, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "引指後場が引け価格で約定する",
			arg1: StockExecutionConditionLOAC,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local)}},
		{name: "引指後場が2回目以降の確認では約定しない",
			arg1: StockExecutionConditionLOAC,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "引指後場が前場では約定しない",
			arg1: StockExecutionConditionLOAC,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "不成前場は前場の寄りではオークションの指値で約定する",
			arg1: StockExecutionConditionFunariM,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "不成前場は前場のザラバでは指値で約定する",
			arg1: StockExecutionConditionFunariM,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 990, kind: PriceKindRegular},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 990, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "不成前場は前場の引けではオークションの成行で約定する",
			arg1: StockExecutionConditionFunariM,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1200, PriceTime: time.Date(2021, 5, 12, 11, 30, 0, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 11, 30, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1200, contractedAt: time.Date(2021, 5, 12, 11, 30, 0, 0, time.Local)}},
		{name: "不成前場は後場の寄りではオークションの指値で約定する",
			arg1: StockExecutionConditionFunariM,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local)}},
		{name: "不成前場は後場のザラバでは指値で約定する",
			arg1: StockExecutionConditionFunariM,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 990, kind: PriceKindRegular},
			arg6: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 990, contractedAt: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local)}},
		{name: "不成前場は後場の引けではオークションの指値で約定する",
			arg1: StockExecutionConditionFunariM,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 990, PriceTime: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 990, contractedAt: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local)}},
		{name: "不成後場は前場の寄りではオークションの指値で約定する",
			arg1: StockExecutionConditionFunariA,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "不成後場は前場のザラバでは指値で約定する",
			arg1: StockExecutionConditionFunariA,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 990, kind: PriceKindRegular},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 990, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "不成後場は前場の引けではオークションの指値で約定する",
			arg1: StockExecutionConditionFunariA,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 990, PriceTime: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 990, contractedAt: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local)}},
		{name: "不成後場は後場の寄りではオークションの指値で約定する",
			arg1: StockExecutionConditionFunariA,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local)}},
		{name: "不成後場は後場のザラバでは指値で約定する",
			arg1: StockExecutionConditionFunariA,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 990, kind: PriceKindRegular},
			arg6: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 990, contractedAt: time.Date(2021, 5, 12, 12, 30, 0, 0, time.Local)}},
		{name: "不成後場は後場の引けではオークションの成行で約定する",
			arg1: StockExecutionConditionFunariA,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1200, PriceTime: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1200, contractedAt: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local)}},
		{name: "執行条件が逆指値の場合は約定しない",
			arg1: StockExecutionConditionStop,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1200, PriceTime: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 15, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "IOC指値が寄り価格で約定する",
			arg1: StockExecutionConditionIOCLO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local), kind: PriceKindOpening},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "IOC指値が引け価格で約定する",
			arg1: StockExecutionConditionIOCLO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Price: 1000, PriceTime: time.Date(2021, 5, 12, 15, 0, 2, 0, time.Local), kind: PriceKindClosing},
			arg6: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 5, 12, 15, 0, 3, 0, time.Local)}},
		{name: "IOC指値がザラバで約定する",
			arg1: StockExecutionConditionIOCLO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 990, kind: PriceKindRegular},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: true, price: 990, contractedAt: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local)}},
		{name: "IOC指値がタイミング不明なら約定しない",
			arg1: StockExecutionConditionIOCLO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: false,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 1000, kind: PriceKindUnspecified},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
		{name: "IOC指値が1度でも約定確認されていたら約定しない",
			arg1: StockExecutionConditionIOCLO,
			arg2: SideBuy,
			arg3: 1000.0,
			arg4: true,
			arg5: &symbolPrice{SymbolCode: "1234", Ask: 1000, kind: PriceKindUnspecified},
			arg6: time.Date(2021, 5, 12, 9, 0, 0, 0, time.Local),
			want: &confirmContractResult{isContracted: false}},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			component := &stockContractComponent{}
			got := component.confirmOrderContract(test.arg1, test.arg2, test.arg3, test.arg4, test.arg5, test.arg6)
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
// Test_stockContractComponent_confirmStockOrderContract verifies the guard
// clauses of confirmStockOrderContract (nil order, nil price, symbol
// mismatch, non-contractable status) and that it delegates to
// confirmOrderContract for a valid order.
func Test_stockContractComponent_confirmStockOrderContract(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name  string
		order *stockOrder   // order under test
		price *symbolPrice  // price snapshot fed to the component
		now   time.Time     // confirmation time
		want  *confirmContractResult
	}{
		{name: "注文がnilなら約定しない",
			order: nil,
			want:  &confirmContractResult{isContracted: false}},
		{name: "価格がnilなら約定しない",
			order: &stockOrder{},
			price: nil,
			want:  &confirmContractResult{isContracted: false}},
		{name: "注文と価格の銘柄が一致しないなら約定しない",
			order: &stockOrder{SymbolCode: "1234"},
			price: &symbolPrice{SymbolCode: "0000"},
			want:  &confirmContractResult{isContracted: false}},
		{name: "注文が約定可能な状態でないなら約定しない",
			order: &stockOrder{SymbolCode: "1234", OrderStatus: OrderStatusDone},
			price: &symbolPrice{SymbolCode: "1234"},
			want:  &confirmContractResult{isContracted: false}},
		{name: "confirmOrderContractが呼び出される(未約定)",
			order: &stockOrder{SymbolCode: "1234", OrderStatus: OrderStatusInOrder},
			price: &symbolPrice{SymbolCode: "1234"},
			now:   time.Date(2021, 8, 13, 0, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: false}},
		{name: "confirmOrderContractが呼び出される(約定)",
			order: &stockOrder{SymbolCode: "1234", OrderStatus: OrderStatusInOrder, Side: SideBuy, ExecutionCondition: StockExecutionConditionMO, OrderQuantity: 1},
			price: &symbolPrice{SymbolCode: "1234", Ask: 1000, AskTime: time.Date(2021, 8, 13, 9, 0, 0, 0, time.Local), kind: PriceKindRegular},
			now:   time.Date(2021, 8, 13, 9, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 8, 13, 9, 0, 0, 0, time.Local)}},
	}

	for _, tc := range tests {
		tc := tc // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			component := &stockContractComponent{}
			got := component.confirmStockOrderContract(tc.order, tc.price, tc.now)
			if !reflect.DeepEqual(tc.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), tc.want, got)
			}
		})
	}
}
// Test_stockContractComponent_confirmMarginOrderContract mirrors the stock
// order test for the margin variant: guard clauses (nil order, nil price,
// symbol mismatch, non-contractable status) plus delegation to
// confirmOrderContract for a valid margin order.
func Test_stockContractComponent_confirmMarginOrderContract(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name  string
		order *marginOrder  // margin order under test
		price *symbolPrice  // price snapshot fed to the component
		now   time.Time     // confirmation time
		want  *confirmContractResult
	}{
		{name: "注文がnilなら約定しない",
			order: nil,
			want:  &confirmContractResult{isContracted: false}},
		{name: "価格がnilなら約定しない",
			order: &marginOrder{},
			price: nil,
			want:  &confirmContractResult{isContracted: false}},
		{name: "注文と価格の銘柄が一致しないなら約定しない",
			order: &marginOrder{SymbolCode: "1234"},
			price: &symbolPrice{SymbolCode: "0000"},
			want:  &confirmContractResult{isContracted: false}},
		{name: "注文が約定可能な状態でないなら約定しない",
			order: &marginOrder{SymbolCode: "1234", OrderStatus: OrderStatusDone},
			price: &symbolPrice{SymbolCode: "1234"},
			want:  &confirmContractResult{isContracted: false}},
		{name: "confirmOrderContractが呼び出される(未約定)",
			order: &marginOrder{SymbolCode: "1234", OrderStatus: OrderStatusInOrder},
			price: &symbolPrice{SymbolCode: "1234"},
			now:   time.Date(2021, 8, 13, 0, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: false}},
		{name: "confirmOrderContractが呼び出される(約定)",
			order: &marginOrder{SymbolCode: "1234", OrderStatus: OrderStatusInOrder, Side: SideBuy, ExecutionCondition: StockExecutionConditionMO, OrderQuantity: 1},
			price: &symbolPrice{SymbolCode: "1234", Ask: 1000, AskTime: time.Date(2021, 8, 13, 9, 0, 0, 0, time.Local), kind: PriceKindRegular},
			now:   time.Date(2021, 8, 13, 9, 0, 0, 0, time.Local),
			want:  &confirmContractResult{isContracted: true, price: 1000, contractedAt: time.Date(2021, 8, 13, 9, 0, 0, 0, time.Local)}},
	}

	for _, tc := range tests {
		tc := tc // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			component := &stockContractComponent{}
			got := component.confirmMarginOrderContract(tc.order, tc.price, tc.now)
			if !reflect.DeepEqual(tc.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), tc.want, got)
			}
		})
	}
}
|
package main
import "fmt"
// a and b hold the two integers read from standard input.
var a, b int

// main reads two integers, prints them, swaps them, and prints them again.
func main() {
	fmt.Printf("a, b ? ")
	fmt.Scanf("%d %d", &a, &b)
	fmt.Printf("a=%d, b=%d\n", a, b)
	// Idiomatic Go swap: parallel assignment replaces the XOR trick, which
	// is harder to read and offers no benefit in Go.
	a, b = b, a
	fmt.Printf("a=%d, b=%d\n", a, b)
}
|
package plugin
import (
"archive/zip"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/grafana/plugin-validator/pkg/grafana"
)
// checkContext contains useful paths and data available to each checker.
type checkContext struct {
	RootDir      string // root directory of the extracted plugin archive
	DistDir      string // <RootDir>/dist — built plugin output
	SrcDir       string // <RootDir>/src — plugin source tree
	MetadataPath string // path of the plugin.json that was found
	Readme       []byte // raw contents of the plugin README.md
	Metadata     []byte // raw contents of plugin.json
}

// checkSeverity classifies how serious a validation finding is.
type checkSeverity string

const (
	checkSeverityError   checkSeverity = "error"
	checkSeverityWarning checkSeverity = "warning"
)

// checker is implemented by every individual validation rule; each returns
// zero or more comments, or an error that aborts the whole validation run.
type checker interface {
	check(ctx *checkContext) ([]ValidationComment, error)
}

// Ref describes a plugin version on GitHub.
type Ref struct {
	Username string `json:"username"`
	Repo     string `json:"repo"`
	Ref      string `json:"ref"`
}

// ValidationComment contains a comment returned by one of the checkers.
// Note the JSON key for Severity is "level", not "severity".
type ValidationComment struct {
	Severity checkSeverity `json:"level"`
	Message  string        `json:"message"`
	Details  string        `json:"details"`
}

// ErrPluginNotFound is returned whenever a plugin could not be found for a given ref.
var ErrPluginNotFound = errors.New("plugin not found")
// Check executes a number of checks to validate a plugin.
//
// It downloads the GitHub zipball for the ref encoded in url, extracts it to
// a temporary directory, locates the README and plugin.json, and then runs
// every registered checker against the extracted tree. It returns the raw
// plugin.json, the collected validation comments, and any fatal error.
func Check(url string, schemaPath string, client *grafana.Client) (json.RawMessage, []ValidationComment, error) {
	ref, err := parseRef(url)
	if err != nil {
		return nil, nil, err
	}
	// Fetch the source archive for the requested ref from the GitHub API.
	// NOTE(review): http.Get uses the default client with no timeout — a
	// stalled download would block indefinitely; consider a client with a
	// Timeout.
	archiveURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/zipball/%s", ref.Username, ref.Repo, ref.Ref)
	resp, err := http.Get(archiveURL)
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// 404 from GitHub maps to the package-level sentinel error.
		if resp.StatusCode == http.StatusNotFound {
			return nil, nil, ErrPluginNotFound
		}
		return nil, nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	// Extract the ZIP archive in a temporary directory.
	rootDir, cleanup, err := extractPlugin(resp.Body)
	if err != nil {
		return nil, nil, err
	}
	defer cleanup()
	var (
		distDir = filepath.Join(rootDir, "dist")
		srcDir  = filepath.Join(rootDir, "src")
	)
	// TODO: If there's no plugin.json or README, several checks will fail.
	// Ideally, each checker would declare checkers it depends on, and only run
	// if those checkers ran successfully.
	var fatalErrs []ValidationComment
	// NOTE(review): the README fallback searches distDir then rootDir, while
	// plugin.json searches distDir then srcDir — confirm this asymmetry is
	// intentional.
	readmePath, err := fallbackDir("README.md", distDir, rootDir)
	if err != nil {
		if err == errFileNotFound {
			fatalErrs = append(fatalErrs, ValidationComment{
				Severity: "error",
				Message:  "Missing README",
				Details:  "Plugins require a `README.md` file, but we couldn't find one. The README should provide instructions to the users on how to use the plugin.",
			})
		} else {
			return nil, nil, err
		}
	}
	metadataPath, err := fallbackDir("plugin.json", distDir, srcDir)
	if err != nil {
		if err == errFileNotFound {
			fatalErrs = append(fatalErrs, ValidationComment{
				Severity: "error",
				Message:  "Missing metadata",
				Details:  "Plugins require a `plugin.json` file, but we couldn't find one. For more information, refer to [plugin.json](https://grafana.com/docs/grafana/latest/developers/plugins/metadata/).",
			})
		} else {
			return nil, nil, err
		}
	}
	// Missing README or metadata makes further checks meaningless; report
	// the fatal findings without running the checkers.
	if len(fatalErrs) > 0 {
		return nil, fatalErrs, nil
	}
	metadata, err := ioutil.ReadFile(metadataPath)
	if err != nil {
		return nil, nil, err
	}
	readme, err := ioutil.ReadFile(readmePath)
	if err != nil {
		return nil, nil, err
	}
	ctx := &checkContext{
		RootDir:      rootDir,
		DistDir:      distDir,
		SrcDir:       srcDir,
		MetadataPath: metadataPath,
		Readme:       readme,
		Metadata:     metadata,
	}
	username := usernameFromMetadata(metadata)
	// The ordered list of validation rules to run against the plugin.
	checkers := []checker{
		&distExistsChecker{},
		&orgExistsChecker{username: username, client: client},
		&pluginIDFormatChecker{},
		&pluginNameChecker{},
		&pluginIDHasTypeSuffixChecker{},
		&jsonSchemaChecker{schema: schemaPath},
		&linkChecker{},
		&pluginPlatformChecker{},
		&screenshotChecker{},
		&developerJargonChecker{},
		&templateReadmeChecker{},
	}
	errs := []ValidationComment{}
	// Check and collect all errors.
	for _, checker := range checkers {
		newerrs, err := checker.check(ctx)
		if err != nil {
			return nil, nil, err
		}
		errs = append(errs, newerrs...)
	}
	return json.RawMessage(metadata), errs, nil
}
// usernameFromMetadata returns the first part of the plugin ID (the segment
// before the first '-'), which is expected to identify the plugin author.
// It returns an empty string when the metadata cannot be parsed or the ID
// is empty.
func usernameFromMetadata(metadata []byte) string {
	var meta struct {
		ID string `json:"id"`
	}
	// A parse failure means none of the fields can be trusted; report "no
	// username" instead of acting on a partially decoded struct. (The error
	// was previously discarded silently.)
	if err := json.Unmarshal(metadata, &meta); err != nil {
		return ""
	}
	// strings.Split never returns an empty slice, so the first element is
	// always present; SplitN(…, 2) avoids splitting more than needed.
	return strings.SplitN(meta.ID, "-", 2)[0]
}
// extractPlugin buffers the zipball from body into a temporary file, unzips
// it into a fresh temporary directory, and returns the path of the single
// top-level directory inside the archive along with a cleanup func that
// removes the extracted tree.
func extractPlugin(body io.Reader) (string, func(), error) {
	// Create a file for the zipball; zip.OpenReader needs random access,
	// so the streamed body must be spooled to disk first.
	zipball, err := ioutil.TempFile("", "")
	if err != nil {
		return "", nil, err
	}
	// Defers run LIFO: registering Remove before Close makes Close execute
	// first, so the file is no longer open when it is removed. (The original
	// order removed the file while still open, which fails on Windows.)
	defer os.Remove(zipball.Name())
	defer zipball.Close()
	if _, err := io.Copy(zipball, body); err != nil {
		return "", nil, err
	}
	// Create a directory where we'll extract the archive.
	output, err := ioutil.TempDir("", "")
	if err != nil {
		return "", nil, err
	}
	cleanup := func() {
		os.RemoveAll(output)
	}
	if _, err := unzip(zipball.Name(), output); err != nil {
		cleanup()
		return "", nil, err
	}
	// GitHub zipballs wrap everything in exactly one top-level directory;
	// anything else means the archive is not what we expected.
	infos, err := ioutil.ReadDir(output)
	if err != nil {
		cleanup()
		return "", nil, err
	}
	if len(infos) != 1 {
		cleanup()
		return "", nil, fmt.Errorf("unzip: expected 1 directory but got %d", len(infos))
	}
	pluginRoot := filepath.Join(output, infos[0].Name())
	return pluginRoot, cleanup, nil
}
// unzip extracts the ZIP archive at src into the directory dest and returns
// the paths of every file and directory it created. Extraction stops at the
// first error, returning the paths created so far.
func unzip(src string, dest string) ([]string, error) {
	var filenames []string
	r, err := zip.OpenReader(src)
	if err != nil {
		return filenames, err
	}
	defer r.Close()
	for _, f := range r.File {
		// Store filename/path for returning and using later on
		fpath := filepath.Join(dest, f.Name)
		// Check for ZipSlip. More Info: http://bit.ly/2MsjAWE
		if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) {
			return filenames, fmt.Errorf("%s: illegal file path", fpath)
		}
		filenames = append(filenames, fpath)
		if f.FileInfo().IsDir() {
			// Make folder. The error was previously discarded; a failure
			// here would otherwise surface later as confusing file errors.
			if err := os.MkdirAll(fpath, os.ModePerm); err != nil {
				return filenames, err
			}
			continue
		}
		// Make File
		if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
			return filenames, err
		}
		outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
		if err != nil {
			return filenames, err
		}
		rc, err := f.Open()
		if err != nil {
			// Previously outFile leaked on this path.
			outFile.Close()
			return filenames, err
		}
		_, err = io.Copy(outFile, rc)
		// Close the file without defer to close before next iteration of loop
		outFile.Close()
		rc.Close()
		if err != nil {
			return filenames, err
		}
	}
	return filenames, nil
}
|
package main
import (
"crypto/md5"
"crypto/rsa"
"encoding/json"
"hussain/thorium-go/requests"
"net/http"
"time"
"github.com/dgrijalva/jwt-go"
"github.com/go-martini/martini"
)
import "os/exec"
import "fmt"
import "redis"
import "bytes"
import "crypto/rand"
import "database/sql"
import _ "github.com/lib/pq"
//For now these field are from the sql table of games,
//proper struct should be
/*
type ContractInformation struct {
OfferedBy string
TimeRemaining int
Bid int
Ask int
}
*/
// ContractInformation mirrors one row of the games SQL table (see the
// commented-out sketch above for the intended richer shape).
type ContractInformation struct {
	game_id     int    // primary key of the game row
	map_name    string // name of the map the game runs on
	max_players int    // player capacity
	is_verified bool   // whether the game has been verified
}
// Check reports whether prog is on the whitelist of runnable programs.
// It returns 1 when prog is accepted and 0 otherwise (int is kept for
// compatibility with existing callers).
func Check(prog string) int {
	whitelist := []string{"boltactiongame", "test"}
	for _, name := range whitelist {
		if name == prog {
			return 1
		}
	}
	return 0
}
// Execute starts the program named prog from the current directory and
// returns its exec.Cmd handle. The command is started asynchronously
// (Start, not Run); a start failure is logged and the unstarted Cmd is
// still returned, so callers must not assume cmd.Process is non-nil.
func Execute(prog string) *exec.Cmd {
	cmd := exec.Command("./" + prog)
	if err := cmd.Start(); err != nil {
		// Typo fixed: "runninng" -> "running".
		fmt.Println("Error running program ", err)
	}
	return cmd
}
// RedisPush validates the given raw JWT against the shared secretKey
// and, if valid, stores its raw form in the "clients/token" hash keyed
// by the token's "id" claim. Returns 1 on success, 0 on any failure.
func RedisPush(rawtokenstring string) int {
	spec := redis.DefaultSpec().Password("go-redis")
	client, e := redis.NewSynchClientWithSpec(spec)
	if e != nil {
		// BUG FIX: execution previously continued (and deferred Quit)
		// on an unusable client; bail out instead.
		fmt.Println("error creating client for: ", e)
		return 0
	}
	defer client.Quit()

	// Verify the token signature with the shared secret key.
	decryptedToken, err := jwt.Parse(rawtokenstring, func(token *jwt.Token) (interface{}, error) {
		return []byte(secretKey), nil
	})
	// Check if no error and valid token.
	if err == nil && decryptedToken.Valid {
		fmt.Println("token is valid and not expired")
	} else {
		fmt.Println("Not valid: ", err)
		return 0
	}

	// BUG FIX: guard the type assertion so a missing or non-string
	// "id" claim fails gracefully instead of panicking.
	userID, ok := decryptedToken.Claims["id"].(string)
	if !ok {
		fmt.Println("token has no string id claim")
		return 0
	}

	var buf bytes.Buffer
	buf.Write([]byte(decryptedToken.Raw))
	e = client.Hset("clients/token", userID, buf.Bytes())
	// To retrieve token in redis-cli, do: hget clients/tokens <id>
	if e != nil {
		fmt.Println("error writing to list")
		return 0
	}
	return 1
}
// PostGresQueryIDS returns the ids of all games recorded in the
// thoriumnet database. The result is a fixed-size slice of 100 ints;
// unused trailing entries remain 0 (existing callers skip zeros).
func PostGresQueryIDS() []int {
	db, err := sql.Open("postgres", "user=thoriumnet password=thoriumtest dbname=thoriumnet host=localhost")
	if err != nil {
		fmt.Println("err: ", err)
	}
	// BUG FIX: the connection pool was never released.
	defer db.Close()

	var game_id int
	game_ids := make([]int, 100)

	// BUG FIX: "SELECT *" returned every column while Scan below
	// supplies a single destination, so Scan failed on every row.
	// Select only the id column.
	rows, err := db.Query("SELECT game_id FROM games;")
	if err != nil {
		fmt.Println("err2: ", err)
		// BUG FIX: previously fell through to rows.Close()/rows.Next()
		// on a nil rows, panicking.
		return game_ids
	}
	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(&game_id); err != nil {
			fmt.Println("err3: ", err)
		}
		// Store the id in the first free (zero) slot.
		for index := range game_ids {
			if game_ids[index] == 0 {
				game_ids[index] = game_id
				break
			}
		}
	}
	return game_ids
}
// secretKey is the shared HMAC secret used to sign and verify JWTs;
// set once in main().
var secretKey string
// main wires up the martini HTTP server with the client login routes.
// Earlier experiments (process launching, database bootstrap, flags)
// are preserved below as commented-out code.
func main() {
	//rand.Seed(time.Now().UnixNano())
	//var portArg = flag.Int("p", rand.Intn(65000-10000)+10000, "specifies port, default is random int between 10000-65000")
	//var mapArg = flag.String("m", "default map value", "description of map")
	//flag.Parse()
	//fmt.Println(strconv.Itoa(*portArg))
	//fmt.Println(*mapArg)
	//rand.Seed = 1
	//processL := make([]*exec.Cmd, 100)
	//currentGames := PostGresQueryIDS()
	//for _, value := range currentGames {
	//if value != 0 {
	//fmt.Println(value)
	//}
	//}
	m := martini.Classic()
	// NOTE(review): hard-coded signing secret; should come from config
	// or the environment in production.
	secretKey = "superdupersecretkey"
	/*m.Post("/launch/:name", func(params martini.Params) string {
	e := Check(params["name"])
	if e==1 {
	cmdInfo := Execute(params["name"])
	for i:=0; i<len(processL); i++ {
	if processL[i]==nil {
	processL[i]=cmdInfo
	//suc := RedisPush(cmdInfo)
	RedisPush(cmdInfo)
	break
	}
	}
	//fmt.Println(processL)
	return "launching " + params["name"] + "with pid " + strconv.Itoa(cmdInfo.Process.Pid)
	} else {
	return "not accepted"
	}
	})
	*/
	//m.Get("/games", gameServerInfo)
	m.Post("/client/login", handleClientLogin)
	m.Post("/client/afterlogin", handleAfterLogin)
	// Run blocks, serving on martini's default port (3000 or $PORT).
	m.Run()
	// err := cmd.Wait()
	// fmt.Println(err)
	// fmt.Println(cmd.Path)
	// fmt.Println(cmd.Process.Pid)
}
// handleAfterLogin validates a previously issued JWT supplied in the
// request body and reports whether it is still valid. Returns an HTTP
// status code and body text for martini to write.
func handleAfterLogin(httpReq *http.Request) (int, string) {
	var req request.Test
	decoder := json.NewDecoder(httpReq.Body)
	err := decoder.Decode(&req)
	if err != nil {
		fmt.Println("error decoding token request")
		return 500, "Internal Server Error"
	}

	// The callback returns the shared secret used to verify the token.
	decryptedToken, err := jwt.Parse(req.Token, func(token *jwt.Token) (interface{}, error) {
		return []byte(secretKey), nil
	})
	// Check if no error and valid token. .Valid already covers expiry.
	if err == nil && decryptedToken.Valid {
		// BUG FIX: this debug dump previously ran before the error
		// check, dereferencing a possibly-nil token on malformed input.
		fmt.Printf("token strings\nRaw: [%s]\nHeader: [%s]\nSignature: [%s]\n", decryptedToken.Raw, decryptedToken.Header, decryptedToken.Signature)
		fmt.Println("token is valid and not expired")
	} else {
		fmt.Println("Not valid: ", err)
		return 500, "Internal Server Error"
	}
	return 200, "ok"
}
// handleClientLogin decodes a username/password request, issues a
// signed JWT with a 2-minute expiry, stores it in redis, and returns
// the token string to the client.
func handleClientLogin(httpReq *http.Request) (int, string) {
	decoder := json.NewDecoder(httpReq.Body)
	var req request.Authentication
	if err := decoder.Decode(&req); err != nil {
		fmt.Println("error with json: ", err)
		return 500, "Internal Server Error"
	}

	// TODO: verify req.Username/req.Password against the user database
	// before issuing a token.

	// BUG FIX: the token was created with SigningMethodRS256 but is
	// signed below with the raw []byte secret, which the RSA method
	// rejects, so SignedString always failed. Use HMAC-SHA256 to match
	// the []byte(secretKey) that jwt.Parse uses elsewhere in this file.
	token := jwt.New(jwt.SigningMethodHS256)

	// Demo round-trip of RSA-OAEP encryption of the username (not part
	// of the issued token). NOTE(review): MD5 as the OAEP hash is weak;
	// fine for a demo, not for production.
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		fmt.Println("Could not generate key: ", err)
		return 500, "Internal Server Error"
	}
	publicKey := &privateKey.PublicKey
	md5hash := md5.New()
	label := []byte("")
	encryptedUsername, err := rsa.EncryptOAEP(md5hash, rand.Reader, publicKey, []byte(req.Username), label)
	if err != nil {
		fmt.Println("error encrypting: ", err)
	}

	// Claims: subject id plus a 2-minute expiry.
	token.Claims["id"] = req.Username
	token.Claims["exp"] = time.Now().Add(time.Minute * 2).Unix()

	// Decrypt to check that the encryption round-trip worked.
	decryptedUsername, err := rsa.DecryptOAEP(md5hash, rand.Reader, privateKey, encryptedUsername, label)
	if err != nil {
		fmt.Println("error decrypting: ", err)
	}
	fmt.Printf("decrypted [%x] to \n[%s]\n", token.Claims["UserID"], decryptedUsername)

	// Sign the token with the shared HMAC secret.
	tokenString, err := token.SignedString([]byte(secretKey))
	if err != nil {
		fmt.Println("error getting signed key: ", err)
		return 500, "Internal Server Error"
	}

	if RedisPush(tokenString) == 1 {
		fmt.Println("pushed to redis")
	} else {
		fmt.Println("could not push to redis")
	}
	fmt.Println("Token String: ", tokenString)
	return 200, tokenString
}
/*
func gameServerInfo() string {
db, err := sql.Open("postgres", "user=thoriumnet password=thoriumtest dbname=thoriumnet host=localhost")
if err != nil {
fmt.Println("database conn err: ", err)
//return 500, err
}
//var tx *sql.Tx
//tx, e = db.Begin()
//store contract information
var info ContractInformation
//get game id
rows, err := db.Query("SELECT * FROM games")
if err != nil {
fmt.Println("error: ", err)
}
defer rows.Close()
//scan row by row
for rows.Next() {
//must scan all variables
err := rows.Scan(&info.game_id, &info.map_name, &info.max_players, &info.is_verified)
if err != nil {
fmt.Println("error scanning row: ", err)
}
fmt.Println("id: ", info.game_id, "map: ", info.map_name, "max_players: ", info.max_players, "verified: ", info.is_verified)
}
return "finished"
}*/
|
package protocol_comm
import (
"github.com/colefan/gsgo/netio/iobuffer"
"github.com/colefan/gsgo/netio/packet"
)
// Protocol command identifiers for the error/version-check packets.
const (
	CMD_S_C_ERROR_NT   = 0x00D2 // server -> client error notification
	CMD_C_S_VCHECK_REQ = 0x00D3 // client -> server version check request
	CMD_C_S_VCHECK_RESP = 0x00D4 // version check response
)
// ServerErrorNt is the server-to-client error notification packet.
type ServerErrorNt struct {
	*packet.Packet
	ReqCmdID uint16 // command id of the request that failed
	ErrCode  uint16 // error code
}
// DecodePacket reads ReqCmdID and ErrCode from the underlying packet.
// It is idempotent: an already-decoded packet returns immediately.
func (this *ServerErrorNt) DecodePacket() bool {
	if !this.IsDecoded() {
		packet.DecoderReadValue(this.Packet, &this.ReqCmdID)
		packet.DecoderReadValue(this.Packet, &this.ErrCode)
		this.PackDecoded = true
	}
	return true
}
// EncodePacket serializes the header plus ReqCmdID/ErrCode into a new
// output buffer of capacity nLen, then patches the length field at
// offset 0 (payload length excludes the proxy header).
func (this *ServerErrorNt) EncodePacket(nLen int) *iobuffer.OutBuffer {
	out := this.Packet.Header.Encode(iobuffer.NewOutBuffer(nLen))
	out.PutRawValue(this.ReqCmdID)
	out.PutRawValue(this.ErrCode)
	payloadLen := out.GetLen() - packet.PACKET_PROXY_HEADER_LEN
	out.SetUint16(uint16(payloadLen), 0)
	return out
}
// VersionCheckReq is the client-to-server version check request.
type VersionCheckReq struct {
	*packet.Packet
	NodeType uint16 // server node type
	GameID   uint32 // game id
	GameCode string // game code
	Version  string // game version string
}
// DecodePacket reads NodeType, GameID, GameCode and Version from the
// underlying packet. Idempotent for already-decoded packets.
func (this *VersionCheckReq) DecodePacket() bool {
	if !this.IsDecoded() {
		packet.DecoderReadValue(this.Packet, &this.NodeType)
		packet.DecoderReadValue(this.Packet, &this.GameID)
		packet.DecoderReadValue(this.Packet, &this.GameCode)
		packet.DecoderReadValue(this.Packet, &this.Version)
		this.PackDecoded = true
	}
	return true
}
// EncodePacket serializes the header and all request fields into a new
// output buffer of capacity nLen, then patches the length field at
// offset 0 (payload length excludes the proxy header).
func (this *VersionCheckReq) EncodePacket(nLen int) *iobuffer.OutBuffer {
	out := this.Packet.Header.Encode(iobuffer.NewOutBuffer(nLen))
	out.PutRawValue(this.NodeType)
	out.PutRawValue(this.GameID)
	out.PutRawValue(this.GameCode)
	out.PutRawValue(this.Version)
	payloadLen := out.GetLen() - packet.PACKET_PROXY_HEADER_LEN
	out.SetUint16(uint16(payloadLen), 0)
	return out
}
// VersionCheckResp is the version check response packet.
type VersionCheckResp struct {
	*packet.Packet
	NodeType uint16 // server node type
	GameID   uint32 // game id
	GameCode string // game code
	Code     uint16 // error code (0 = ok, presumably — confirm with server code)
}
// DecodePacket reads NodeType, GameID, GameCode and Code from the
// underlying packet. Idempotent for already-decoded packets.
func (this *VersionCheckResp) DecodePacket() bool {
	if !this.IsDecoded() {
		packet.DecoderReadValue(this.Packet, &this.NodeType)
		packet.DecoderReadValue(this.Packet, &this.GameID)
		packet.DecoderReadValue(this.Packet, &this.GameCode)
		packet.DecoderReadValue(this.Packet, &this.Code)
		this.PackDecoded = true
	}
	return true
}
// EncodePacket serializes the header and all response fields into a new
// output buffer of capacity nLen, then patches the length field at
// offset 0 (payload length excludes the proxy header).
func (this *VersionCheckResp) EncodePacket(nLen int) *iobuffer.OutBuffer {
	out := this.Packet.Header.Encode(iobuffer.NewOutBuffer(nLen))
	out.PutRawValue(this.NodeType)
	out.PutRawValue(this.GameID)
	out.PutRawValue(this.GameCode)
	out.PutRawValue(this.Code)
	payloadLen := out.GetLen() - packet.PACKET_PROXY_HEADER_LEN
	out.SetUint16(uint16(payloadLen), 0)
	return out
}
|
package jars
import (
"testing"
)
// TestStub parses a single manifest header line and verifies the
// resulting key/value mapping, then exercises PrintHeaders.
func TestStub(t *testing.T) {
	mf := Manifest{}
	mf.Parse("Hello: World")
	if got := mf.Map["Hello"]; got != "World" {
		t.Fail()
	}
	mf.PrintHeaders()
}
|
package main
import (
"bufio"
gproto "code.google.com/p/goprotobuf/proto"
"encoding/json"
"fmt"
"io/ioutil"
"libcleo"
"log"
"net"
"net/http"
"os"
"proto"
"query"
"strings"
"switchboard"
)
// ChampionPageParam is the render-ready view of one champion: display
// name plus portrait image URL.
type ChampionPageParam struct {
	Name   string
	ImgURL string
}

// PageParams carries everything the results page needs: match counts
// and both team lists.
type PageParams struct {
	Title               string
	Matching            uint32
	Matching_Percentage float32 // NOTE(review): underscore name is un-idiomatic Go; may be relied on by templates/JSON
	Available           uint32
	Total               uint32
	Allies              []ChampionPageParam
	Enemies             []ChampionPageParam
	Valid               bool
}

// SubqueryBundle pairs an exploratory champion id with the backend's
// response for the query that included that champion.
type SubqueryBundle struct {
	Explorer int32
	Response proto.QueryResponse
}
// ENABLE_EXPLORATORY_SUBQUERIES toggles the per-champion "what if we
// added X" subqueries issued alongside each team query.
const ENABLE_EXPLORATORY_SUBQUERIES = true

// query_id is a monotonically increasing id stamped onto outgoing
// queries by form_request.
// TODO: this probably shouldn't be a global.
var query_id = 0

// switchb is the shared switchboard client; main() replaces this
// placeholder with a real connection at startup.
// TODO: figure out how to pass this to function handler in a way that will be
// maintained between connections.
var switchb = switchboard.SwitchboardClient{} //, _ = switchboard.NewClient("tcp", &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 14002})
// Fetch index.html (the main app). Simple, static file.
func index_handler(w http.ResponseWriter, r *http.Request) {
log.Println("index requested")
data, err := ioutil.ReadFile("html/index.html")
if err != nil {
log.Println("index.html not present!")
http.NotFound(w, r)
} else {
w.Write(data)
}
}
/**
 * This function is a handler for basic team queries that specify a
 * list of allies and enemies. It builds a query from the URL parameters
 * and sends it to a Cleo backend. Once the Cleo backend responds it
 * serializes the response to JSON and returns it to the client.
 */
func simple_team(w http.ResponseWriter, r *http.Request) {
	// allies/enemies arrive as comma-separated champion names.
	allies := strings.Split(r.FormValue("allies"), ",")
	enemies := strings.Split(r.FormValue("enemies"), ",")
	qry := form_request(allies, enemies)
	response := proto.QueryResponse{}
	is_valid := validate_request(qry)
	if is_valid {
		log.Println(fmt.Sprintf("%s: valid team query [allies=;enemies=]", query.GetQueryId(qry)))
		response = request(qry)
		if ENABLE_EXPLORATORY_SUBQUERIES {
			log.Println(fmt.Sprintf("%s: submitting subqueries", query.GetQueryId(qry)))
			subqueries := make(chan SubqueryBundle)
			num_subqueries := 0
			// Launch a bunch of different subqueries.
			for _, cid := range proto.ChampionType_value {
				check_champ := true
				// Check if the champion is already present on the winner's
				// list.
				for _, winner := range qry.Winners {
					if winner == cid {
						check_champ = false
					}
				}
				// If the champion already exists, don't fire off this
				// subquery.
				if check_champ {
					go explore_subquery(qry, cid, subqueries)
					num_subqueries += 1
				}
			}
			// Collect all of the subquery responses.
			for i := 0; i < num_subqueries; i++ {
				bundle := <-subqueries
				next_champ := proto.QueryResponse_ExploratoryChampionSubquery{
					Explorer: proto.ChampionType(bundle.Explorer),
					// NOTE(review): SubqueryBundle has no Results field
					// (only Explorer/Response) — this does not compile as
					// written; presumably bundle.Response was intended.
					Results: bundle.Results,
					// NOTE(review): Response is a struct value, so the
					// nil comparison below also cannot compile — confirm
					// whether Response was meant to be a pointer.
					Valid: (bundle.Response != nil),
				}
				response.NextChamp = append(response.NextChamp, next_champ)
			}
		}
		data, err := json.Marshal(response)
		if err != nil {
			log.Println("SimpleTeam: invalid response.")
			// TODO: handle this error appropriately.
		}
		w.Write(data)
	} else {
		log.Println(w, "SimpleTeam: invalid query.")
	}
}
/**
 * This function issues another modified query to the backend that includes
 * an explorer ID (champion ID) that was not specified by the user. This
 * will compute all of the standard stats if the user were to add this
 * champion. The result (or a sentinel on invalid queries) is sent on out.
 */
func explore_subquery(qry proto.GameQuery, explorer_id int32, out chan SubqueryBundle) {
	qry.Winners = append(qry.Winners, explorer_id)

	// If the query is valid, submit it and pass the response back to the
	// output channel.
	if validate_request(qry) {
		response := request(qry)
		bundle_response := SubqueryBundle{
			Explorer: explorer_id,
			Response: response,
		}
		out <- bundle_response
		// If it's not a valid query we should return a nil response so that
		// we can still aggregate everything appropriately.
	} else {
		bundle_response := SubqueryBundle{
			Explorer: explorer_id,
			// NOTE(review): Response is declared as a struct value
			// (proto.QueryResponse), so assigning nil here cannot
			// compile — confirm whether the field should be a pointer.
			Response: nil,
		}
		out <- bundle_response
	}
}
// validate_request reports whether every champion token in the query
// resolved to a known champion, i.e. neither team contains UNKNOWN.
func validate_request(qry proto.GameQuery) bool {
	for _, champ := range qry.Winners {
		if champ == proto.ChampionType_UNKNOWN {
			return false
		}
	}
	for _, champ := range qry.Losers {
		if champ == proto.ChampionType_UNKNOWN {
			return false
		}
	}
	return true
}
// form_request builds a GameQuery from ally/enemy champion-name lists,
// stamping it with the current process id and a fresh query id. Empty
// name entries (from trailing commas) are skipped.
func form_request(allies []string, enemies []string) proto.GameQuery {
	qry := proto.GameQuery{}
	qry.QueryProcess = gproto.Uint64(uint64(os.Getpid()))
	qry.QueryId = gproto.Uint64(uint64(query_id))
	query_id += 1

	// Map the strings specified in the url to ChampionType's.
	for _, name := range allies {
		if len(name) > 0 {
			log.Println(fmt.Sprintf("%s: ally required = %s", query.GetQueryId(qry), libcleo.String2ChampionType(name)))
			qry.Winners = append(qry.Winners, libcleo.String2ChampionType(name))
		}
	}
	for _, name := range enemies {
		if len(name) > 0 {
			log.Println(fmt.Sprintf("%s: enemy required = %s", query.GetQueryId(qry), libcleo.String2ChampionType(name)))
			// BUG FIX: enemies were previously appended to qry.Winners
			// (seeded from qry.Losers), so the losing team was never
			// populated and the ally list was clobbered.
			qry.Losers = append(qry.Losers, libcleo.String2ChampionType(name))
		}
	}
	return qry
}
// request sends qry to a Cleo backend over a switchboard stream and
// returns the decoded response. Messages are framed with a trailing
// '|' byte in both directions. On connection failure it returns a
// response with Successful=false; on an empty reply (backend crash)
// it returns the zero-value response.
func request(qry proto.GameQuery) proto.QueryResponse {
	// Get a switchboard socket to talk to server
	conn, cerr := switchb.GetStream()
	if cerr != nil {
		log.Println(fmt.Sprintf("%s: couldn't connect to a Cleo server.", query.GetQueryId(qry)))
		return proto.QueryResponse{Successful: gproto.Bool(false)}
	}
	// Form a GameQuery.
	// NOTE(review): the Marshal/Write/Unmarshal errors below are all
	// discarded; a corrupted frame would be silently ignored.
	data, _ := gproto.Marshal(&qry)
	log.Println(fmt.Sprintf("%s: query sent", query.GetQueryId(qry)))
	rw := bufio.NewReadWriter(bufio.NewReader(*conn), bufio.NewWriter(*conn))
	rw.WriteString(string(data) + "|")
	rw.Flush()
	// Unmarshal the response.
	response := proto.QueryResponse{}
	log.Println(fmt.Sprintf("%s: awaiting response...", query.GetQueryId(qry)))
	reply, _ := rw.ReadString('|')
	// If we get a zero-length reply this means the backend crashed. Don't
	// freak out. We got this.
	if len(reply) == 0 {
		return response
	}
	// Strip the trailing '|' delimiter before decoding.
	gproto.Unmarshal([]byte(reply[:len(reply)-1]), &response)
	log.Println(fmt.Sprintf("%s: valid response received", query.GetQueryId(qry)))
	return response
}
// main registers the HTTP handlers, connects the shared switchboard
// client to the local backend, and serves on port 8088.
func main() {
	http.HandleFunc("/", index_handler)
	http.HandleFunc("/team/", simple_team)

	// Initialize the connection to the backend via switchboard.
	cerr := error(nil)
	switchb, cerr = switchboard.NewClient("tcp", &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 14002})
	if cerr != nil {
		log.Fatal("Couldn't find any available backends.")
	}
	// Serve any files in static/ directly from the filesystem.
	http.HandleFunc("/static/", func(w http.ResponseWriter, r *http.Request) {
		log.Println("GET", r.URL.Path[1:])
		http.ServeFile(w, r, "html/"+r.URL.Path[1:])
	})
	log.Println("Awaiting requests...")
	// ListenAndServe blocks; if it ever returns, log the error and exit.
	log.Fatal("Couldn't listen on port 8088:", http.ListenAndServe(":8088", nil))
}
|
package render
import (
"encoding/json"
"net/http"
)
const ContentJSON = "application/json"
type jsonRender struct{}
func (_ jsonRender) Render(rw http.ResponseWriter, code int, data ...interface{}) error {
rw.Header().Set("Content-Type", ContentJSON+"; charset=utf-8")
rw.WriteHeader(code)
encoder := json.NewEncoder(rw)
return encoder.Encode(data[0])
}
|
package services
import (
"github.com/andrewesteves/taskee-api/entities"
"github.com/andrewesteves/taskee-api/utils"
"github.com/andrewesteves/taskee-api/validations"
"github.com/gofiber/fiber"
"github.com/jinzhu/gorm"
)
// UserService bundles the user HTTP handlers (register/login/logout)
// around a shared gorm database handle.
type UserService struct {
	DB *gorm.DB // open gorm connection used by all handlers
}
// Register creates a new user: it parses the body, validates it,
// rejects duplicate e-mails, hashes the password, issues an API token
// and persists the record, echoing the stored user as JSON.
func (u UserService) Register(ctx *fiber.Ctx) {
	var err error
	user := new(entities.User)
	if err := ctx.BodyParser(user); err != nil {
		ctx.Status(503).JSON(fiber.Map{
			"message": "Whoops! We could not process your request",
		})
		return
	}
	// Run the validations once (previously computed twice: once for the
	// length check and again for the response body).
	if errs := validations.UserRegister(*user); len(errs) > 0 {
		ctx.Status(422).JSON(errs)
		return
	}
	// Reject duplicate e-mail addresses.
	var userDB entities.User
	u.DB.Where("email = ?", user.Email).First(&userDB)
	if userDB.ID != 0 {
		ctx.Status(422).JSON(fiber.Map{
			"message": "We already have a registered user with this e-mail",
		})
		return
	}
	// Never store the plaintext password.
	user.Password, err = utils.GenerateHash(user.Password)
	if err != nil {
		ctx.Status(503).JSON(fiber.Map{
			"message": "Whoops! We could not process your credentials correctly",
		})
		return
	}
	user.Token, err = utils.GenerateToken()
	if err != nil {
		ctx.Status(503).JSON(fiber.Map{
			"message": "Whoops! We could not generate your access key",
		})
		return
	}
	u.DB.Save(&user)
	ctx.JSON(user)
}
// Login authenticates a user by e-mail and password, rotates their API
// token, and returns name/email/token as JSON.
func (u UserService) Login(ctx *fiber.Ctx) {
	user := new(entities.User)
	var userDB entities.User
	var err error
	if err = ctx.BodyParser(user); err != nil {
		ctx.Status(503).JSON(fiber.Map{
			"message": "Whoops! We could not process your request",
		})
		return
	}
	// Run the validations once (previously computed twice: once for the
	// length check and again for the response body).
	if errs := validations.UserLogin(*user); len(errs) > 0 {
		ctx.Status(422).JSON(errs)
		return
	}
	u.DB.Where("email = ?", user.Email).First(&userDB)
	// A missing user leaves userDB zero-valued, so the hash comparison
	// below fails and we fall through to the same 401.
	if !utils.CompareHash(userDB.Password, user.Password) {
		ctx.Status(401).JSON(fiber.Map{
			"message": "These credentials do not match with our records",
		})
		return
	}
	// Rotate the token on every successful login.
	userDB.Token, err = utils.GenerateToken()
	if err != nil {
		ctx.Status(503).JSON(fiber.Map{
			"message": "Whoops! We could not generate your access key",
		})
		return
	}
	u.DB.Save(&userDB)
	ctx.JSON(fiber.Map{
		"name":  userDB.Name,
		"email": userDB.Email,
		"token": userDB.Token,
	})
}
// Logout clears the authenticated user's API token (invalidating it)
// and confirms with a JSON message.
func (u UserService) Logout(ctx *fiber.Ctx) {
	current := ctx.Locals("user").(entities.User)
	current.Token = ""
	u.DB.Save(&current)
	ctx.JSON(fiber.Map{
		"message": "You are logged out",
	})
}
|
/*
Copyright © 2022 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package svc
import (
"fmt"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/debug"
"github.com/rancher-sandbox/rancher-desktop/src/go/privileged-service/pkg/port"
)
// Supervisor implements the Windows service handler interface for the
// Rancher Desktop Privileged Service, delegating the real work to the
// embedded port server.
type Supervisor struct {
	eventLogger debug.Log    // Windows event log sink
	portServer  *port.Server // server started/stopped with the service
}
// NewSupervisor wires a port server and an event logger into a
// Supervisor ready to be handed to the Windows service dispatcher.
func NewSupervisor(portServer *port.Server, logger debug.Log) *Supervisor {
	s := &Supervisor{
		portServer:  portServer,
		eventLogger: logger,
	}
	return s
}
// Execute is the core of the supervisor service to handle all
// the service related event requests. Any outside function
// calls MUST be called in a goroutine.
// The signature must NOT change since it is part of the standard
// service handler interface
// This implements the [golang.org/x/sys/windows/svc.Handler] interface
func (s *Supervisor) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) {
	const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown
	changes <- svc.Status{State: svc.StartPending}
	// Start the port server asynchronously; a start failure surfaces on
	// this channel and aborts the service below.
	startErr := make(chan error)
	go func() {
		s.eventLogger.Info(uint32(windows.NO_ERROR), "port server is starting")
		startErr <- s.portServer.Start()
	}()
	changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}
loop:
	for {
		select {
		case e := <-startErr:
			// Port server failed to start (or exited with an error).
			s.eventLogger.Error(uint32(windows.ERROR_EXCEPTION_IN_SERVICE), fmt.Sprintf("supervisor failed to start: %v", e))
			return false, uint32(windows.ERROR_SERVICE_NEVER_STARTED)
		case c := <-r:
			switch c.Cmd {
			case svc.Interrogate:
				changes <- c.CurrentStatus
			case svc.Stop, svc.Shutdown:
				s.portServer.Stop()
				s.eventLogger.Info(uint32(windows.NO_ERROR), "port server is stopping")
				// NOTE(review): svc.Stopped is reported here and
				// svc.StopPending after the loop; the conventional
				// order is StopPending first, then Stopped — confirm
				// against the svc package documentation.
				changes <- svc.Status{State: svc.Stopped, Accepts: cmdsAccepted}
				break loop
			default:
				s.eventLogger.Error(uint32(windows.ERROR_INVALID_SERVICE_CONTROL), fmt.Sprintf("unexpected control request #%d", c))
			}
		}
	}
	changes <- svc.Status{State: svc.StopPending}
	return false, 0
}
|
package baudot
import (
"fmt"
"testing"
)
// TestITA2EncodeChar exercises single-character encoding: a plain
// figure, an unmappable character (error), and both shift directions
// (letters<->figures). failedText is a format string receiving the
// actual (code, shifted, err) triple.
func TestITA2EncodeChar(t *testing.T) {
	tt := []struct {
		caseName    string
		char        rune
		charset     Charset // charset active before encoding
		expectCode  byte
		expectShift bool // whether encoding requires a charset shift
		shouldFail  bool
		failedText  string
	}{
		{
			caseName:    "test regular char",
			char:        '=',
			charset:     Figures,
			expectCode:  30,
			expectShift: false,
			shouldFail:  false,
			failedText:  "code for '=' should be 30, got %v",
		},
		{
			caseName:    "test invalid char",
			char:        '$',
			charset:     Letters,
			expectCode:  0,
			expectShift: false,
			shouldFail:  true,
			failedText:  "encode code for char '$' should return error, got %v",
		},
		{
			caseName:    "test shift to letters charset",
			char:        'A',
			charset:     Figures,
			expectCode:  3,
			expectShift: true,
			shouldFail:  false,
			failedText:  "value of shifted Should Be true, got %v",
		},
		{
			caseName:    "test shift to figures charset",
			char:        '6',
			charset:     Letters,
			expectCode:  21,
			expectShift: true,
			shouldFail:  false,
			failedText:  "value of shifted Should Be true, got %v",
		},
	}
	c := NewITA2(false)
	for _, tc := range tt {
		t.Run(tc.caseName, func(t *testing.T) {
			code, shifted, err := c.EncodeChar(tc.char, tc.charset)
			if err != nil {
				if !tc.shouldFail {
					t.Errorf(tc.failedText, fmt.Sprintf("%v, %v, %v", code, shifted, err))
				}
			} else {
				if tc.expectCode != code || tc.expectShift != shifted {
					t.Errorf(tc.failedText, fmt.Sprintf("%v, %v, %v", code, shifted, err))
				}
			}
		})
	}
}
// TestITA2DecodeChar exercises single-code decoding: letter and figure
// codes, an out-of-range code (error), and both shift control codes
// (which yield NUL plus shifted=true).
func TestITA2DecodeChar(t *testing.T) {
	tt := []struct {
		caseName    string
		code        byte
		charset     Charset // charset active before decoding
		expectChar  rune
		expectShift bool // whether the code is a shift control
		shouldFail  bool
		failedText  string
	}{
		{
			caseName:    "test letter code",
			code:        10,
			charset:     Letters,
			expectChar:  'R',
			expectShift: false,
			shouldFail:  false,
			failedText:  "expect 'R', got %v",
		},
		{
			caseName:    "test figure code 1",
			code:        20,
			charset:     Figures,
			expectChar:  '£',
			expectShift: false,
			shouldFail:  false,
			failedText:  "expect '£', got %v",
		},
		{
			caseName:    "test figure code 2",
			code:        5,
			charset:     Figures,
			expectChar:  '\'',
			expectShift: false,
			shouldFail:  false,
			failedText:  "expect single quote('), got %v",
		},
		{
			caseName:    "test invalid code",
			code:        50,
			charset:     Letters,
			expectChar:  '\u0000',
			expectShift: false,
			shouldFail:  true,
			failedText:  "expect an error, got %v",
		},
		{
			caseName:    "test shift to letters",
			code:        31,
			charset:     Figures,
			expectChar:  '\u0000',
			expectShift: true,
			shouldFail:  false,
			failedText:  "expect LS control, got %v",
		},
		{
			caseName:    "test shift to figures",
			code:        27,
			charset:     Letters,
			expectChar:  '\u0000',
			expectShift: true,
			shouldFail:  false,
			failedText:  "expect FS control, got %v",
		},
	}
	c := NewITA2(false)
	for _, tc := range tt {
		t.Run(tc.caseName, func(t *testing.T) {
			char, shifted, err := c.DecodeChar(tc.code, tc.charset)
			if err != nil {
				if !tc.shouldFail {
					t.Errorf(tc.failedText, fmt.Sprintf("%v, %v, %v", char, shifted, err))
				}
			} else {
				if tc.expectChar != char || tc.expectShift != shifted {
					t.Errorf(tc.failedText, fmt.Sprintf("%v, %v, %v", char, shifted, err))
				}
			}
		})
	}
}
// TestITA2Encode exercises whole-message encoding: a mixed
// letters/figures message, an unmappable message (error), and the same
// unmappable message with ignErr set, which skips bad characters but
// still reports failure.
func TestITA2Encode(t *testing.T) {
	tt := []struct {
		caseName   string
		msg        string
		ignErr     bool // whether the codec skips unmappable characters
		expect     []byte
		shouldFail bool
		failedText string
	}{
		{
			caseName:   "test letters and figures",
			msg:        "X&Y",
			ignErr:     false,
			expect:     []byte{0, 31, 29, 27, 26, 31, 21},
			shouldFail: false,
			failedText: fmt.Sprintf("expect %v, got %%v", []byte{0, 31, 29, 27, 26, 31, 21}),
		},
		{
			caseName:   "test invalid msg",
			msg:        "1 + 1 $ 2",
			ignErr:     false,
			expect:     nil,
			shouldFail: true,
			failedText: "expect an error, got %v",
		},
		{
			caseName:   "test invalid msg, ignore error",
			msg:        "1 + 1 $ 2",
			ignErr:     true,
			expect:     []byte{0, 31, 27, 23, 4, 17, 4, 23, 4, 4, 19},
			shouldFail: true,
			failedText: fmt.Sprintf("expect %v, got %%v", []byte{0, 31, 27, 23, 4, 17, 4, 23, 4, 4, 19}),
		},
	}
	c := NewITA2(false)
	for _, tc := range tt {
		t.Run(tc.caseName, func(t *testing.T) {
			c.ignErr = tc.ignErr
			codes, err := c.Encode(tc.msg)
			if err != nil {
				if !tc.shouldFail {
					t.Errorf(tc.failedText, fmt.Sprintf("%v, %v", codes, err))
				}
			} else {
				// Compare the produced code sequence element by element.
				if len(tc.expect) != len(codes) {
					t.Errorf(tc.failedText, fmt.Sprintf("%v, %v", codes, err))
				} else {
					for index, value := range tc.expect {
						if value != codes[index] {
							t.Errorf(tc.failedText, fmt.Sprintf("%v, %v", codes, err))
						}
					}
				}
			}
		})
	}
}
// TestITA2Decode exercises whole-message decoding: valid sequences
// (including embedded NUL codes), an invalid code (error), and the
// same invalid sequence with ignErr set.
func TestITA2Decode(t *testing.T) {
	tt := []struct {
		caseName   string
		codes      []byte
		ignErr     bool // whether the codec skips invalid codes
		expect     string
		shouldFail bool
		failedText string
	}{
		{
			caseName:   "test decoding valid code",
			codes:      []byte{0, 31, 29, 27, 26, 31, 21},
			ignErr:     false,
			expect:     "X&Y",
			shouldFail: false,
			failedText: "expect 'X&Y', got %v",
		},
		{
			caseName:   "test decoding valid code contains null",
			codes:      []byte{0, 27, 1, 31, 22, 0, 0, 24, 27, 26, 0, 31, 10, 27, 19, 31, 9, 27, 19},
			ignErr:     false,
			expect:     "3PO&R2D2",
			shouldFail: false,
			failedText: "expect '3PO&R2D2', got %v",
		},
		{
			caseName:   "test valid code",
			codes:      []byte{0, 27, 1, 31, 100, 12},
			ignErr:     false,
			expect:     "",
			shouldFail: true,
			failedText: "expect an error, got %v",
		},
		{
			caseName:   "test valid code, ignore error",
			codes:      []byte{0, 27, 1, 31, 100, 12},
			ignErr:     true,
			expect:     "3N",
			shouldFail: true,
			failedText: "expect an error, got %v",
		},
	}
	c := NewITA2(false)
	for _, tc := range tt {
		t.Run(tc.caseName, func(t *testing.T) {
			c.ignErr = tc.ignErr
			str, err := c.Decode(tc.codes)
			if err != nil {
				if !tc.shouldFail {
					t.Errorf(tc.failedText, fmt.Sprintf("%v, %v", str, err))
				}
			} else {
				if tc.expect != str {
					t.Errorf(tc.failedText, fmt.Sprintf("%v, %v", str, err))
				}
			}
		})
	}
}
|
package main
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/linkedlocked/webapp/database"
"github.com/linkedlocked/webapp/models"
)
// GatewayUpdate is the JSON payload a gateway posts to report a pole's
// lock state change.
type GatewayUpdate struct {
	GatewayID    int    // reporting gateway
	PoleID       int    // pole whose state changed
	Locked       bool   // new lock state
	LockedCardID string // UID of the card that locked the pole (when Locked)
	Token        string // NOTE(review): not checked in the handler below — confirm whether auth is intended
}
// routePostGatewayUpdate applies a gateway's lock-state report to the
// referenced pole (attaching or clearing the locking card) and echoes
// the parsed update back as JSON.
func routePostGatewayUpdate(c *gin.Context) {
	gatewayUpdateObject := GatewayUpdate{}
	if c.BindJSON(&gatewayUpdateObject) != nil {
		c.String(500, "Something went wrong in binding.")
		return
	}
	// Typo fixed in the log line ("succesful").
	fmt.Println("[routePostGatewayUpdate] <<Binding successful>>")
	fmt.Println(gatewayUpdateObject)
	pole := models.Pole{}
	database.DB.First(&pole, gatewayUpdateObject.PoleID)
	pole.Locked = gatewayUpdateObject.Locked
	// If the pole is now locked, then attach the key struct to it.
	if pole.Locked {
		fmt.Println("Pole is locked, getting Card:", gatewayUpdateObject.LockedCardID)
		pole.LockedBy = models.GetCardByUID(gatewayUpdateObject.LockedCardID)
		fmt.Println("LockedBy:", pole.LockedBy)
	} else {
		fmt.Println("Pole is NOT locked, setting Card{}")
		pole.LockedBy = models.Card{}
		pole.CardRefer = 0
	}
	database.DB.Save(&pole)
	// BUG FIX: a second unconditional c.JSON(200, ...) used to run after
	// the if/else, writing a 200 body on top of the 500 error reply.
	c.JSON(200, gatewayUpdateObject)
}
// routePostFailedUnlock records a failed unlock attempt for the given
// pole/chip pair and acknowledges with a plain "ok".
func routePostFailedUnlock(c *gin.Context) {
	fmt.Println("[routePostFailedUnlock] <<successful>>")
	attempt := models.FailedAttempt{
		PoleID: c.Param("poleID"),
		CardID: c.Param("chipID"),
	}
	attempt.Create()
	c.String(200, "ok")
}
// routeShowFailedAttempts lists every recorded failed unlock attempt
// as JSON.
func routeShowFailedAttempts(c *gin.Context) {
	attempts := models.GetFailedAttempts()
	c.JSON(200, attempts)
}
// routeReceiveState looks up the gateway identified by its private id
// and returns it together with its poles; OK is false when no such
// gateway exists.
func routeReceiveState(c *gin.Context) {
	privateIdentifier := c.Param("privateid")
	thisGateway := models.GetGatewayByPrivateIdentifier(privateIdentifier)
	ok := thisGateway.ID > 0
	c.JSON(200, gin.H{
		"OK":                ok,
		"PrivateIdentifier": privateIdentifier,
		"ThisGateway":       thisGateway,
		"Poles":             models.GetPolesByGateway(thisGateway),
	})
}
|
package sudoku
import (
"fmt"
"math/rand"
"strconv"
)
// nakedSingleTechnique finds cells with exactly one remaining
// possibility.
type nakedSingleTechnique struct {
	*basicSolveTechnique
}

// hiddenSingleTechnique finds numbers that fit in only one cell of a
// group.
type hiddenSingleTechnique struct {
	*basicSolveTechnique
}

// obviousInCollectionTechnique finds groups with a single unfilled
// cell, whose value is therefore forced.
type obviousInCollectionTechnique struct {
	*basicSolveTechnique
}
// humanLikelihood scores how likely a human is to spot this step; the
// last open cell in a group is the easiest possible observation (1.0).
func (self *obviousInCollectionTechnique) humanLikelihood(step *SolveStep) float64 {
	return self.difficultyHelper(1.0)
}
// Description explains why the target cell's value is forced: it is
// the last unfilled cell in its row, column, or block.
func (self *obviousInCollectionTechnique) Description(step *SolveStep) string {
	if len(step.TargetNums) == 0 {
		return ""
	}
	num := step.TargetNums[0]

	groupName, groupNumber := "<NONE>", 0
	switch self.groupType {
	case _GROUP_ROW:
		groupName, groupNumber = "row", step.TargetCells.Row()
	case _GROUP_COL:
		groupName, groupNumber = "column", step.TargetCells.Col()
	case _GROUP_BLOCK:
		groupName, groupNumber = "block", step.TargetCells.Block()
	}
	return fmt.Sprintf("%s is the only cell in %s %d that is unfilled, and it must be %d", step.TargetCells.Description(), groupName, groupNumber, num)
}
// Candidates returns up to maxResults applications of this technique
// on grid, delegating to the shared candidate machinery.
func (self *obviousInCollectionTechnique) Candidates(grid Grid, maxResults int) []*SolveStep {
	return self.candidatesHelper(self, grid, maxResults)
}
// find scans the groups of this technique's type (via getter) for
// collections with a single open cell, reporting steps to coordinator.
func (self *obviousInCollectionTechnique) find(grid Grid, coordinator findCoordinator) {
	obviousInCollection(grid, self, self.getter(grid), coordinator)
}
// obviousInCollection is the shared worker for the obvious-in-* family:
// it visits each collection (row/col/block) in random order and, when a
// collection has exactly one open cell, emits a step filling that cell
// with its single possibility. The coordinator controls early exit.
func obviousInCollection(grid Grid, technique SolveTechnique, collectionGetter func(index int) CellSlice, coordinator findCoordinator) {
	// Random order so repeated solves don't always produce the same step.
	indexes := rand.Perm(DIM)
	for _, index := range indexes {
		if coordinator.shouldExitEarly() {
			return
		}
		collection := collectionGetter(index)
		openCells := collection.FilterByHasPossibilities()
		if len(openCells) == 1 {
			//Okay, only one cell in this collection has an opening, which must mean it has one possibility.
			cell := openCells[0]
			possibilities := cell.Possibilities()
			//len(possibilities) SHOULD be 1, but check just in case.
			if len(possibilities) == 1 {
				possibility := possibilities[0]
				step := &SolveStep{
					Technique:    technique,
					TargetCells:  CellRefSlice{cell.Reference()},
					TargetNums:   IntSlice{possibility},
					PointerCells: collection.RemoveCells(CellSlice{cell}).CellReferenceSlice(),
				}
				if step.IsUseful(grid) {
					if coordinator.foundResult(step) {
						return
					}
				}
			}
		}
	}
}
// humanLikelihood scores how likely a human is to spot a naked single;
// the base weight is 40.0 (harder to see than an obvious-in-collection).
func (self *nakedSingleTechnique) humanLikelihood(step *SolveStep) float64 {
	return self.difficultyHelper(40.0)
}
// Description explains a naked single: the target cell has exactly one
// remaining legal number.
func (self *nakedSingleTechnique) Description(step *SolveStep) string {
	if len(step.TargetNums) == 0 {
		return ""
	}
	return fmt.Sprintf("%d is the only remaining valid number for that cell", step.TargetNums[0])
}
// Candidates returns up to maxResults applications of this technique
// on grid, delegating to the shared candidate machinery.
func (self *nakedSingleTechnique) Candidates(grid Grid, maxResults int) []*SolveStep {
	return self.candidatesHelper(self, grid, maxResults)
}
// find drains the grid's possibility-count queue for cells with fewer
// than two possibilities (i.e. exactly one, for consistent grids) and
// emits a fill step for each, until the queue is exhausted or the
// coordinator signals early exit.
func (self *nakedSingleTechnique) find(grid Grid, coordinator findCoordinator) {
	//TODO: test that this will find multiple if they exist.
	getter := grid.queue().NewGetter()
	for {
		if coordinator.shouldExitEarly() {
			return
		}
		obj := getter.GetSmallerThan(2)
		if obj == nil {
			//There weren't any cells with one option left.
			//If there weren't any, period, then results is still nil already.
			return
		}
		cell := obj.(Cell)
		step := &SolveStep{
			Technique:    self,
			TargetCells:  CellRefSlice{cell.Reference()},
			TargetNums:   IntSlice{cell.implicitNumber()},
			PointerCells: cell.Neighbors().FilterByFilled().CellReferenceSlice(),
		}
		if step.IsUseful(grid) {
			if coordinator.foundResult(step) {
				return
			}
		}
	}
}
// humanLikelihood returns the weight used to model how often a human solver
// would apply this technique for the given step. The base weight 18.0 is
// scaled by difficultyHelper; the meaning of the scale is defined elsewhere.
func (self *hiddenSingleTechnique) humanLikelihood(step *SolveStep) float64 {
	return self.difficultyHelper(18.0)
}
// Description renders a human-readable explanation of a hidden-single step,
// phrased in terms of the group (block, row, or column) the technique
// operated on. Returns "" for malformed steps.
func (self *hiddenSingleTechnique) Description(step *SolveStep) string {
	//TODO: format the text to say "first/second/third/etc"
	if len(step.TargetCells) == 0 || len(step.TargetNums) == 0 {
		return ""
	}
	target := step.TargetCells[0]
	num := step.TargetNums[0]

	// Start from the <NONE> defaults so an unrecognized group type falls
	// through with sentinel values, exactly like an explicit default case.
	groupName, otherGroupName := "<NONE>", "<NONE>"
	groupNumber := -1
	otherGroupNumber := "<NONE>"

	switch self.groupType {
	case _GROUP_BLOCK:
		groupName, otherGroupName = "block", "cell"
		groupNumber = step.TargetCells.Block()
		otherGroupNumber = step.TargetCells.Description()
	case _GROUP_ROW:
		groupName, otherGroupName = "row", "column"
		groupNumber = step.TargetCells.Row()
		otherGroupNumber = strconv.Itoa(target.Col)
	case _GROUP_COL:
		groupName, otherGroupName = "column", "row"
		groupNumber = step.TargetCells.Col()
		otherGroupNumber = strconv.Itoa(target.Row)
	}
	return fmt.Sprintf("%d is required in the %d %s, and %s is the only %s it fits", num, groupNumber, groupName, otherGroupNumber, otherGroupName)
}
// Candidates returns up to maxResults SolveSteps that this technique can find
// in grid, delegating the shared search scaffolding to candidatesHelper.
func (self *hiddenSingleTechnique) Candidates(grid Grid, maxResults int) []*SolveStep {
	return self.candidatesHelper(self, grid, maxResults)
}
// find searches grid for hidden singles within the groups yielded by
// self.getter, reporting results through coordinator.
func (self *hiddenSingleTechnique) find(grid Grid, coordinator findCoordinator) {
	//TODO: test that if there are multiple we find them both.
	necessaryInCollection(grid, self, self.getter(grid), coordinator)
}
// necessaryInCollection looks, in each group yielded by collectionGetter, for
// a number that is possible in exactly one cell of the group (a "hidden
// single"). Groups and candidate numbers are visited in random order; every
// useful step is handed to the coordinator, which may stop the search.
func necessaryInCollection(grid Grid, technique SolveTechnique, collectionGetter func(index int) CellSlice, coordinator findCoordinator) {
	for _, groupIndex := range rand.Perm(DIM) {
		if coordinator.shouldExitEarly() {
			return
		}
		collection := collectionGetter(groupIndex)
		// counts[n-1] = number of cells in this group where n is still possible.
		counts := make([]int, DIM)
		for _, cell := range collection {
			for _, possibility := range cell.Possibilities() {
				counts[possibility-1]++
			}
		}
		for _, numIndex := range rand.Perm(DIM) {
			if counts[numIndex] != 1 {
				continue
			}
			// We know our target number; locate the one cell that can take it.
			num := numIndex + 1
			for _, cell := range collection {
				if !cell.Possible(num) {
					continue
				}
				// Found it... just make sure the step is useful (it would be
				// rare for it not to be); otherwise keep trying.
				step := &SolveStep{
					Technique:    technique,
					TargetCells:  CellRefSlice{cell.Reference()},
					TargetNums:   IntSlice{num},
					PointerCells: collection.FilterByUnfilled().RemoveCells(CellSlice{cell}).CellReferenceSlice(),
				}
				if step.IsUseful(grid) {
					if coordinator.foundResult(step) {
						return
					}
				}
			}
		}
	}
}
|
package network
import (
"encoding/json"
"eos-network/crypto"
)
// ChainIdType identifies a chain by a SHA-256 digest.
type ChainIdType Sha256Type

// GetChainResult is the decoded response of the chain-info RPC endpoint.
// NOTE(review): field semantics are inferred from the json tags only —
// verify against the node's API schema.
type GetChainResult struct {
	ServerVersion            string      `json:"server_version"`
	ChainId                  Sha256Type  `json:"chain_id"`
	HeadBlockNum             uint32      `json:"head_block_num"`
	LastIrreversibleBlockNum uint32      `json:"last_irreversible_block_num"`
	LastIrreversibleBlockId  Sha256Type  `json:"last_irreversible_block_id"`
	HeadBlockId              Sha256Type  `json:"head_block_id"`
	HeadBlockTime            TimePoint   `json:"head_block_time"`
	HeadBlockProducer        AccountName `json:"head_block_producer"`
	VirtualBlockCpuLimit     uint64      `json:"virtual_block_cpu_limit"`
	VirtualBlockNetLimit     uint64      `json:"virtual_block_net_limit"`
	BlockCpuLimit            uint64      `json:"block_cpu_limit"`
	BlockNetLimit            uint64      `json:"block_net_limit"`
}

// GetBlockResult embeds the signed block itself plus identifying metadata
// returned alongside it.
type GetBlockResult struct {
	SignedBlock
	Id             Sha256Type `json:"id"`
	BlockNum       uint32     `json:"block_num"`
	RefBlockPrefix uint32     `json:"ref_block_prefix"`
}
//func (m *GetBlockResult) MarshalJSON() []byte {
// data, err := MarshalBinary(m)
// if err != nil {
// return nil
// }
// return data
//}
//
//
//func (m *GetBlockResult) UnmarshalJSON(data []byte) error {
// err := json.Unmarshal(data, m)
// if err != nil {
// return err
// }
// return nil
//}
// GetAbiResult pairs an account with its ABI definition.
type GetAbiResult struct {
	AccountName AccountName `json:"account_name"`
	Abi         ABIDef      `json:"abi"`
}

// GetCodeResult describes the contract code deployed to an account.
type GetCodeResult struct {
	AccountName AccountName `json:"account_name"`
	Wast        string      `json:"wast"`
	Wasm        string      `json:"wasm"`
	CodeHash    Sha256Type  `json:"code_hash"`
	Abi         *ABIDef     `json:"abi"`
}

// GetAccountResult is the decoded response of the get-account RPC endpoint.
// Pointer fields (TotalResources, SelfDelegatedBandwidth, RefundRequest,
// VoterInfo) stay nil when the corresponding object is absent from the reply.
type GetAccountResult struct {
	AccountName            AccountName          `json:"account_name"`
	HeadBlockNum           uint32               `json:"head_block_num"`
	HeadBlockTime          TimePointSec         `json:"head_block_time"`
	Privileged             bool                 `json:"privileged"`
	LastCodeUpdate         TimePointSec         `json:"last_code_update"`
	Created                TimePointSec         `json:"created"`
	CoreLiquidBalance      Asset                `json:"core_liquid_balance"`
	RAMQuota               int64                `json:"ram_quota"`
	NetWeight              int64                `json:"net_weight"`
	CPUWeight              int64                `json:"cpu_weight"`
	NetLimit               AccountResourceLimit `json:"net_limit"`
	CPULimit               AccountResourceLimit `json:"cpu_limit"`
	RAMUsage               int64                `json:"ram_usage"`
	Permissions            []Permission         `json:"permissions"`
	TotalResources         *TotalResources      `json:"total_resources"`
	SelfDelegatedBandwidth *DelegatedBandwidth  `json:"self_delegated_bandwidth"`
	RefundRequest          *RefundRequest       `json:"refund_request"`
	VoterInfo              *VoterInfo           `json:"voter_info"`
}
// MarshalJSON serializes r via the project's MarshalBinary helper.
//
// NOTE(review): this signature does not satisfy encoding/json.Marshaler,
// which requires MarshalJSON() ([]byte, error) — so the standard json
// package will NEVER call this method; it is only usable as a direct call.
// It also discards the underlying error and returns nil on failure; confirm
// that every caller treats a nil result as an error.
func (r *GetAccountResult) MarshalJSON() []byte {
	data, err := MarshalBinary(r)
	if err != nil {
		return nil
	}
	return data
}
// UnmarshalJSON decodes JSON data into r.
//
// A locally-declared alias type is used so that the standard decoder sees a
// type WITHOUT this method. The original implementation called
// json.Unmarshal(data, r) directly, which makes the decoder re-invoke
// UnmarshalJSON on the same value — unbounded recursion and a stack
// overflow on the first real decode.
func (r *GetAccountResult) UnmarshalJSON(data []byte) error {
	// plain has the same fields and tags but none of the methods.
	type plain GetAccountResult
	if err := json.Unmarshal(data, (*plain)(r)); err != nil {
		return err
	}
	return nil
}
// GetTableRowsResult holds raw table rows plus a pagination flag.
// NOTE(review): Rows is a single []byte, so it will be JSON-encoded as a
// base64 string rather than an array of row objects — confirm this matches
// the RPC payload (json.RawMessage may be intended).
type GetTableRowsResult struct {
	Rows []byte `json:"rows"`
	More bool   `json:"more"`
}

// GetCurrencyBalanceResult lists one Asset per requested symbol.
type GetCurrencyBalanceResult struct {
	Balance []Asset
}

// GetCurrencyStatsResult describes a token's supply and issuer.
type GetCurrencyStatsResult struct {
	Supply    Asset       `json:"supply"`
	MaxSupply Asset       `json:"max_supply"`
	Issuer    AccountName `json:"issuer"`
}

// ProducerInfo describes a registered block producer.
type ProducerInfo struct {
	Owner       AccountName      `json:"owner"`
	TotalVotes  float64          `json:"total_votes"`
	ProducerKey crypto.PublicKey `json:"producer_key"`
	IsActive    bool             `json:"is_active"`
	Url         string           `json:"url"`
	// NOTE(review): "Bloks"/"unpaid_bloks" looks like a typo for
	// "blocks"/"unpaid_blocks" — verify the tag against the chain API before
	// renaming, since the wire name must match the server's.
	UnpaidBloks   uint32 `json:"unpaid_bloks"`
	LastClaimTime uint64 `json:"last_claim_time"`
	Location      uint16 `json:"location"`
}

// GetProducersResult is a page of producers plus the total vote weight.
type GetProducersResult struct {
	Rows                    []ProducerInfo `json:"rows"`
	TotalProducerVoteWeight float64        `json:"total_producer_vote_weight"`
	More                    string         `json:"more"`
}

// GetRequiredKeysResult lists the public keys needed to sign a transaction.
type GetRequiredKeysResult struct {
	RequiredKeys []crypto.PublicKey `json:"required_keys"`
}

// PushTransactionResult identifies a submitted transaction.
type PushTransactionResult struct {
	TransactionID ChecksumType `json:"transaction_id"`
	//Processed variant `json:"processed"` //fc::variant
}

// PushActionResult is intentionally empty; pushing an action returns no body.
type PushActionResult struct {
}

// GetTransactionResult is the decoded response of the transaction-lookup RPC.
type GetTransactionResult struct {
	Id                    Sha256Type        `json:"id"`
	Trx                   SignedTransaction `json:"trx"`
	BlockTime             BlockTimestamp    `json:"block_time"`
	BlockNum              uint32            `json:"block_num"`
	LastIrreversibleBlock uint32            `json:"last_irreversible_block"`
	Traces                []ActionTrace     `json:"traces"`
}

// GetControlledAccountsResults lists accounts controlled by a given account.
type GetControlledAccountsResults struct {
	ControlledAccounts []AccountName `json:"controlled_accounts"`
}

// OrderedActionResult is one entry of an account's action history.
type OrderedActionResult struct {
	GlobalActionSeq  uint64         `json:"global_action_seq"`
	AccountActionSeq int32          `json:"account_action_seq"`
	BlockNum         uint32         `json:"block_num"`
	BlockTime        BlockTimestamp `json:"block_time"`
	ActionTrace      ActionTrace    `json:"action_trace"`
}

// GetActionsResult is a page of action history.
type GetActionsResult struct {
	Actions                []OrderedActionResult `json:"actions"`
	LastIrreversibleBlock  uint32                `json:"last_irreversible_block"`
	TimeLimitExceededError bool                  `json:"time_limit_exceeded_error"`
}

// GetKeyAccountsResults lists accounts associated with a public key.
type GetKeyAccountsResults struct {
	AccountNames []AccountName `json:"account_names"`
}

// NetStatusResult describes the connection state of a single peer.
type NetStatusResult struct {
	Peer          string           `json:"peer"`
	Connecting    bool             `json:"connecting"`
	Syncing       bool             `json:"syncing"`
	LastHandshake HandshakeMessage `json:"last_handshake"`
}
|
/*
* @lc app=leetcode.cn id=8 lang=golang
*
* [8] 字符串转换整数 (atoi)
*/
package solution
import (
"math"
"strings"
)
// @lc code=start
// myAtoi converts a string to a 32-bit signed integer, mimicking C's atoi:
// leading spaces are skipped, an optional single '+'/'-' sign is consumed,
// digits are read until the first non-digit, and the result is clamped to
// [math.MinInt32, math.MaxInt32] on overflow. Anything unparsable yields 0.
func myAtoi(s string) int {
	s = strings.TrimLeft(s, " ")
	if s == "" {
		return 0
	}

	sign, pos := 1, 0
	switch s[0] {
	case '-':
		sign = -1
		pos++
	case '+':
		pos++
	}

	// Largest value whose next appended digit might still fit in int32.
	const cutoff = math.MaxInt32 / 10 // 214748364

	n := 0
	for ; pos < len(s); pos++ {
		c := s[pos]
		if c < '0' || c > '9' {
			break
		}
		d := int(c - '0')
		// Clamp before appending the digit would overflow int32:
		// 2147483647 ends in 7, -2147483648 ends in 8.
		if n > cutoff || (n == cutoff && ((sign > 0 && d > 7) || (sign < 0 && d > 8))) {
			if sign > 0 {
				return math.MaxInt32
			}
			return math.MinInt32
		}
		n = n*10 + d
	}
	return sign * n
}
// @lc code=end
|
package command
import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/yamil-rivera/flowit/internal/config"
"github.com/yamil-rivera/flowit/internal/fsm"
"github.com/yamil-rivera/flowit/internal/io"
"github.com/yamil-rivera/flowit/internal/runtime"
"github.com/yamil-rivera/flowit/internal/utils"
w "github.com/yamil-rivera/flowit/internal/workflow"
)
// RuntimeService exposes useful methods for managing workflow executions.
type RuntimeService interface {
	// Run executes stageID of the named workflow, optionally targeting an
	// existing instance via optionalWorkflowID.
	Run(optionalWorkflowID utils.OptionalString, args []string, workflowName, stageID string, workflowDefinition config.Flowit, executor runtime.Executor, writer runtime.Writer) error
	// Cancel aborts the identified workflow instance.
	Cancel(workflowID string, workflowName string, writer runtime.Writer) error
}

// RepositoryService exposes useful methods for persisting and retrieving workflows.
type RepositoryService interface {
	GetWorkflow(workflowName, workflowID string) (w.OptionalWorkflow, error)
	GetWorkflows(workflowName string, count int, excludeInactive bool) ([]w.Workflow, error)
	GetAllWorkflows(excludeInactive bool) ([]w.Workflow, error)
	// GetWorkflowFromPreffix resolves a workflow by a prefix of its ID.
	GetWorkflowFromPreffix(workflowName, workflowIDPreffix string) (w.OptionalWorkflow, error)
	PutWorkflow(workflow w.Workflow) error
}

// Service implements the command service interface. rootCommand is populated
// by RegisterCommands and consumed by Execute.
type Service struct {
	rootCommand        *cobra.Command
	runtimeService     RuntimeService
	fsmServiceFactory  fsm.FsmServiceFactory
	repositoryService  RepositoryService
	workflowDefinition *config.WorkflowDefinition
}

// command pairs a cobra command with its (not yet wired) subcommands.
type command struct {
	cobra       *cobra.Command
	subcommands []command
}
// NewService creates a new command service. The root command stays nil until
// RegisterCommands is called.
func NewService(run RuntimeService, fsf fsm.FsmServiceFactory, repo RepositoryService, wd *config.WorkflowDefinition) *Service {
	return &Service{
		rootCommand:        nil,
		runtimeService:     run,
		fsmServiceFactory:  fsf,
		repositoryService:  repo,
		workflowDefinition: wd,
	}
}
// RegisterCommands registers all commands and subcommands based on the provided configuration
// and previous active workflows. The resulting tree is
// root -> workflow-definition/instance containers -> stage commands,
// plus a "version" command.
func (s *Service) RegisterCommands(version string) error {
	var mainCommands []command // nolint:prealloc
	fsmService, err := s.fsmServiceFactory.NewFsmService(s.workflowDefinition.Flowit)
	if err != nil {
		return errors.WithStack(err)
	}
	// One container command per workflow definition, seeded with the
	// commands that can start a brand-new workflow instance.
	workflowDefinitions := s.workflowDefinition.Flowit.Workflows
	for _, workflowDefinition := range workflowDefinitions {
		workflowName := workflowDefinition.ID
		stateMachine := workflowDefinition.StateMachine
		cmd := command{}
		cmd.cobra = newContainerCommand(workflowName)
		initialStages, err := s.generateInitialCommands(fsmService, stateMachine, workflowName)
		if err != nil {
			return errors.WithStack(err)
		}
		cmd.subcommands = initialStages
		mainCommands = append(mainCommands, cmd)
	}
	// For each persisted active workflow, nest an instance container (named
	// after the instance prefix) under its definition's container.
	activeWorkflows, err := s.getAllActiveWorkflows()
	if err != nil {
		return errors.WithStack(err)
	}
	for _, workflow := range activeWorkflows {
		childCmd := command{}
		childCmd.cobra = newContainerCommand(workflow.Preffix)
		stages, err := s.generatePossibleCommands(workflow)
		if err != nil {
			return errors.Wrap(err, "Error generating possible commands")
		}
		childCmd.subcommands = stages
		// Check if we already have a registered command for this workflow name
		var cmd *command
		var found bool
		for i := range mainCommands {
			// NOTE(review): mainCmd is a COPY of the slice element; the
			// append below mutates the copy, which only takes effect because
			// replaceCommand writes it back by Use. The loop also keeps
			// scanning after a match (no break) — harmless unless two main
			// commands share a Use; consider breaking here.
			mainCmd := mainCommands[i]
			if mainCmd.cobra.Use == workflow.Name {
				cmd = &mainCmd
				found = true
			}
		}
		if !found {
			cmd = &command{}
			cmd.cobra = newContainerCommand(workflow.Name)
		}
		cmd.subcommands = append(cmd.subcommands, childCmd)
		mainCommands = replaceCommand(mainCommands, *cmd)
	}
	// add version command
	cmd := command{}
	cmd.cobra = newPrintCommand("version", version)
	mainCommands = append(mainCommands, cmd)
	// TODO: add update command
	rootCommand := &cobra.Command{
		Use:   "flowit",
		Short: "A flexible workflow manager",
		Long:  "A flexible workflow manager",
	}
	// TODO: Avoid showing usage when a step fails
	// Wire the assembled three-level tree into actual cobra parent/child links.
	for _, mainCommand := range mainCommands {
		for _, subcommands := range mainCommand.subcommands {
			for _, subcommand := range subcommands.subcommands {
				subcommands.cobra.AddCommand(subcommand.cobra)
			}
			mainCommand.cobra.AddCommand(subcommands.cobra)
		}
		rootCommand.AddCommand(mainCommand.cobra)
	}
	s.rootCommand = rootCommand
	return nil
}
// Execute will kickstart the root command, wrapping any failure with a stack
// trace. RegisterCommands must have been called first.
func (s Service) Execute() error {
	err := s.rootCommand.Execute()
	if err == nil {
		return nil
	}
	return errors.WithStack(err)
}
// getAllActiveWorkflows fetches every persisted workflow, excluding inactive
// ones (the boolean argument is GetAllWorkflows' excludeInactive flag).
func (s Service) getAllActiveWorkflows() ([]w.Workflow, error) {
	return s.repositoryService.GetAllWorkflows(true)
}
// newContainerCommand returns a bare cobra command whose only purpose is to
// group subcommands under the given name.
func newContainerCommand(commandUse string) *cobra.Command {
	container := &cobra.Command{Use: commandUse}
	return container
}
// newPrintCommand returns a command that prints the fixed string out when
// invoked (used for e.g. the "version" command).
func newPrintCommand(command string, out string) *cobra.Command {
	printRun := func(cmd *cobra.Command, args []string) {
		io.Println(out)
	}
	return &cobra.Command{
		Use: command,
		Run: printRun,
	}
}
// TODO: Add arguments description to command help
// newStageCommand returns a command named after a workflow stage that accepts
// exactly args positional arguments and delegates execution to run.
func newStageCommand(command string, args int, run func(cmd *cobra.Command, args []string) error) *cobra.Command {
	stageCmd := &cobra.Command{
		Use:  command,
		Args: cobra.ExactArgs(args),
		RunE: run,
	}
	return stageCmd
}
// generateCommandsFromStagesForWorkflow builds one cobra command per stage
// ID, resolving stage metadata from the given workflow INSTANCE's frozen
// state (rather than the live definition — contrast generateCommandsFromStages).
func (s Service) generateCommandsFromStagesForWorkflow(workflow w.Workflow, stages []string) ([]command, error) {
	commands := make([]command, len(stages))
	for i, stageID := range stages {
		stageID := stageID // capture per iteration for the closure below
		runFunc := func(cmd *cobra.Command, args []string) error {
			optionalWorkflowID, err := s.getWorkflowIDFromCommand(cmd)
			if err != nil {
				return errors.WithStack(err)
			}
			return s.runtimeService.Run(optionalWorkflowID, args, workflow.Name, stageID, s.workflowDefinition.Flowit, runtime.NewUnixShellExecutor(), io.NewConsoleWriter())
		}
		stageCfg, err := stage(workflow, workflow.Name, stageID)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		commands[i].cobra = newStageCommand(stageCfg.ID, len(stageCfg.Args), runFunc)
	}
	return commands, nil
}
// generateCommandsFromStages builds one cobra command per stage ID, resolving
// stage metadata from the live workflow DEFINITION (contrast
// generateCommandsFromStagesForWorkflow, which uses a persisted instance).
func (s Service) generateCommandsFromStages(workflowName string, stages []string) ([]command, error) {
	commands := make([]command, len(stages))
	for i, stageID := range stages {
		stageID := stageID // capture per iteration for the closure below
		runFunc := func(cmd *cobra.Command, args []string) error {
			optionalWorkflowID, err := s.getWorkflowIDFromCommand(cmd)
			if err != nil {
				return errors.WithStack(err)
			}
			return s.runtimeService.Run(optionalWorkflowID, args, workflowName, stageID, s.workflowDefinition.Flowit, runtime.NewUnixShellExecutor(), io.NewConsoleWriter())
		}
		stageCfg, err := s.workflowDefinition.Stage(workflowName, stageID)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		commands[i].cobra = newStageCommand(stageCfg.ID, len(stageCfg.Args), runFunc)
	}
	return commands, nil
}
// generateInitialCommands builds the commands available before any instance
// exists: only the state machine's initial state can be triggered.
func (s Service) generateInitialCommands(fsmService fsm.Service, stateMachine, workflowName string) ([]command, error) {
	initial := fsmService.InitialState(stateMachine)
	commands, err := s.generateCommandsFromStages(workflowName, []string{initial})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return commands, nil
}
// generatePossibleCommands builds the commands reachable from the workflow
// instance's current position in its state machine, plus a "cancel" command.
// A non-negative checkpoint means the last execution is resumable, so the
// reachable states are computed from the stage it came FROM.
func (s Service) generatePossibleCommands(workflow w.Workflow) ([]command, error) {
	fsmService, err := s.fsmServiceFactory.NewFsmService(workflow.State)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	currentStage := workflow.LatestExecution.Stage
	if workflow.LatestExecution.Checkpoint >= 0 {
		currentStage = workflow.LatestExecution.FromStage
	}
	availableStates := fsmService.AvailableStates(workflow.StateMachineID(), currentStage)
	commands, err := s.generateCommandsFromStagesForWorkflow(workflow, availableStates)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return append(commands, s.generateCancelCommand(workflow.Name)), nil
}
// generateCancelCommand builds the "cancel" subcommand for an active workflow
// instance; it resolves the instance ID from the command's parent and asks
// the runtime service to cancel it.
func (s Service) generateCancelCommand(workflowName string) command {
	cancelRun := func(cmd *cobra.Command, args []string) error {
		optionalWorkflowID, err := s.getWorkflowIDFromCommand(cmd)
		if err != nil {
			return errors.WithStack(err)
		}
		// We are sure the optional is wrapping a workflow ID
		workflowID, _ := optionalWorkflowID.Get()
		return s.runtimeService.Cancel(workflowID, workflowName, io.NewConsoleWriter())
	}
	return command{
		cobra: &cobra.Command{
			Use:  "cancel",
			RunE: cancelRun,
		},
	}
}
// cmd parent is either a workflow definition name or a workflow instance name.
// getWorkflowIDFromCommand returns the resolved instance ID in the former
// case an empty optional, since no instance exists yet.
func (s Service) getWorkflowIDFromCommand(cmd *cobra.Command) (utils.OptionalString, error) {
	parentName := cmd.Parent().Name()
	for _, workflow := range s.workflowDefinition.Flowit.Workflows {
		if workflow.ID == parentName {
			// The parent is a workflow definition: there is no instance ID.
			return utils.OptionalString{}, nil
		}
	}
	// parentName has to be a workflow instance name and cmd.Parent().Parent() a workflow definition
	workflowID, err := s.getWorkflowIDFromName(cmd.Parent().Parent().Name(), parentName)
	if err != nil {
		return utils.OptionalString{}, errors.WithStack(err)
	}
	return utils.NewStringOptional(workflowID), nil
}
// getWorkflowIDFromName resolves a full workflow instance ID from its
// definition name and instance-ID prefix via the repository.
func (s Service) getWorkflowIDFromName(workflowName, workflowPreffix string) (string, error) {
	optional, err := s.repositoryService.GetWorkflowFromPreffix(workflowName, workflowPreffix)
	if err != nil {
		return "", errors.WithStack(err)
	}
	resolved, err := optional.Get()
	if err != nil {
		return "", errors.WithStack(err)
	}
	return resolved.ID, nil
}
// replaceCommand returns a copy of cmds in which every element whose cobra
// Use matches cmd's has been replaced by cmd; all others are kept as-is.
func replaceCommand(cmds []command, cmd command) []command {
	result := make([]command, 0, len(cmds))
	for _, c := range cmds {
		if c.cobra.Use == cmd.cobra.Use {
			result = append(result, cmd)
		} else {
			result = append(result, c)
		}
	}
	return result
}
// stage looks up a stage definition inside the workflow instance's frozen
// state. It fails with a descriptive error when either the workflow ID or
// the stage ID is unknown. (The parameter is named wf rather than w to avoid
// shadowing the w package alias.)
func stage(wf w.Workflow, workflowID, stageID string) (config.Stage, error) {
	for _, workflow := range wf.State.Workflows {
		if workflow.ID != workflowID {
			continue
		}
		for _, candidate := range workflow.Stages {
			if candidate.ID == stageID {
				return candidate, nil
			}
		}
		return config.Stage{}, errors.New("Invalid stage ID: " + stageID)
	}
	return config.Stage{}, errors.New("Invalid workflow ID: " + workflowID)
}
|
package main
import "fmt"
// funcao1 demonstrates the simplest function form: no parameters, no return
// value; it just prints a fixed message.
func funcao1() {
	fmt.Println("Função 1")
}
// funcao2 demonstrates a function with two string parameters and no return
// value; it prints both parameters.
func funcao2(parametro1 string, parametro2 string) {
	fmt.Printf("Funcao 2: %s %s\n", parametro1, parametro2)
}
// funcao3 demonstrates a function with a single string return value.
func funcao3() string {
	const mensagem = "Funcao 3"
	return mensagem
}
// funcao4 demonstrates parameters sharing one type declaration plus a string
// return value built from them.
func funcao4(parametro1, parametro2 string) string {
	resultado := fmt.Sprintf("Funcao 4: %s %s", parametro1, parametro2)
	return resultado
}
// funcao5 demonstrates multiple return values.
func funcao5() (string, string) {
	primeiro, segundo := "retorno1", "retorno2"
	return primeiro, segundo
}
// main exercises each example function in order and prints their results.
func main() {
	funcao1()
	funcao2("parametro1", "parametro2")
	resultado3 := funcao3()
	resultado4 := funcao4("parametro1", "parametro2")
	fmt.Println(resultado3)
	fmt.Println(resultado4)
	primeiro, segundo := funcao5()
	fmt.Println(primeiro)
	fmt.Println(segundo)
}
|
package iirepo
import (
"path/filepath"
)
// Path returns the repo path: the repo's Name() joined under rootpath with
// OS-appropriate separators.
//
// Note that Path does NOT create this directory itself.
func Path(rootpath string) string {
	return filepath.Join(rootpath, Name())
}
|
package command
import (
"bytes"
"errors"
"io"
"os"
"strings"
"testing"
log "github.com/Sirupsen/logrus"
logrusTestHook "github.com/Sirupsen/logrus/hooks/test"
"github.com/fatih/color"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
clientMock "github.com/quilt/quilt/api/client/mocks"
"github.com/quilt/quilt/db"
"github.com/quilt/quilt/stitch"
"github.com/quilt/quilt/util"
)
// file is a fixture: a path and its contents to seed the in-memory FS with.
type file struct {
	path, contents string
}

// runTest is one "run" command scenario: fixture files, the spec path passed
// to the command, and the expected exit code, deployed spec, and log entries.
type runTest struct {
	files        []file
	path         string
	expExitCode  int
	expDeployArg string
	expEntries   []log.Entry
}
// TestRunSpec runs the "run" subcommand against an in-memory filesystem and a
// mocked API client, checking exit codes, the spec handed to Deploy, and the
// errors logged for missing specs and JavaScript stack traces.
func TestRunSpec(t *testing.T) {
	os.Setenv("QUILT_PATH", "/quilt_path")
	stitch.DefaultImportGetter.Path = "/quilt_path"
	exJavascript := `deployment.deploy(new Machine({}));`
	exJSON := `{"Machines":[{"ID":"107dee4e67d9a0fead1ef7ac48adc0a5aebbedac",` +
		`"CPU":{},"RAM":{}}],"Namespace":"default-namespace"}`
	tests := []runTest{
		// Happy path: a spec relative to the working directory deploys.
		{
			files: []file{
				{
					path:     "test.js",
					contents: exJavascript,
				},
			},
			path:         "test.js",
			expExitCode:  0,
			expDeployArg: exJSON,
		},
		// Missing spec resolved under QUILT_PATH.
		{
			path:        "dne.js",
			expExitCode: 1,
			expEntries: []log.Entry{
				{
					Message: "open /quilt_path/dne.js: " +
						"file does not exist",
					Level: log.ErrorLevel,
				},
			},
		},
		// Missing spec given as an absolute path.
		{
			path:        "/dne.js",
			expExitCode: 1,
			expEntries: []log.Entry{
				{
					Message: "open /dne.js: file does not exist",
					Level:   log.ErrorLevel,
				},
			},
		},
		// Bare module name resolved under QUILT_PATH.
		{
			files: []file{
				{
					path:     "/quilt_path/in_quilt_path.js",
					contents: exJavascript,
				},
			},
			path:         "in_quilt_path",
			expDeployArg: exJSON,
		},
		// Ensure we print a stacktrace when available.
		{
			files: []file{
				{
					path:     "/quilt_path/A.js",
					contents: `require("B").foo();`,
				},
				{
					path: "/quilt_path/B.js",
					contents: `module.exports.foo = function() {
throw new Error("bar");
}`,
				},
			},
			path:        "/quilt_path/A.js",
			expExitCode: 1,
			expEntries: []log.Entry{
				{
					Message: "Error: bar\n" +
						" at /quilt_path/B.js:2:17\n" +
						" at /quilt_path/A.js:1:67\n",
					Level: log.ErrorLevel,
				},
			},
		},
	}
	for _, test := range tests {
		// Fresh in-memory FS, client mock, and log hook per case so the
		// scenarios stay independent of each other.
		util.AppFs = afero.NewMemMapFs()
		mockGetter := new(clientMock.Getter)
		c := &clientMock.Client{}
		mockGetter.On("Client", mock.Anything).Return(c, nil)
		logHook := logrusTestHook.NewGlobal()
		for _, f := range test.files {
			util.WriteFile(f.path, []byte(f.contents), 0644)
		}
		runCmd := NewRunCommand()
		runCmd.clientGetter = mockGetter
		runCmd.stitch = test.path
		exitCode := runCmd.Run()
		assert.Equal(t, test.expExitCode, exitCode)
		assert.Equal(t, test.expDeployArg, c.DeployArg)
		assert.Equal(t, len(test.expEntries), len(logHook.Entries))
		for i, entry := range logHook.Entries {
			assert.Equal(t, test.expEntries[i].Message, entry.Message)
			assert.Equal(t, test.expEntries[i].Level, entry.Level)
		}
	}
}
// diffTest pairs current and proposed deployment JSON with the expected
// unified-diff output.
type diffTest struct {
	curr, new, exp string
}
// TestDeploymentDiff checks the unified diff produced between the current and
// proposed deployment JSON blobs, covering no-change, removal, and
// replacement scenarios. The raw-string expectations encode the exact diff
// text, so their internal whitespace is significant.
func TestDeploymentDiff(t *testing.T) {
	t.Parallel()
	tests := []diffTest{
		{
			curr: "{}",
			new:  "{}",
			exp:  "",
		},
		{
			curr: `{"Machines":[{"Provider":"Amazon"}]}`,
			new:  `{"Machines":[]}`,
			exp: `--- Current
+++ Proposed
@@ -1,7 +1,3 @@
{
- "Machines": [
- {
- "Provider": "Amazon"
- }
- ]
+ "Machines": []
}
`,
		},
		{
			curr: `{"Machines":[{"Provider":"Amazon"},` +
				`{"Provider":"Google"}]}`,
			new: `{"Machines":[{"Provider":"Google"}]}`,
			exp: `--- Current
+++ Proposed
@@ -1,8 +1,5 @@
{
"Machines": [
- {
- "Provider": "Amazon"
- },
{
"Provider": "Google"
}
`,
		},
		{
			curr: `{"Machines":[{"Provider":"Amazon"},` +
				`{"Provider":"Google"}]}`,
			new: `{"Machines":[{"Provider":"Vagrant"}]}`,
			exp: `--- Current
+++ Proposed
@@ -1,10 +1,7 @@
{
"Machines": [
{
- "Provider": "Amazon"
- },
- {
- "Provider": "Google"
+ "Provider": "Vagrant"
}
]
}
`,
		},
	}
	for _, test := range tests {
		diff, err := diffDeployment(test.curr, test.new)
		assert.Nil(t, err)
		assert.Equal(t, test.exp, diff)
	}
}
// colorizeTest pairs a plain diff string with its expected ANSI-colorized form.
type colorizeTest struct {
	original string
	exp      string
}
// TestColorize checks that diff lines beginning with '-' are rendered red and
// lines beginning with '+' green, each followed by an ANSI reset, while all
// other lines pass through untouched.
func TestColorize(t *testing.T) {
	green := "\x1b[32m"
	red := "\x1b[31m"
	// a reset sequence is inserted after a colorized line
	reset := "\x1b[0m"
	// force colored output for testing
	color.NoColor = false
	tests := []colorizeTest{
		{
			original: "{}",
			exp:      "{}",
		},
		{
			original: "no color\n" +
				"-\tred\n" +
				"+\tgreen\n",
			exp: "no color\n" +
				red + "-\tred\n" + reset +
				green + "+\tgreen\n" + reset,
		},
		{
			original: "\n",
			exp:      "\n",
		},
		{
			original: "\na\n\n",
			exp:      "\na\n\n",
		},
		// A table-style ruler also starts with '+' and is colorized green.
		{
			original: "+----+---+\n",
			exp:      green + "+----+---+\n" + reset,
		},
	}
	for _, test := range tests {
		colorized := colorizeDiff(test.original)
		assert.Equal(t, test.exp, colorized)
	}
}
// confirmTest pairs a sequence of user input lines with the expected
// confirmation outcome.
type confirmTest struct {
	inputs []string
	exp    bool
}
// TestConfirm checks the yes/no prompt: case-insensitive y/yes/n/no are
// accepted, unrecognized lines are re-prompted, and the first recognized
// answer wins (input after it is ignored).
func TestConfirm(t *testing.T) {
	tests := []confirmTest{
		{
			inputs: []string{"y"},
			exp:    true,
		},
		{
			inputs: []string{"yes"},
			exp:    true,
		},
		{
			inputs: []string{"YES"},
			exp:    true,
		},
		{
			inputs: []string{"n"},
			exp:    false,
		},
		{
			inputs: []string{"no"},
			exp:    false,
		},
		// "foo" is not a valid answer; the prompt retries on the next line.
		{
			inputs: []string{"foo", "no"},
			exp:    false,
		},
		// The first recognized answer ("no") ends the prompt; "yes" is ignored.
		{
			inputs: []string{"foo", "no", "yes"},
			exp:    false,
		},
	}
	for _, test := range tests {
		in := bytes.NewBufferString(strings.Join(test.inputs, "\n"))
		res, err := confirm(in, "")
		assert.Nil(t, err)
		assert.Equal(t, test.exp, res)
	}
}
// TestPromptsUser verifies that the run command deploys only when the user
// confirms the prompt. It stubs the package-level confirm function variable
// for each iteration and restores it afterwards.
func TestPromptsUser(t *testing.T) {
	oldConfirm := confirm
	defer func() {
		confirm = oldConfirm
	}()
	util.AppFs = afero.NewMemMapFs()
	for _, confirmResp := range []bool{true, false} {
		confirm = func(in io.Reader, prompt string) (bool, error) {
			return confirmResp, nil
		}
		mockGetter := new(clientMock.Getter)
		// An existing cluster spec forces the command down the prompt path.
		c := &clientMock.Client{
			ClusterReturn: []db.Cluster{
				{
					Spec: `{"old":"spec"}`,
				},
			},
		}
		mockGetter.On("Client", mock.Anything).Return(c, nil)
		util.WriteFile("test.js", []byte(""), 0644)
		runCmd := NewRunCommand()
		runCmd.clientGetter = mockGetter
		runCmd.stitch = "test.js"
		runCmd.Run()
		// Deploy happens (DeployArg set) exactly when the prompt was accepted.
		assert.Equal(t, confirmResp, c.DeployArg != "")
	}
}
// TestRunFlags checks flag parsing for the run command: the spec may be given
// via -stitch or positionally, -f sets force, and a missing spec is an error.
func TestRunFlags(t *testing.T) {
	t.Parallel()
	expStitch := "spec"
	checkRunParsing(t, []string{"-stitch", expStitch}, Run{stitch: expStitch}, nil)
	checkRunParsing(t, []string{expStitch}, Run{stitch: expStitch}, nil)
	checkRunParsing(t, []string{"-f", expStitch},
		Run{force: true, stitch: expStitch}, nil)
	checkRunParsing(t, []string{}, Run{}, errors.New("no spec specified"))
}
// checkRunParsing parses args into a fresh run command and compares the
// result against expFlags, or against expErr when an error is expected.
//
// Fix: the original dereferenced err.Error() without checking err for nil,
// so a test case that EXPECTED an error but parsed successfully would panic
// with a nil-pointer dereference instead of failing cleanly.
func checkRunParsing(t *testing.T, args []string, expFlags Run, expErr error) {
	runCmd := NewRunCommand()
	err := parseHelper(runCmd, args)
	if expErr != nil {
		if err == nil {
			t.Errorf("Expected error %s, but got nil", expErr.Error())
		} else if err.Error() != expErr.Error() {
			t.Errorf("Expected error %s, but got %s",
				expErr.Error(), err.Error())
		}
		return
	}
	assert.Nil(t, err)
	assert.Equal(t, expFlags.stitch, runCmd.stitch)
	assert.Equal(t, expFlags.force, runCmd.force)
}
|
package datasetapi
import (
"context"
stypes "github.com/lexis-project/lexis-backend-services-interface-datasets.git/client/data_set_management"
"github.com/lexis-project/lexis-backend-services-api.git/models"
"github.com/lexis-project/lexis-backend-services-api.git/restapi/operations/data_set_management"
"github.com/go-openapi/runtime/middleware"
l "gitlab.com/cyclops-utilities/logging"
)
// CheckPIDStatus proxies the CheckPIDStatus call to the dataset backend and
// maps each typed backend error onto the matching API response.
func (p *DataSetAPI) CheckPIDStatus(ctx context.Context, params data_set_management.CheckPIDStatusParams) middleware.Responder {
	rparams := stypes.CheckPIDStatusParams{
		RequestID: params.RequestID,
	}
	res, err := p.getClient(params.HTTPRequest).DataSetManagement.CheckPIDStatus(ctx, &rparams)
	if err != nil {
		l.Info.Printf("Error calling CheckPIDStatus endpoint\n")
		// Idiomatic type switch: bind the asserted value once instead of
		// switching on err.(type) and re-asserting inside every case.
		switch v := err.(type) {
		case *stypes.CheckPIDStatusBadRequest: // 400
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckPIDStatusBadRequest().WithPayload(&payload)
		case *stypes.CheckPIDStatusUnauthorized: // 401
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckPIDStatusUnauthorized().WithPayload(&payload)
		case *stypes.CheckPIDStatusNotFound: // 404
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckPIDStatusNotFound().WithPayload(&payload)
		case *stypes.CheckPIDStatusRequestURITooLong: // 414
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckPIDStatusRequestURITooLong().WithPayload(&payload)
		case *stypes.CheckPIDStatusInternalServerError: // 500
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckPIDStatusInternalServerError().WithPayload(&payload)
		default:
			// Unknown error type: wrap it in a generic 500 payload.
			payload := fillErrorResponse(err)
			return data_set_management.NewCheckPIDStatusInternalServerError().WithPayload(payload)
		}
	}
	payload := models.DataReplication{
		Status:     res.Payload.Status,
		PID:        res.Payload.PID,
		TargetPath: res.Payload.TargetPath,
	}
	return data_set_management.NewCheckPIDStatusOK().WithPayload(&payload)
}
// CheckReplicateStatus proxies the CheckReplicateStatus call to the dataset
// backend and maps each typed backend error onto the matching API response.
func (p *DataSetAPI) CheckReplicateStatus(ctx context.Context, params data_set_management.CheckReplicateStatusParams) middleware.Responder {
	rparams := stypes.CheckReplicateStatusParams{
		RequestID: params.RequestID,
	}
	res, err := p.getClient(params.HTTPRequest).DataSetManagement.CheckReplicateStatus(ctx, &rparams)
	if err != nil {
		l.Info.Printf("Error calling CheckReplicateStatus endpoint\n")
		// Idiomatic type switch: bind the asserted value once instead of
		// switching on err.(type) and re-asserting inside every case.
		switch v := err.(type) {
		case *stypes.CheckReplicateStatusBadRequest: // 400
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckReplicateStatusBadRequest().WithPayload(&payload)
		case *stypes.CheckReplicateStatusUnauthorized: // 401
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckReplicateStatusUnauthorized().WithPayload(&payload)
		case *stypes.CheckReplicateStatusNotFound: // 404
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckReplicateStatusNotFound().WithPayload(&payload)
		case *stypes.CheckReplicateStatusRequestURITooLong: // 414
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckReplicateStatusRequestURITooLong().WithPayload(&payload)
		case *stypes.CheckReplicateStatusInternalServerError: // 500
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCheckReplicateStatusInternalServerError().WithPayload(&payload)
		default:
			// Unknown error type: wrap it in a generic 500 payload.
			payload := fillErrorResponse(err)
			return data_set_management.NewCheckReplicateStatusInternalServerError().WithPayload(payload)
		}
	}
	payload := models.DataReplication{
		Status: res.Payload.Status,
		PID:    res.Payload.PID,
	}
	return data_set_management.NewCheckReplicateStatusOK().WithPayload(&payload)
}
// PID proxies the PID-creation call to the dataset backend and maps each
// typed backend error onto the matching API response. On success it returns
// the steering request ID to poll via CheckPIDStatus.
func (p *DataSetAPI) PID(ctx context.Context, params data_set_management.PIDParams) middleware.Responder {
	rparams := stypes.PIDParams{
		Parameters: stypes.PIDBody{
			SourceSystem: params.Parameters.SourceSystem,
			SourcePath:   params.Parameters.SourcePath,
			ParentPid:    params.Parameters.ParentPid,
		},
	}
	res, err := p.getClient(params.HTTPRequest).DataSetManagement.PID(ctx, &rparams)
	if err != nil {
		l.Info.Printf("Error calling PID endpoint\n")
		// Idiomatic type switch: bind the asserted value once instead of
		// switching on err.(type) and re-asserting inside every case.
		switch v := err.(type) {
		case *stypes.PIDBadRequest: // 400
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewPIDBadRequest().WithPayload(&payload)
		case *stypes.PIDUnauthorized: // 401
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewPIDUnauthorized().WithPayload(&payload)
		case *stypes.PIDNotFound: // 404
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewPIDNotFound().WithPayload(&payload)
		case *stypes.PIDRequestURITooLong: // 414
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewPIDRequestURITooLong().WithPayload(&payload)
		case *stypes.PIDInternalServerError: // 500
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewPIDInternalServerError().WithPayload(&payload)
		default:
			// Unknown error type: wrap it in a generic 500 payload.
			payload := fillErrorResponse(err)
			return data_set_management.NewPIDInternalServerError().WithPayload(payload)
		}
	}
	payload := models.SteeringRequestID{
		RequestID: res.Payload.RequestID,
	}
	return data_set_management.NewPIDCreated().WithPayload(&payload)
}
// Replicate forwards a data-set replication request (source -> target) to
// the remote data-set-management service and maps the typed service errors
// onto the matching swagger responders. On success it returns the request
// id of the accepted steering request.
func (p *DataSetAPI) Replicate(ctx context.Context, params data_set_management.ReplicateParams) middleware.Responder {
	rparams := stypes.ReplicateParams{
		Parameters: stypes.ReplicateBody{
			SourceSystem: params.Parameters.SourceSystem,
			SourcePath:   params.Parameters.SourcePath,
			TargetSystem: params.Parameters.TargetSystem,
			TargetPath:   params.Parameters.TargetPath,
		},
	}
	res, err := p.getClient(params.HTTPRequest).DataSetManagement.Replicate(ctx, &rparams)
	if err != nil {
		l.Info.Printf("Error calling Replicate endpoint\n")
		// Bind the concrete error in the type switch instead of re-asserting
		// the same type inside every case (staticcheck S1034).
		switch v := err.(type) {
		case *stypes.ReplicateBadRequest: // 400
			return data_set_management.NewReplicateBadRequest().WithPayload(&models.ErrorResponse{ErrorString: v.Payload.ErrorString})
		case *stypes.ReplicateUnauthorized: // 401
			return data_set_management.NewReplicateUnauthorized().WithPayload(&models.ErrorResponse{ErrorString: v.Payload.ErrorString})
		case *stypes.ReplicateNotFound: // 404
			return data_set_management.NewReplicateNotFound().WithPayload(&models.ErrorResponse{ErrorString: v.Payload.ErrorString})
		case *stypes.ReplicateRequestURITooLong: // 414
			return data_set_management.NewReplicateRequestURITooLong().WithPayload(&models.ErrorResponse{ErrorString: v.Payload.ErrorString})
		case *stypes.ReplicateInternalServerError: // 500
			return data_set_management.NewReplicateInternalServerError().WithPayload(&models.ErrorResponse{ErrorString: v.Payload.ErrorString})
		default:
			// Unrecognized error: report it as an internal server error.
			return data_set_management.NewReplicateInternalServerError().WithPayload(fillErrorResponse(err))
		}
	}
	payload := models.SteeringRequestID{
		RequestID: res.Payload.RequestID,
	}
	return data_set_management.NewReplicateCreated().WithPayload(&payload)
}
|
package player
import (
	"context"
	"errors"
	"io"
	"net"
	"strconv"
	"sync"
	"sync/atomic"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"

	pb "gos_rpc_proto"
	"gosconf"
	"goslib/logger"
	"gslib/scene_mgr"
)
// StreamServer implements the RouteConnectGame gRPC service, bridging
// connect-app agent streams into this game app's player handling.
type StreamServer struct {
	// ConnectAppId identifies the connect app this server instance is
	// registered for — presumably set per stream; confirm with callers
	// (it is constructed with "" in StartRpcStream).
	ConnectAppId string
}
// accountConnectMap maps an account id to its active agent stream.
var accountConnectMap = sync.Map{}

// onlinePlayers counts currently connected players; it is mutated with
// sync/atomic by the per-stream receiver goroutines.
var onlinePlayers int32

// OnlinePlayers returns the current number of connected players.
// It uses an atomic load to pair correctly with the atomic.AddInt32
// updates performed by startReceiver; the original plain read raced
// with those writers.
func OnlinePlayers() int32 {
	return atomic.LoadInt32(&onlinePlayers)
}
// StreamRpcListenPort holds the TCP port the stream RPC server actually
// listens on (useful when the configured address requests port 0).
var StreamRpcListenPort string

// StartRpcStream starts the gRPC stream server for game-agent connections,
// records the bound port, and serves in a background goroutine.
func StartRpcStream() {
	conf := gosconf.RPC_FOR_GAME_APP_STREAM
	lis, err := net.Listen(conf.ListenNet, conf.ListenAddr)
	if err != nil {
		// Bail out before touching lis: it is nil when Listen fails, and
		// the original code dereferenced it before this check.
		logger.ERR("failed to listen: ", err)
		return
	}
	StreamRpcListenPort = strconv.Itoa(lis.Addr().(*net.TCPAddr).Port)
	logger.INFO("GameAgent lis: ", conf.ListenNet, " addr: ", StreamRpcListenPort)
	grpcServer := grpc.NewServer()
	pb.RegisterRouteConnectGameServer(grpcServer, &StreamServer{""})
	logger.INFO("GameApp started!")
	go grpcServer.Serve(lis)
}
// Per stream for per goroutine
// AgentStream is the gRPC entry point for a connect-app agent stream; it
// blocks inside startReceiver for the whole lifetime of the stream.
func (s *StreamServer) AgentStream(stream pb.RouteConnectGame_AgentStreamServer) error {
	return s.startReceiver(stream)
}
// DeployScene asks the scene manager to try loading the scene named in the
// request into this game app and reports whether the load succeeded.
func (s *StreamServer) DeployScene(ctx context.Context, in *pb.DeploySceneRequest) (*pb.DeploySceneReply, error) {
	success := scene_mgr.TryLoadScene(in.GetSceneId())
	return &pb.DeploySceneReply{Success: success}, nil
}
// RequestPlayer routes an RPC payload to the player identified by the
// request's account id and returns the player's raw response bytes.
func (s *StreamServer) RequestPlayer(ctx context.Context, in *pb.RequestPlayerRequest) (*pb.RequestPlayerReply, error) {
	data, err := HandleRPCCall(in.GetAccountId(), in.GetData())
	if err != nil {
		return nil, err
	}
	reply := &pb.RequestPlayerReply{Data: data}
	return reply, nil
}
// startReceiver registers the stream under its account id, then pumps
// incoming RouteMsg frames to HandleRequest until the stream ends (EOF)
// or errors. Connection bookkeeping (map entry, online counter, player
// connect/disconnect callbacks) is maintained around the receive loop.
func (s *StreamServer) startReceiver(stream pb.RouteConnectGame_AgentStreamServer) error {
	logger.INFO("gameAgent startReceiver")
	headers, _ := metadata.FromIncomingContext(stream.Context())
	logger.INFO(headers)
	// Guard against missing/empty "accountid" metadata; indexing
	// headers["accountid"][0] unconditionally panicked in that case.
	ids := headers["accountid"]
	if len(ids) == 0 {
		logger.ERR("GameAgent stream missing accountid metadata")
		return errors.New("missing accountid metadata")
	}
	accountId := ids[0]
	accountConnectMap.Store(accountId, stream)
	PlayerConnected(accountId, stream)
	atomic.AddInt32(&onlinePlayers, 1)
	// Receiving client msg
	var err error
	var in *pb.RouteMsg
	for {
		in, err = stream.Recv()
		if err == io.EOF {
			logger.ERR("GameAgent EOF")
			break
		}
		if err != nil {
			logger.ERR("GameAgent err: ", err)
			break
		}
		logger.INFO("AgentStream received: ", accountId, " data: ", len(in.GetData()))
		HandleRequest(accountId, in.GetData())
	}
	// Single cleanup path: the EOF branch previously deleted the map entry
	// here as well, duplicating the delete below.
	accountConnectMap.Delete(accountId)
	PlayerDisconnected(accountId)
	atomic.AddInt32(&onlinePlayers, -1)
	return err
}
|
package main
import (
"errors"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
)
// manifestVersionCheck is a minimal view of deployer.yml used only to
// inspect the declared manifest format version.
type manifestVersionCheck struct {
	Version int `yaml:"version"`
}

// checkManifestVersion rejects manifests written for the v2 deployer.
// A missing file is treated as success; any other read or unmarshal
// failure is returned as-is.
func checkManifestVersion(filename string) (err error) {
	buf, readErr := ioutil.ReadFile(filename)
	if readErr != nil {
		if os.IsNotExist(readErr) {
			return nil
		}
		return readErr
	}
	var check manifestVersionCheck
	if unmarshalErr := yaml.Unmarshal(buf, &check); unmarshalErr != nil {
		return unmarshalErr
	}
	// Version 0 means the field is absent, i.e. a v1 manifest.
	if check.Version != 0 {
		return errors.New("deployer 不兼容 version: 2 的 deployer.yml 文件,请使用 deployer2")
	}
	return nil
}
|
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// APIServiceSpec defines the desired state of APIService
type APIServiceSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
	// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
	// Size is the size of the memcached deployment
	Size int32 `json:"size"`
	// ConfigMap is the config map name to retrieve message body
	// ConfigMap string `json:"config-map"`
}
// APIServiceStatus defines the observed state of APIService
type APIServiceStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
	// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
	// Nodes are the names of the memcached pods
	// +listType=set
	Nodes []string `json:"nodes"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// APIService is the Schema for the apiservices API
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=apiservices,scope=Namespaced
type APIService struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec is the user-declared desired state; Status is written back by
	// the operator (exposed as a subresource, see marker above).
	Spec   APIServiceSpec   `json:"spec,omitempty"`
	Status APIServiceStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// APIServiceList contains a list of APIService
type APIServiceList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []APIService `json:"items"`
}
// init registers APIService and APIServiceList with the scheme builder so
// the operator's codecs can serialize and deserialize these types.
func init() {
	SchemeBuilder.Register(&APIService{}, &APIServiceList{})
}
|
///usr/bin/env go run -ldflags "-X main.Version=shell.run" $0 "$@"; exit
/*
* Trident Setup (tsetup)
*
* tsetup is only meant for initial setup tasks.
* It should be run as the 'postgres' user.
*
* For general use, use 'tcli' or the webinterface and log in.
*/
package main
import (
"trident.li/pitchfork/cmd/setup"
tr "trident.li/trident/src/lib"
)
// Version is the build version string; it is overridden at link time via
// -ldflags "-X main.Version=..." (see the shebang-style header above).
var Version = "unconfigured"

// main delegates all setup work to the shared pitchfork setup command,
// parameterized with Trident's branding, schema version, default server
// address and context factory.
func main() {
	pf_cmd_setup.Setup("tsetup", "trident", tr.AppName, Version, tr.Copyright, tr.Website, tr.AppSchemaVersion, "TRIDENT_SERVER", "http://127.0.0.1:8333", tr.NewTriCtx)
}
|
package main
import "math/rand"
// Skip list (see the usage comment at the end of the file)
// main is intentionally empty: this file is a LeetCode-style exercise and
// the Skiplist type below is driven by an external harness.
func main() {
}
const (
	maxLevel = 16   // hard cap on the number of levels a node may span
	p        = 0.25 // probability of promoting a node one level higher
)

// Node is a single skip-list element; Next holds one forward pointer per
// level the node participates in.
type Node struct {
	Val  int
	Next []*Node
}

// Skiplist is the probabilistic ordered list itself.
type Skiplist struct {
	level  int   // number of levels currently in use
	header *Node // sentinel head node
}
// Constructor returns an empty skip list with a sentinel head node that
// carries the maximum number of forward pointers.
func Constructor() Skiplist {
	return Skiplist{
		level: 0, // starts with zero levels in use
		header: &Node{
			Next: make([]*Node, maxLevel), // head owns maxLevel forward pointers
		},
	}
}
// Search reports whether target exists in the skip list.
func (this *Skiplist) Search(target int) bool {
	cur := this.header // start from the sentinel head
	for i := this.level - 1; i >= 0; i-- {
		// Walk down from the highest level towards level 0.
		for cur.Next[i] != nil && cur.Next[i].Val < target {
			// Keep advancing on this level while the next value is still
			// smaller than the target.
			cur = cur.Next[i]
		}
		// If the successor on this level exists and equals the target,
		// the value is present.
		if cur.Next[i] != nil && cur.Next[i].Val == target {
			return true
		}
	}
	// Scanned every level without a hit.
	return false
}
// max returns the larger of a and b.
func (this *Skiplist) max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// randLevel draws a random level for a new node: start at 1 and promote
// with probability p each round, capped at maxLevel.
func (this *Skiplist) randLevel() int {
	l := 1
	// Start at one level; each additional level is geometrically less likely.
	for rand.Float64() < p && l < maxLevel {
		l++
	}
	return l
}
// Add inserts num into the skip list; duplicates are inserted as well.
func (this *Skiplist) Add(num int) {
	// updateNode records, per level, the closest node strictly before num:
	// 1. updateNode[i] becomes the predecessor of the new node on level i.
	// 2. It is initialized to header to simplify the splice logic below.
	// 3. If the new node adds no levels, its predecessors are the nodes
	//    recorded during the descent (point 1).
	// 4. If the new node adds new levels, its predecessor on those fresh
	//    levels is the header itself (point 2).
	updateNode := make([]*Node, maxLevel)
	for i := range updateNode {
		updateNode[i] = this.header
	}
	// Descend from the top level, hugging the largest values < num.
	cur := this.header
	for i := this.level - 1; i >= 0; i-- {
		for cur.Next[i] != nil && cur.Next[i].Val < num {
			cur = cur.Next[i]
		}
		updateNode[i] = cur // closest node before num on this level
	}
	lv := this.randLevel() // number of levels for the new node
	newNode := &Node{
		Val:  num,               // duplicates are not rejected; they are inserted too
		Next: make([]*Node, lv), // one forward pointer per participating level
	}
	for i := 0; i < lv; i++ {
		// Splice on every level the node participates in: newNode takes the
		// predecessor's old successor, and the predecessor points at newNode.
		newNode.Next[i], updateNode[i].Next[i] = updateNode[i].Next[i], newNode
	}
	// The list's level is the max of the previous level and the new node's.
	this.level = this.max(this.level, lv)
}
// Erase removes one occurrence of num and reports whether a deletion
// happened. Levels that become empty are trimmed from the list height.
func (this *Skiplist) Erase(num int) bool {
	flag := 0              // set to 1 once a deletion actually takes place
	newLevel := this.level // deleting may empty levels; empty levels are useless
	cur := this.header
	// Same descent pattern as Search/Add.
	for i := this.level - 1; i >= 0; i-- {
		for cur.Next[i] != nil && cur.Next[i].Val < num {
			cur = cur.Next[i]
		}
		// Slightly different from the common textbook version: when the
		// successor on this level equals the target, unlink it right here.
		if cur.Next[i] != nil && cur.Next[i].Val == num {
			cur.Next[i] = cur.Next[i].Next[i] // bypass the successor on this level, unlinking it
			flag = 1                          // record that something was deleted
			if this.header.Next[i] == nil {
				// Shrink logic: if the head now points at nothing on this
				// level, the level is empty and no longer needed.
				newLevel--
			}
		}
	}
	this.level = newLevel
	return flag == 1
}
/**
* Your Skiplist object will be instantiated and called as such:
* obj := Constructor();
* param_1 := obj.Search(target);
* obj.Add(num);
* param_3 := obj.Erase(num);
*/
|
package hardcoded
import (
"github.com/iotaledger/wasp/contracts"
"github.com/iotaledger/wasp/packages/hashing"
"github.com/iotaledger/wasp/packages/vm/core"
)
// LocateHardcodedProgram resolves programHash against the built-in
// processor registries — core first, then the example contracts — and
// returns the VM type of the registry that knows the hash, plus whether
// it was found at all.
func LocateHardcodedProgram(programHash hashing.HashValue) (string, bool) {
	_, coreErr := core.GetProcessor(programHash)
	if coreErr == nil {
		return core.VMType, true
	}
	_, found := contracts.GetExampleProcessor(programHash)
	if found {
		return contracts.VMType, true
	}
	return "", false
}
|
package transaction
import (
"github.com/babyboy/core/types"
"github.com/babyboy/common"
)
// ResultBack bundles the outcome of a UTXO lookup.
type ResultBack struct {
	stable bool       // NOTE(review): presumably "containing unit is stable" — confirm with callers
	exist  bool       // whether a matching UTXO was found
	utxo   types.UTXO // the located UTXO; meaningful only when exist is true
}

// UtxoHelper pairs a UTXO with its owning address and a stability flag.
type UtxoHelper struct {
	Address  common.Address
	UTXO     types.UTXO
	IsStable bool
}
|
package frida_go
// PeerOptions holds peer connectivity settings: an optional STUN server
// address and the set of relays to use.
type PeerOptions struct {
	StunServer string   // STUN server address (format presumably host:port — confirm)
	Relays     []*Relay // relay candidates
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package install
import (
"fmt"
"log"
"strings"
"time"
pb "github.com/CS-SI/SafeScale/broker"
brokerclient "github.com/CS-SI/SafeScale/broker/client"
"github.com/CS-SI/SafeScale/deploy/install/enums/Action"
)
// Target selector keys accepted in a step's "targets" specification block.
const (
	targetHosts        = "hosts"
	targetMasters      = "masters"
	targetPublicNodes  = "publicnodes"
	targetPrivateNodes = "privatenodes"
)

// stepResult holds the outcome of one step execution on a single host.
type stepResult struct {
	success bool
	err     error
}

// Successful reports whether the step completed without error.
func (sr stepResult) Successful() bool {
	return sr.success
}

// Error returns the raw error of the step execution (nil on success).
func (sr stepResult) Error() error {
	return sr.err
}

// ErrorMessage returns the error text, or "" when no error occurred.
func (sr stepResult) ErrorMessage() string {
	if sr.err != nil {
		return sr.err.Error()
	}
	return ""
}

// stepResults contains the errors of the step for each host target
type stepResults map[string]stepResult

// ErrorMessages concatenates per-host error texts, one "host: msg" line
// per failed host.
func (s stepResults) ErrorMessages() string {
	output := ""
	for h, k := range s {
		val := k.ErrorMessage()
		if val != "" {
			output += h + ": " + val + "\n"
		}
	}
	return output
}

// Successful reports whether every host succeeded; an empty result set is
// considered a failure.
func (s stepResults) Successful() bool {
	if len(s) == 0 {
		return false
	}
	for _, k := range s {
		if !k.Successful() {
			return false
		}
	}
	return true
}

// stepTargets maps a target selector key to its raw value from the spec file.
type stepTargets map[string]string

// normalizeHostTarget standardizes a hosts-style value to "0" or "1".
// The second result is false when the value is not recognized.
func normalizeHostTarget(value string) (string, bool) {
	switch strings.ToLower(value) {
	case "", "false", "no", "none", "0":
		return "0", true
	case "yes", "true", "1":
		return "1", true
	}
	return "", false
}

// normalizeNodeTarget standardizes a node/master-style value:
// "0" = none, "1" = any single one, "*" = all.
// The second result is false when the value is not recognized.
func normalizeNodeTarget(value string) (string, bool) {
	switch strings.ToLower(value) {
	case "", "false", "no", "none", "0":
		return "0", true
	case "any", "one", "1":
		return "1", true
	case "all", "*":
		return "*", true
	}
	return "", false
}

// parse converts the content of specification file loaded inside struct to
// standardized values (0, 1 or *). It returns the host, master,
// private-node and public-node selectors, and an error when a value is not
// recognized or when no target at all is selected.
//
// Bug fix: the private-nodes selector now accepts the literal "0" like the
// other three selectors already did (it previously fell through to the
// invalid-value error).
func (st stepTargets) parse() (string, string, string, string, error) {
	var hostT, masterT, privnodeT, pubnodeT string
	if raw, ok := st[targetHosts]; ok {
		norm, valid := normalizeHostTarget(raw)
		if !valid {
			return "", "", "", "", fmt.Errorf("invalid value '%s' for target '%s'", raw, targetHosts)
		}
		hostT = norm
	}
	if raw, ok := st[targetMasters]; ok {
		norm, valid := normalizeNodeTarget(raw)
		if !valid {
			return "", "", "", "", fmt.Errorf("invalid value '%s' for target '%s'", raw, targetMasters)
		}
		masterT = norm
	}
	if raw, ok := st[targetPrivateNodes]; ok {
		norm, valid := normalizeNodeTarget(raw)
		if !valid {
			return "", "", "", "", fmt.Errorf("invalid value '%s' for target '%s'", raw, targetPrivateNodes)
		}
		privnodeT = norm
	}
	if raw, ok := st[targetPublicNodes]; ok {
		norm, valid := normalizeNodeTarget(raw)
		if !valid {
			return "", "", "", "", fmt.Errorf("invalid value '%s' for target '%s'", raw, targetPublicNodes)
		}
		pubnodeT = norm
	}
	// At least one target must be selected. Absent keys stay "" and do not
	// count as an explicit "0" (matching the original behavior).
	if hostT == "0" && masterT == "0" && privnodeT == "0" && pubnodeT == "0" {
		return "", "", "", "", fmt.Errorf("no targets identified")
	}
	return hostT, masterT, privnodeT, pubnodeT, nil
}
// step is a struct containing the needed information to apply the installation
// step on all selected host targets
type step struct {
	// Worker is a back pointer to the caller
	Worker *worker
	// Name is the name of the step
	Name string
	// Action is the action of the step (check, add, remove)
	Action Action.Enum
	// Targets contains the host targets to select
	Targets stepTargets
	// WallTime contains the maximum time the step must run (passed as the
	// SSH run timeout in runOnHost)
	WallTime time.Duration
	// Script contains the script to execute
	Script string
	// YamlKey contains the root yaml key on the specification file
	YamlKey string
	// OptionsFileContent contains the "options file" if it exists (for DCOS cluster for now)
	OptionsFileContent string
	// Serial tells if step can be performed in parallel on selected host or not
	Serial bool
}
// Run executes the step on all the concerned hosts.
// Hosts are processed sequentially when the step (or the global settings)
// requires serialization, concurrently otherwise. The returned stepResults
// maps each host name to its individual outcome; the error result is
// always nil — per-host failures are reported through the map.
func (is *step) Run(hosts []*pb.Host, v Variables, s Settings) (stepResults, error) {
	//if debug
	if false {
		log.Printf("running step '%s' on %d hosts...", is.Name, len(hosts))
	}
	results := stepResults{}
	if is.Serial || s.Serialize {
		for _, h := range hosts {
			//if debug
			if false {
				log.Printf("%s(%s):step(%s)@%s: starting\n", is.Worker.action.String(), is.Worker.feature.DisplayName(), is.Name, h.Name)
			}
			v["HostIP"] = h.PrivateIP
			v["Hostname"] = h.Name
			results[h.Name] = is.runOnHost(h, v)
			//if debug {
			if false {
				if !results[h.Name].Successful() {
					log.Printf("%s(%s):step(%s)@%s: fail\n", is.Worker.action.String(), is.Worker.feature.DisplayName(), is.Name, h.Name)
				} else {
					log.Printf("%s(%s):step(%s)@%s: success\n", is.Worker.action.String(), is.Worker.feature.DisplayName(), is.Name, h.Name)
				}
			}
		}
	} else {
		dones := map[string]chan stepResult{}
		for _, h := range hosts {
			//if debug
			if false {
				log.Printf("%s(%s):step(%s)@%s: starting\n", is.Worker.action.String(), is.Worker.feature.DisplayName(), is.Name, h.Name)
			}
			// Give every goroutine its own copy of the variables: the
			// original code mutated the single shared map inside the loop
			// while already-started goroutines were reading it, which is a
			// data race and could hand a later host's HostIP/Hostname to an
			// earlier host's step.
			hv := Variables{}
			for key, val := range v {
				hv[key] = val
			}
			hv["HostIP"] = h.PrivateIP
			hv["Hostname"] = h.Name
			d := make(chan stepResult)
			dones[h.Name] = d
			go func(host *pb.Host, vars Variables, done chan stepResult) {
				done <- is.runOnHost(host, vars)
			}(h, hv, d)
		}
		for k, d := range dones {
			results[k] = <-d
			//if debug {
			if false {
				if !results[k].Successful() {
					log.Printf("%s(%s):step(%s)@%s: fail\n", is.Worker.action.String(), is.Worker.feature.DisplayName(), is.Name, k)
				} else {
					log.Printf("%s(%s):step(%s)@%s: done\n", is.Worker.action.String(), is.Worker.feature.DisplayName(), is.Name, k)
				}
			}
		}
	}
	return results, nil
}
// runOnHost renders the step script with the given variables, uploads it
// (plus the optional options file) to the host and executes it over SSH.
// A non-zero exit code is converted into a failed stepResult.
func (is *step) runOnHost(host *pb.Host, v Variables) stepResult {
	// Updates variables in step script
	command, err := replaceVariablesInString(is.Script, v)
	if err != nil {
		return stepResult{success: false, err: fmt.Errorf("failed to finalize installer script for step '%s': %s", is.Name, err.Error())}
	}
	// If options file is defined, upload it to the remote host
	if is.OptionsFileContent != "" {
		err := UploadStringToRemoteFile(is.OptionsFileContent, host, "/var/tmp/options.json", "cladm", "gpac", "ug+rw-x,o-rwx")
		if err != nil {
			return stepResult{success: false, err: err}
		}
	}
	// Uploads then executes command
	filename := fmt.Sprintf("/var/tmp/%s.feature.%s_%s.sh", is.Worker.feature.BaseFilename(), strings.ToLower(is.Action.String()), is.Name)
	err = UploadStringToRemoteFile(command, host, filename, "", "", "")
	if err != nil {
		return stepResult{success: false, err: err}
	}
	//if debug {
	if true {
		// NOTE(review): the debug toggle is hard-wired to true, so the
		// uploaded script and options file are never cleaned up; the else
		// branch below is currently unreachable.
		command = fmt.Sprintf("sudo bash %s", filename)
	} else {
		// Production path: run the script, then remove the uploaded files
		// regardless of outcome, preserving the script's exit code.
		command = fmt.Sprintf("sudo bash %s; rc=$?; sudo rm -f %s /var/tmp/options.json; exit $rc", filename, filename)
	}
	// Executes the script on the remote host
	retcode, _, _, err := brokerclient.New().Ssh.Run(host.Name, command, brokerclient.DefaultConnectionTimeout, is.WallTime)
	if err != nil {
		return stepResult{success: false, err: err}
	}
	err = nil
	ok := retcode == 0
	if !ok {
		err = fmt.Errorf("step '%s' failed (retcode=%d)", is.Name, retcode)
	}
	return stepResult{success: ok, err: err}
}
|
package io
import (
"testing"
)
// fileReader is the shared fixture reader used by the tests below; note
// that the tests mutate its open/closed state and are order-sensitive.
var fileReader = FileReader{
	FilePath: "../file-repository/base_teste.txt",
}
// TestReadFileSuccessfully checks that the fixture file can be opened.
func TestReadFileSuccessfully(t *testing.T) {
	err := fileReader.Initialize()
	if err != nil {
		// t.Errorf(err.Error()) trips `go vet` (non-constant format
		// string); report the error as a value instead.
		t.Errorf("%v", err)
	}
}
// TestCloseUnopenedFileSuccessfully checks that closing a reader that was
// never initialized reports an error.
func TestCloseUnopenedFileSuccessfully(t *testing.T) {
	fileReaderUnopened := FileReader{
		FilePath: "../file-repository/base_teste.txt",
	}
	err := fileReaderUnopened.CloseFile()
	if err == nil {
		// Fixed typo in the failure message ("shoud've").
		t.Errorf("should've sent error")
	}
}
// TestOpenCloseFileSuccessfully opens and then closes the fixture file,
// failing on either error. The Initialize error was previously ignored,
// which made the close-error message misleading when opening failed.
func TestOpenCloseFileSuccessfully(t *testing.T) {
	if err := fileReader.Initialize(); err != nil {
		t.Fatalf("error opening the file: %v", err)
	}
	err := fileReader.CloseFile()
	if err != nil {
		t.Errorf("error closing the file: %v", err)
	}
}
// TestReadFirstLineOfFile reads the first line of the fixture and compares
// it against the expected header row.
func TestReadFirstLineOfFile(t *testing.T) {
	// Check the Initialize error (previously ignored) and make sure the
	// file is closed even when an assertion fails.
	if err := fileReader.Initialize(); err != nil {
		t.Fatalf("error opening the file: %v", err)
	}
	defer fileReader.CloseFile()
	hasText := fileReader.Next()
	if !hasText {
		t.Errorf("file shouldn't be empty")
	}
	line := fileReader.ReadLine()
	if line == "" {
		t.Errorf("line shouldn't be empty")
	}
	if line != "CPF PRIVATE INCOMPLETO DATA DA ÚLTIMA COMPRA TICKET MÉDIO TICKET DA ÚLTIMA COMPRA LOJA MAIS FREQUÊNTE LOJA DA ÚLTIMA COMPRA" {
		t.Errorf("line is not the first one")
	}
}
|
package recognizer
import (
"time"
)
// InferencesResult type
// InferencesResult is one page of inference matches plus the total count
// reported by the service.
type InferencesResult struct {
	Inferences []Inference `json:"inferences"`
	Page       struct {
		TotalCount int `json:"total_count"`
	} `json:"page"`
}

// Inference type
// Inference is a single scored association between a detected face and a
// known label.
type Inference struct {
	ID    int     `json:"id"`
	Score float32 `json:"score"`
	Face  *face   `json:"face"`
	Label *Label  `json:"label"`
}

// face is a detected face region together with the photo it came from.
type face struct {
	ID       int    `json:"id"`
	ImageURL string `json:"image_url"`
	Photo    *photo `json:"photo"`
}

// photo describes the source photo of a face.
type photo struct {
	SourceURL string     `json:"source_url"`
	PhotoURL  string     `json:"photo_url"`
	Caption   string     `json:"caption"`
	PostedAt  *time.Time `json:"posted_at"`
}

// Label type
// Label identifies a known entity a face can be matched against.
type Label struct {
	ID          int    `json:"id"`
	Name        string `json:"name"`
	Description string `json:"description"`
	Twitter     string `json:"twitter"`
}
|
package helpers
import (
// System
"os/exec"
// 3rd Party
log "github.com/sirupsen/logrus"
)
// BashCmd describes a single external command invocation: the binary,
// its arguments, and the working directory to run it in.
type BashCmd struct {
	Cmd      string
	Args     []string
	ExecPath string
}
// Executes CLI commands
// ExecBashCmd runs c.Cmd with c.Args from the working directory c.ExecPath.
// On failure it logs the command's combined output and then terminates the
// process via log.Fatalf.
func ExecBashCmd(c *BashCmd) {
	// Build the command and pin its working directory.
	run := exec.Command(c.Cmd, c.Args...)
	run.Dir = c.ExecPath
	// Capture stdout and stderr together; only surfaced on failure.
	output, err := run.CombinedOutput()
	if err == nil {
		return
	}
	log.Errorf("\t %v", string(output))
	log.Fatalf("\t %v", err)
}
|
package main
import (
"bytes"
"reflect"
"testing"
)
// TestPKCS7Pad verifies PKCS#7 padding for partial and full final blocks.
func TestPKCS7Pad(t *testing.T) {
	for _, tc := range []struct {
		in   []byte
		size int
		want []byte
	}{
		{[]byte{0}, 3, []byte{0, 2, 2}},
		{[]byte{0, 0}, 3, []byte{0, 0, 1}},
		// A full block gains an entire block of padding.
		{[]byte{0, 0, 0}, 3, []byte{0, 0, 0, 3, 3, 3}},
	} {
		if got := PKCS7Pad(tc.in, tc.size); !bytes.Equal(got, tc.want) {
			t.Errorf("got %v, want %v", got, tc.want)
		}
	}
}
// TestPKCS7Unpad verifies that PKCS#7 padding is stripped back off.
func TestPKCS7Unpad(t *testing.T) {
	for _, tc := range []struct {
		in   []byte
		size int
		want []byte
	}{
		{[]byte{0, 2, 2}, 3, []byte{0}},
		{[]byte{0, 0, 1}, 3, []byte{0, 0}},
		{[]byte{0, 0, 0, 3, 3, 3}, 3, []byte{0, 0, 0}},
	} {
		got, _ := PKCS7Unpad(tc.in, tc.size)
		if !bytes.Equal(got, tc.want) {
			t.Errorf("got %v, want %v", got, tc.want)
		}
	}
}
// TestHasIdenticalBlocks checks detection of repeated fixed-size blocks
// (the classic ECB-mode fingerprint).
func TestHasIdenticalBlocks(t *testing.T) {
	for _, tc := range []struct {
		in   []byte
		size int
		want bool
	}{
		{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3}, 3, true},
		{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 4, 5, 6}, 3, true},
		// Repeats that straddle block boundaries must not count.
		{[]byte{1, 2, 3, 1, 3, 2, 3, 1, 3, 2, 3, 1}, 3, false},
	} {
		if got := HasIdenticalBlocks(tc.in, tc.size); got != tc.want {
			t.Errorf("got %v, want %v", got, tc.want)
		}
	}
}
// TestSubdivide checks splitting a buffer into fixed-size chunks, and the
// too-short case that yields nil.
func TestSubdivide(t *testing.T) {
	for _, tc := range []struct {
		in   []byte
		n    int
		want [][]byte
	}{
		{[]byte{1, 2}, 3, nil},
		{[]byte{1, 2, 3, 4, 5, 6}, 3, [][]byte{{1, 2, 3}, {4, 5, 6}}},
		{[]byte{1, 2, 3, 4, 5, 6}, 2, [][]byte{{1, 2}, {3, 4}, {5, 6}}},
	} {
		if got := Subdivide(tc.in, tc.n); !reflect.DeepEqual(got, tc.want) {
			t.Errorf("got %v, want %v", got, tc.want)
		}
	}
}
// TestRandomBytes draws several 16-byte buffers and fails if any two are
// identical (a crude but effective randomness sanity check).
func TestRandomBytes(t *testing.T) {
	var seen [][]byte
	for i := 0; i < 5; i++ {
		cur := RandomBytes(16)
		for _, prev := range seen {
			if bytes.Equal(cur, prev) {
				t.Errorf("identical buffers %v and %v", cur, prev)
			}
		}
		seen = append(seen, cur)
	}
}
// TestRandomInRange draws repeatedly and checks every value stays inside
// the inclusive [lo, hi] interval, including the degenerate lo == hi case.
func TestRandomInRange(t *testing.T) {
	for _, tc := range []struct{ lo, hi int }{
		{0, 0},
		{5, 10},
		{20, 30},
	} {
		for i := 0; i < 100; i++ {
			got := RandomInRange(tc.lo, tc.hi)
			if got < tc.lo || got > tc.hi {
				t.Errorf("got %v, want range [%v, %v]", got, tc.lo, tc.hi)
			}
		}
	}
}
|
package handler
import (
"github.com/sundogrd/content-api/services/content"
"github.com/gin-gonic/gin"
)
// GetContent serves a single content item.
// NOTE(review): the FindOne result and error are currently discarded and a
// hardcoded body is returned — this looks like stub code; wire the result
// through before relying on this endpoint.
func GetContent(c *gin.Context) {
	content.ContentRepositoryInstance().FindOne(c, content.FindOneRequest{ID: 1})
	c.JSON(200, gin.H{
		"msg": "asd",
	})
}

// ListContent returns a stubbed listing payload.
func ListContent(c *gin.Context) {
	c.JSON(200, gin.H{
		"msg": "list",
	})
}

// CreateContent returns a stubbed creation payload.
func CreateContent(c *gin.Context) {
	c.JSON(200, gin.H{
		"data": "create",
	})
}

// UpdateContent returns a stubbed update payload.
func UpdateContent(c *gin.Context) {
	c.JSON(200, gin.H{
		"data": "updated",
	})
}

// DeleteContent returns a stubbed deletion payload.
func DeleteContent(c *gin.Context) {
	c.JSON(200, gin.H{
		"data": "deleted",
	})
}
|
package git
import (
"io/ioutil"
"os"
"testing"
)
const (
REMOTENAME = "testremote"
)
// TestClone clones a freshly seeded local repository as a bare repo and
// checks that refs/heads/master points at the same commit in the source
// and in the clone.
func TestClone(t *testing.T) {
	t.Parallel()
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)
	seedTestRepo(t, repo)
	path, err := ioutil.TempDir("", "git2go")
	checkFatal(t, err)
	ref, err := repo.References.Lookup("refs/heads/master")
	checkFatal(t, err)
	repo2, err := Clone(repo.Path(), path, &CloneOptions{Bare: true})
	defer cleanupTestRepo(t, repo2)
	checkFatal(t, err)
	ref2, err := repo2.References.Lookup("refs/heads/master")
	checkFatal(t, err)
	if ref.Cmp(ref2) != 0 {
		t.Fatal("reference in clone does not match original ref")
	}
}
// TestCloneWithCallback verifies that a custom RemoteCreateCallback is
// invoked exactly once during a clone and that the remote it creates is
// visible in the resulting repository under the expected name.
func TestCloneWithCallback(t *testing.T) {
	t.Parallel()
	testPayload := 0
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)
	seedTestRepo(t, repo)
	path, err := ioutil.TempDir("", "git2go")
	checkFatal(t, err)
	opts := CloneOptions{
		Bare: true,
		RemoteCreateCallback: func(r *Repository, name, url string) (*Remote, error) {
			// Count invocations so the test can assert it ran exactly once.
			testPayload += 1
			return r.Remotes.Create(REMOTENAME, url)
		},
	}
	repo2, err := Clone(repo.Path(), path, &opts)
	defer cleanupTestRepo(t, repo2)
	checkFatal(t, err)
	if testPayload != 1 {
		t.Fatal("Payload's value has not been changed")
	}
	remote, err := repo2.Remotes.Lookup(REMOTENAME)
	if err != nil || remote == nil {
		t.Fatal("Remote was not created properly")
	}
	defer remote.Free()
}
// TestCloneWithExternalHTTPUrl exercises the https transport against a
// public fixture repository (requires network access).
func TestCloneWithExternalHTTPUrl(t *testing.T) {
	path, err := ioutil.TempDir("", "git2go")
	// The TempDir error was previously ignored, so a tempdir failure
	// surfaced later as a confusing clone error.
	if err != nil {
		t.Fatal("cannot create temporary directory, error: ", err)
	}
	defer os.RemoveAll(path)
	// clone the repo
	url := "https://github.com/libgit2/TestGitRepository"
	_, err = Clone(url, path, &CloneOptions{})
	if err != nil {
		t.Fatal("cannot clone remote repo via https, error: ", err)
	}
}
|
package main
import (
"fmt"
"math"
)
// Rendering parameters for the SVG surface plot. The commented-out block
// is a half-resolution alternative kept for quick experiments.
const (
	//width, height = 300, 160 // canvas size in pixels
	//cells         = 50       // number of grid cells
	//xyrange       = 15.0     // axis ranges (-xyrange..+xyrange)
	//xyscale       = width / 2 / xyrange * 5 // pixels per x or y unit
	width, height = 600, 320            // canvas size in pixels
	cells         = 100                 // number of grid cells
	xyrange       = 30.0                // axis ranges (-xyrange..+xyrange)
	xyscale       = width / 2 / xyrange // pixels per x or y unit
	zscale        = height * 0.4        // pixels per z unit
	angle         = math.Pi / 6         // angle of x, y axes (=30°)
)

var sin30, cos30 = math.Sin(angle), math.Cos(angle) // sin(30°), cos(30°)
// main writes an SVG polygon mesh of the selected surface to stdout,
// skipping every grid cell with a corner where the surface is undefined.
func main() {
	fmt.Printf("<svg xmlns='http://www.w3.org/2000/svg' "+
		"style='stroke: grey; fill: white; stroke-width: 0.7' "+
		"width='%d' height='%d'>", width, height)
	for i := 0; i < cells; i++ {
		for j := 0; j < cells; j++ {
			// Evaluate the four corners of cell (i,j) in the same order
			// as before: a=(i+1,j), b=(i,j), c=(i,j+1), d=(i+1,j+1).
			var xs, ys [4]float64
			valid := true
			for k, o := range [4][2]int{{i + 1, j}, {i, j}, {i, j + 1}, {i + 1, j + 1}} {
				xs[k], ys[k] = corner(o[0], o[1])
				if xs[k] == -1 {
					valid = false
				}
			}
			if !valid {
				continue
			}
			fmt.Printf("<polygon points='%g,%g %g,%g %g,%g %g,%g'/>\n",
				xs[0], ys[0], xs[1], ys[1], xs[2], ys[2], xs[3], ys[3])
		}
	}
	fmt.Println("</svg>")
}
// corner returns the 2-D SVG coordinates of grid corner (i,j) after
// isometric projection, or (-1,-1) when the surface function is undefined
// (NaN) there. Note that -1 doubles as the sentinel, so a legitimate
// projection landing exactly on -1 would also be dropped (accepted here).
func corner(i, j int) (float64, float64) {
	// Find point (x,y) at corner of cell (i,j).
	x := xyrange * (float64(i)/cells - 0.5)
	y := xyrange * (float64(j)/cells - 0.5)
	// Compute surface height z. Swap in one of the commented alternatives
	// to render a different surface.
	z, ok := saddle(x, y)
	// z, ok := eggbox(x, y)
	// z, ok := mogle(x, y)
	// z, ok := gaussian(x, y)
	if !ok {
		return -1, -1
	}
	//z := xyscale / 100 * zz
	// Project (x,y,z) isometrically onto 2-D SVG canvas (sx,sy).
	sx := width/2 + (x-y)*cos30*xyscale
	sy := height/2 + (x+y)*sin30*xyscale - z*zscale
	return sx, sy
}
// saddle evaluates the saddle surface (x²−y²)/(20r), where r is the
// distance from the origin; ok is false when the value is NaN (at r == 0).
func saddle(x, y float64) (float64, bool) {
	r := math.Hypot(x, y) // distance from (0,0)
	if v := (math.Pow(x, 2) - math.Pow(y, 2)) / (20 * r); !math.IsNaN(v) {
		return v, true
	}
	return 0, false
}
// eggbox evaluates a distance-damped egg-carton surface; ok is false when
// the expression is NaN (at the origin, where the radius is zero).
func eggbox(x, y float64) (float64, bool) {
	r := math.Hypot(x, y) // distance from (0,0)
	// Amplitude shrinks with distance from the origin (NaN at r == 0).
	rad := math.Sqrt(math.Pow(x, 2)+math.Pow(y, 2)) / (20 * r)
	sv := rad * math.Sin(x)
	cv := rad * math.Cos(y)
	if v := math.Sqrt(math.Pow(sv, 2) + math.Pow(cv, 2)); !math.IsNaN(v) {
		return v, true
	}
	return 0, false
}
// mogle evaluates a distance-damped sine ripple along x; ok is false when
// the expression is NaN (at the origin).
func mogle(x, y float64) (float64, bool) {
	r := math.Hypot(x, y) // distance from (0,0)
	rad := math.Sqrt(math.Pow(x, 2)+math.Pow(y, 2)) / (20 * r)
	if v := rad * math.Sin(x); !math.IsNaN(v) {
		return v, true
	}
	return 0, false
}
// gaussian evaluates exp(−r²), a radially symmetric bump of height 1 at
// the origin; ok mirrors the NaN check (never false for finite inputs).
func gaussian(x, y float64) (float64, bool) {
	r := math.Hypot(x, y) // distance from (0,0)
	if v := math.Exp(-(r * r)); !math.IsNaN(v) {
		return v, true
	}
	return 0, false
}
// wave evaluates sin(x+y)/r; ok is false when the expression is NaN
// (0/0 at the origin).
func wave(x, y float64) (float64, bool) {
	r := math.Hypot(x, y) // distance from (0,0)
	if v := math.Sin(x+y) / r; !math.IsNaN(v) {
		return v, true
	}
	return 0, false
}
// sqrtmapping evaluates sqrt(1+x²+y²)/(2r). At the origin this is +Inf
// rather than NaN, so ok stays true there.
func sqrtmapping(x, y float64) (float64, bool) {
	r := math.Hypot(x, y) // distance from (0,0)
	if v := math.Sqrt(1+math.Pow(x, 2)+math.Pow(y, 2)) / (2 * r); !math.IsNaN(v) {
		return v, true
	}
	return 0, false
}
// eggbox2 is a cosine-modulated variant of the egg-box surface; ok is
// false when the expression is NaN (at the origin).
func eggbox2(x, y float64) (float64, bool) {
	r := math.Hypot(x, y) // distance from (0,0)
	rad := math.Sqrt(math.Pow(x, 2)+math.Pow(y, 2)) / (2 * r)
	sv := rad * math.Sin(x)
	cv := rad * math.Cos(y)
	amp := math.Sqrt(math.Pow(sv, 2) + math.Pow(cv, 2))
	if v := rad * math.Cos(amp); !math.IsNaN(v) {
		return v, true
	}
	return 0, false
}
|
package worker
import (
"encoding/hex"
)
// compact is 16-byte representation of 36-characters UUID, 56% smaller, still comparable
type compact [16]byte

// fromUUID returns Compact representation of `uuid`.
// The five dash-separated hex groups (8-4-4-4-12 chars) are decoded into
// contiguous bytes 0:4, 4:6, 6:8, 8:10 and 10:16.
func fromUUID(uuid string) compact {
	c := compact{}
	bid := []byte(uuid)
	// We expect UUID to be always valid here, hence the ignored errors.
	// Each destination slice is exactly len(src)/2 bytes; the original
	// over-sized several of them (e.g. c[0:8] for 4 decoded bytes), which
	// worked but obscured the byte layout.
	_, _ = hex.Decode(c[0:4], bid[0:8])
	_, _ = hex.Decode(c[4:6], bid[9:13])
	_, _ = hex.Decode(c[6:8], bid[14:18])
	_, _ = hex.Decode(c[8:10], bid[19:23])
	_, _ = hex.Decode(c[10:16], bid[24:])
	return c
}

// String returns standard 36-char uuid representation
func (c compact) String() string {
	buf := make([]byte, 36)
	hex.Encode(buf[0:8], c[0:4])
	buf[8] = '-'
	hex.Encode(buf[9:13], c[4:6])
	buf[13] = '-'
	hex.Encode(buf[14:18], c[6:8])
	buf[18] = '-'
	hex.Encode(buf[19:23], c[8:10])
	buf[23] = '-'
	hex.Encode(buf[24:], c[10:])
	return string(buf)
}
|
package main
import (
"fmt"
"io/ioutil"
"strings"
)
// activeToInactive returns the subset of currently active 3-D cells that
// stay active in the next generation: those with 2 or 3 active neighbours
// among the 26 surrounding cells.
func activeToInactive(current map[[3]int]struct{}) map[[3]int]struct{} {
	next := make(map[[3]int]struct{})
	for cell := range current {
		cell := cell
		count := 0
		for dx := -1; dx <= 1; dx++ {
			for dy := -1; dy <= 1; dy++ {
				for dz := -1; dz <= 1; dz++ {
					if dx == 0 && dy == 0 && dz == 0 {
						continue // the cell itself is not a neighbour
					}
					if _, ok := current[[3]int{cell[0] + dx, cell[1] + dy, cell[2] + dz}]; ok {
						count++
					}
				}
			}
		}
		if count == 2 || count == 3 {
			next[cell] = struct{}{}
		}
	}
	return next
}
// activeToInactive4D is the 4-D variant of activeToInactive: active cells
// survive with 2 or 3 active neighbours among the 80 surrounding cells.
func activeToInactive4D(current map[[4]int]struct{}) map[[4]int]struct{} {
	next := make(map[[4]int]struct{})
	for cell := range current {
		cell := cell
		count := 0
		for dx := -1; dx <= 1; dx++ {
			for dy := -1; dy <= 1; dy++ {
				for dz := -1; dz <= 1; dz++ {
					for dw := -1; dw <= 1; dw++ {
						if dx == 0 && dy == 0 && dz == 0 && dw == 0 {
							continue // skip the cell itself
						}
						if _, ok := current[[4]int{cell[0] + dx, cell[1] + dy, cell[2] + dz, cell[3] + dw}]; ok {
							count++
						}
					}
				}
			}
		}
		if count == 2 || count == 3 {
			next[cell] = struct{}{}
		}
	}
	return next
}
// inactiveToActive returns the inactive 3-D cells that become active in
// the next generation (exactly 3 active neighbours). Only cells within one
// unit of the active bounding box are scanned; outside that shell a cell
// cannot have any active neighbour.
func inactiveToActive(current map[[3]int]struct{}) map[[3]int]struct{} {
	next := make(map[[3]int]struct{})
	// NOTE: bounds start at zero rather than at the first cell, which can
	// only enlarge the scanned box; the result is unaffected.
	var lo, hi [3]int
	for cell := range current {
		for axis := 0; axis < 3; axis++ {
			if cell[axis] < lo[axis] {
				lo[axis] = cell[axis]
			} else if cell[axis] > hi[axis] {
				hi[axis] = cell[axis]
			}
		}
	}
	for x := lo[0] - 1; x <= hi[0]+1; x++ {
		for y := lo[1] - 1; y <= hi[1]+1; y++ {
			for z := lo[2] - 1; z <= hi[2]+1; z++ {
				if _, active := current[[3]int{x, y, z}]; active {
					continue
				}
				count := 0
				// The centre offset (0,0,0) maps to the cell itself, which
				// is known inactive here, so it never inflates the count.
				for dx := -1; dx <= 1; dx++ {
					for dy := -1; dy <= 1; dy++ {
						for dz := -1; dz <= 1; dz++ {
							if _, ok := current[[3]int{x + dx, y + dy, z + dz}]; ok {
								count++
							}
						}
					}
				}
				if count == 3 {
					next[[3]int{x, y, z}] = struct{}{}
				}
			}
		}
	}
	return next
}
// inactiveToActive4D is the 4-D variant of inactiveToActive: inactive
// cells with exactly 3 active neighbours become active. The scan covers
// the active bounding box expanded by one unit on every axis.
func inactiveToActive4D(current map[[4]int]struct{}) map[[4]int]struct{} {
	next := make(map[[4]int]struct{})
	// NOTE: bounds start at zero (never shrunk toward the first cell),
	// which can only enlarge the scanned box; the result is unaffected.
	var lo, hi [4]int
	for cell := range current {
		for axis := 0; axis < 4; axis++ {
			if cell[axis] < lo[axis] {
				lo[axis] = cell[axis]
			} else if cell[axis] > hi[axis] {
				hi[axis] = cell[axis]
			}
		}
	}
	for x := lo[0] - 1; x <= hi[0]+1; x++ {
		for y := lo[1] - 1; y <= hi[1]+1; y++ {
			for z := lo[2] - 1; z <= hi[2]+1; z++ {
				for w := lo[3] - 1; w <= hi[3]+1; w++ {
					if _, active := current[[4]int{x, y, z, w}]; active {
						continue
					}
					count := 0
					for dx := -1; dx <= 1; dx++ {
						for dy := -1; dy <= 1; dy++ {
							for dz := -1; dz <= 1; dz++ {
								for dw := -1; dw <= 1; dw++ {
									if _, ok := current[[4]int{x + dx, y + dy, z + dz, w + dw}]; ok {
										count++
									}
								}
							}
						}
					}
					if count == 3 {
						next[[4]int{x, y, z, w}] = struct{}{}
					}
				}
			}
		}
	}
	return next
}
// Conway-cubes puzzle driver: reads the initial 2D slice from input.txt,
// runs six cycles of the 3D and 4D cellular automata and prints both
// active-cell counts.
func main() {
	content, err := ioutil.ReadFile("input.txt")
	if err != nil {
		panic(err)
	}
	lines := strings.Split(string(content), "\n")
	// Seed both the 3D and the 4D grid from the same input ('#' = active).
	active := make(map[[3]int]struct{})
	active2 := make(map[[4]int]struct{})
	for y, line := range lines {
		for x, ch := range []rune(line) {
			if ch == '#' {
				active[[3]int{x, y, 0}] = struct{}{}
				active2[[4]int{x, y, 0, 0}] = struct{}{}
			}
		}
	}
	// Each cycle the next generation is the union of the cells that stay
	// active and the cells that become active.
	for cycle := 0; cycle < 6; cycle++ {
		stayed := activeToInactive(active)
		born := inactiveToActive(active)
		merged := make(map[[3]int]struct{}, len(stayed)+len(born))
		for c := range stayed {
			merged[c] = struct{}{}
		}
		for c := range born {
			merged[c] = struct{}{}
		}
		active = merged
	}
	fmt.Printf("result1 = %d\n", len(active))
	for cycle := 0; cycle < 6; cycle++ {
		stayed := activeToInactive4D(active2)
		born := inactiveToActive4D(active2)
		merged := make(map[[4]int]struct{}, len(stayed)+len(born))
		for c := range stayed {
			merged[c] = struct{}{}
		}
		for c := range born {
			merged[c] = struct{}{}
		}
		active2 = merged
	}
	fmt.Printf("result2 = %d\n", len(active2))
}
|
// Copyright 2020 Nokia
// Licensed under the BSD 3-Clause License.
// SPDX-License-Identifier: BSD-3-Clause
package linux
import (
"context"
log "github.com/sirupsen/logrus"
"github.com/srl-labs/containerlab/nodes"
"github.com/srl-labs/containerlab/runtime"
"github.com/srl-labs/containerlab/types"
)
// init registers the linux node kind with the global node registry so the
// tool can instantiate this node type by kind name.
func init() {
	nodes.Register(nodes.NodeKindLinux, func() nodes.Node {
		return &linux{}
	})
}
// linux is a generic Linux-container node; its only state is the node config.
type linux struct{ cfg *types.NodeConfig }
// Init stores the node configuration and applies any functional options.
func (l *linux) Init(cfg *types.NodeConfig, opts ...nodes.NodeOption) error {
	l.cfg = cfg
	for _, opt := range opts {
		opt(l)
	}
	return nil
}
// Config returns the node's configuration.
func (l *linux) Config() *types.NodeConfig { return l.cfg }
// PreDeploy is a no-op for plain Linux containers.
func (l *linux) PreDeploy(configName, labCADir, labCARoot string) error { return nil }
// Deploy creates the container for this node via the given runtime.
func (l *linux) Deploy(ctx context.Context, r runtime.ContainerRuntime) error {
	return r.CreateContainer(ctx, l.cfg)
}
// PostDeploy logs that post-deploy ran; Linux nodes need no extra actions.
func (l *linux) PostDeploy(ctx context.Context, r runtime.ContainerRuntime, ns map[string]nodes.Node) error {
	log.Debugf("Running postdeploy actions for Linux '%s' node", l.cfg.ShortName)
	return nil
}
// WithMgmtNet is a no-op: Linux nodes do not use the management network info.
func (l *linux) WithMgmtNet(*types.MgmtNet) {}
// SaveConfig is a no-op: plain Linux containers carry no device
// configuration to persist.
// Receiver renamed from `s` to `l` for consistency with every other
// method of this type.
func (l *linux) SaveConfig(ctx context.Context, r runtime.ContainerRuntime) error {
	return nil
}
|
package _121_Best_Time_to_Buy_and_Sell_Stock
import (
"math"
)
// maxProfit returns the maximum profit from a single buy/sell transaction;
// it delegates to the O(n) implementation.
func maxProfit(prices []int) int {
	return maxProfitOnce(prices)
}
// maxProfitOnce computes the best single-transaction profit in O(n)
// (Kadane's algorithm over the day-to-day price deltas). Returns 0 for
// empty/one-element input or when no profitable trade exists.
// Plain int comparisons replace the original int->float64->int round
// trips through math.Max, which were both unidiomatic and lossy in
// principle for very large values.
func maxProfitOnce(prices []int) int {
	var cur, best int
	for i := 1; i < len(prices); i++ {
		// Extend the running delta sum; a negative running sum can never
		// help a later sale, so clamp it at zero (i.e. "buy here instead").
		cur += prices[i] - prices[i-1]
		if cur < 0 {
			cur = 0
		}
		if cur > best {
			best = cur
		}
	}
	return best
}
// maxProfitForce is the O(n^2) brute-force reference: try every buy/sell
// index pair. Correct but too slow for large inputs (rejected by leetcode).
// Idiom fixes: `for i := range` instead of `for i, _ := range`, and plain
// int comparison instead of float64 round trips through math.Max.
func maxProfitForce(prices []int) int {
	var best int
	for i := range prices {
		for j := i; j < len(prices); j++ {
			if profit := prices[j] - prices[i]; profit > best {
				best = profit
			}
		}
	}
	return best
}
|
package tags
import ()
// Tag defines a common interface for working with tag values.
type Tag interface {
	// Raw returns the raw user-defined string tag value.
	Raw() string
	// Clean returns the URI-safe form of the raw tag value.
	Clean() string
	// IsMachineTag reports whether the tag can be parsed as a machine tag.
	IsMachineTag() bool
	// Namespace returns the namespace of a valid machine tag triple.
	Namespace() (string, error)
	// Predicate returns the predicate of a valid machine tag triple.
	Predicate() (string, error)
	// Value returns the value of a valid machine tag triple.
	Value() (string, error)
}
|
package main
import (
"fmt"
"strings"
"github.com/Quaqmre/mirjmessage/pb"
"github.com/jroimartin/gocui"
)
// Send is the keybinding handler that submits the input view's buffer.
// Input starting with '&' is treated as a command ("&ls user/room",
// "&ch", "&joın", "&mk", "&ext"); anything else is sent as a chat
// message. The text is then echoed into the "messages" view and the
// input view is reset.
// NOTE(review): the buffer keeps its trailing newline, which is why the
// comparisons and argument slicing below include "\n".
func (c *Client) Send(g *gocui.Gui, v *gocui.View) error {
	text := v.Buffer()
	if text == "" {
		return nil
	}
	// Only split on spaces when the text looks like a command.
	sp := []string{text}
	if text[0] == '&' {
		sp = strings.Split(text, " ")
	}
	switch sp[0] {
	case "&ls":
		if sp[1] == "user\n" {
			data := c.MakeCommand(pb.Input_LSUSER, "")
			c.MarshalEndWrite(data)
		}
		if sp[1] == "room\n" {
			data := c.MakeCommand(pb.Input_LSROOM, "")
			c.MarshalEndWrite(data)
		}
	case "&ch":
		// sp[1][:len(sp[1])-1] strips the trailing newline from the argument.
		data := c.MakeCommand(pb.Input_CHNAME, sp[1][:len(sp[1])-1])
		c.MarshalEndWrite(data)
	case "&joın":
		data := c.MakeCommand(pb.Input_JOIN, sp[1][:len(sp[1])-1])
		c.MarshalEndWrite(data)
	case "&mk":
		data := c.MakeCommand(pb.Input_MKROOM, sp[1][:len(sp[1])-1])
		c.MarshalEndWrite(data)
	case "&ext\n":
		data := c.MakeCommand(pb.Input_EXIT, "")
		c.MarshalEndWrite(data)
	default:
		data := c.MakeMessage(text)
		c.MarshalEndWrite(data)
	}
	// Echo the submitted text and reset the input view's cursor/origin.
	g.Update(func(g *gocui.Gui) error {
		w, _ := g.View("messages")
		fmt.Fprint(w, text)
		v.Clear()
		v.SetCursor(0, 0)
		v.SetOrigin(0, 0)
		return nil
	})
	return nil
}
// Connect brings the chat views to the front, focuses the input view and
// writes a test line into the "messages" view.
func Connect(g *gocui.Gui, v *gocui.View) error {
	g.SetViewOnTop("messages")
	g.SetViewOnTop("input")
	g.SetCurrentView("input")
	mv, _ := g.View("messages")
	g.Update(func(g *gocui.Gui) error {
		fmt.Fprintln(mv, "asdasdasd")
		return nil
	})
	return nil
}
// Layout is the gocui layout manager: it lays out a full-width "messages"
// view on top and a 4-row editable "input" view at the bottom. gocui
// invokes it on every redraw; SetView returns ErrUnknownView the first
// time a view is created, which is the only non-fatal error and the
// moment to configure the view.
func Layout(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	g.Cursor = true
	if messages, err := g.SetView("messages", 0, 0, maxX-1, maxY-5); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		// First creation: configure once.
		messages.Title = " messages: "
		messages.Autoscroll = true
		messages.Wrap = true
	}
	if input, err := g.SetView("input", 0, maxY-5, maxX-1, maxY-1); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		g.SetCurrentView("input")
		input.Title = " send: "
		input.Autoscroll = false
		input.Wrap = true
		input.Editable = true
	}
	return nil
}
|
package main
import (
"fmt"
"strings"
)
// Reads a word from stdin and reports whether, after lowercasing, it
// starts with 'i', ends with 'n' and contains an 'a' after the first
// character. Prints "Found" or "Not Found".
func main() {
	var word string
	fmt.Scan(&word)
	word = strings.ToLower(word)
	n := len(word)
	if n < 3 {
		fmt.Println("Not Found")
		return
	}
	found := false
	if word[0] == 'i' && word[n-1] == 'n' {
		// 'a' is ASCII, so a substring search over the bytes after the
		// first character matches the original byte-wise scan exactly.
		found = strings.Contains(word[1:], "a")
	}
	if found {
		fmt.Println("Found")
	} else {
		fmt.Println("Not Found")
	}
}
|
package core
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestExpandHomePath verifies tilde expansion against $HOME: a bare "~"
// and path-list entries starting with "~/" expand, while "~username" and
// mid-path tildes are left untouched.
func TestExpandHomePath(t *testing.T) {
	HOME := os.Getenv("HOME")
	cases := []struct {
		in, want string
	}{
		{"", ""},
		{"~", HOME},
		{"~username", "~username"},
		{"~:/bin/~:/usr/local", HOME + ":/bin/~:/usr/local"},
		{"/bin:~/bin:~/script:/usr/local/bin",
			"/bin:" + HOME + "/bin:" + HOME + "/script:/usr/local/bin"},
	}
	for _, c := range cases {
		assert.Equal(t, c.want, ExpandHomePath(c.in))
	}
}
// TestReplaceEnvironment checks that BuildEnv.ReplaceEnvironment expands
// both $VAR and ${VAR} forms via os.Expand, and that unknown variables
// expand to the empty string.
func TestReplaceEnvironment(t *testing.T) {
	env := BuildEnv{
		"TMP_DIR=/home/user/please/src/core",
		"PKG=src/core",
		"SRCS=core.go build_env.go",
	}
	assert.Equal(t,
		"/home/user/please/src/core src/core core.go build_env.go",
		os.Expand("$TMP_DIR ${PKG} ${SRCS}", env.ReplaceEnvironment))
	assert.Equal(t, "", os.Expand("$WIBBLE", env.ReplaceEnvironment))
}
// TestRedact checks that Redacted masks values of secret-looking
// variables (names containing e.g. PASSWORD/SECRET) and leaves the rest.
func TestRedact(t *testing.T) {
	env := BuildEnv{
		"WHATEVER=12345",
		"GPG_PASSWORD=54321",
		"ULTIMATE_MEGASECRET=42",
	}
	expected := BuildEnv{
		"WHATEVER=12345",
		"GPG_PASSWORD=************",
		"ULTIMATE_MEGASECRET=************",
	}
	assert.EqualValues(t, expected, env.Redacted())
}
// TestString checks that BuildEnv.String joins entries with newlines.
func TestString(t *testing.T) {
	env := BuildEnv{
		"A=B",
		"C=D",
	}
	assert.EqualValues(t, "A=B\nC=D", env.String())
}
|
package limiter
import "sync"
// LimitListener caps the number of concurrent holders at a fixed upper bound.
type LimitListener struct {
	sem chan struct{} // counts current holders; sends block once the limit is reached
	done chan struct{} // closed when the limiter is shut down
	closeOnce sync.Once
}
// NewLimitListener returns a limiter allowing at most n concurrent holders.
func NewLimitListener(n int) *LimitListener {
	return &LimitListener{
		sem: make(chan struct{}, n),
		done: make(chan struct{}),
	}
}
// Acquire blocks until a slot is free and returns true, or returns false
// once the limiter has been closed. Note: when both channels are ready,
// select picks a case at random, so a concurrent Close is not guaranteed
// to win over a free slot.
func (limiter *LimitListener) Acquire() bool {
	select {
	case <-limiter.done:
		return false
	case limiter.sem <- struct{}{}: // blocks while sem is full (at the limit)
		return true
	}
}
// Release frees one slot previously taken by a successful Acquire.
// Calling it without a matching Acquire blocks until a slot is occupied.
func (limiter *LimitListener) Release() {
	<-limiter.sem
}
// Close shuts the limiter down, causing pending and future Acquire calls
// to return false. Safe to call multiple times.
func (limiter *LimitListener) Close() {
	limiter.closeOnce.Do(func() {
		close(limiter.done)
	})
}
|
package main
import "testing"
// TestBinarySum checks bit-array addition: the result is one digit longer
// than the inputs (carry slot), and mismatched input lengths return
// DifferentArrayLengthError.
func TestBinarySum(t *testing.T) {
	cases := []struct {
		name string
		A []int
		B []int
		want []int
	}{
		{"0 + 0", []int{0}, []int{0}, []int{0, 0}},
		{"0 + 1", []int{0}, []int{1}, []int{0, 1}},
		{"1 + 0", []int{1}, []int{0}, []int{0, 1}},
		{"1 + 1", []int{1}, []int{1}, []int{1, 0}},
		{"10 + 10", []int{1, 0}, []int{1, 0}, []int{1, 0, 0}},
		{"11 + 10", []int{1, 1}, []int{1, 0}, []int{1, 0, 1}},
		{"11 + 11", []int{1, 1}, []int{1, 1}, []int{1, 1, 0}},
		{"1101 + 1011", []int{1, 1, 0, 1}, []int{1, 0, 1, 1}, []int{1, 1, 0, 0, 0}},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got, _ := BinarySum(c.A, c.B)
			AssertIntArraysEqual(t, got, c.want)
		})
	}
	t.Run("should return error if input lengths are different", func(t *testing.T) {
		A, B := []int{1, 1, 0}, []int{1, 1, 0, 1}
		_, err := BinarySum(A, B)
		AssertError(t, err, DifferentArrayLengthError)
	})
}
// BenchmarkBinarySum measures BinarySum over random inputs of increasing
// size; the arrays are generated once per size, outside the timed loop.
func BenchmarkBinarySum(b *testing.B) {
	benches := []struct {
		name string
		n int
	}{
		{"100 digit sum", 100},
		{"1,000 digit sum", 1000},
		{"100,000 digit sum", 100000},
	}
	for _, bench := range benches {
		A, B := MakeRandIntArray(bench.n, 2), MakeRandIntArray(bench.n, 2)
		b.Run(bench.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				BinarySum(A, B)
			}
		})
	}
}
|
package shell
import (
"bytes"
"fmt"
"path/filepath"
"testing"
"github.com/gruntwork-io/go-commons/logging"
"github.com/stretchr/testify/assert"
)
// TestRunShellCommand checks that a trivial valid command exits cleanly.
func TestRunShellCommand(t *testing.T) {
	t.Parallel()
	assert.NoError(t, RunShellCommand(NewShellOptions(), "echo", "hi"))
}
// TestRunShellCommandInvalidCommand checks that a nonexistent binary
// surfaces an error.
func TestRunShellCommandInvalidCommand(t *testing.T) {
	t.Parallel()
	assert.Error(t, RunShellCommand(NewShellOptions(), "not-a-real-command"))
}
// TestRunShellCommandAndGetOutput checks that output is captured verbatim,
// including echo's trailing newline.
func TestRunShellCommandAndGetOutput(t *testing.T) {
	t.Parallel()
	out, err := RunShellCommandAndGetOutput(NewShellOptions(), "echo", "hi")
	assert.NoError(t, err)
	assert.Equal(t, "hi\n", out)
}
// TestRunShellCommandAndGetOutputNoTerminatingNewLine checks that no
// newline is appended when the command itself (echo -n) emits none.
func TestRunShellCommandAndGetOutputNoTerminatingNewLine(t *testing.T) {
	t.Parallel()
	out, err := RunShellCommandAndGetOutput(NewShellOptions(), "echo", "-n", "hi")
	assert.NoError(t, err)
	assert.Equal(t, "hi", out)
}
// TestRunShellCommandAndGetStdoutReturnsStdout checks stdout is captured.
func TestRunShellCommandAndGetStdoutReturnsStdout(t *testing.T) {
	t.Parallel()
	out, err := RunShellCommandAndGetStdout(NewShellOptions(), "echo", "hi")
	assert.NoError(t, err)
	assert.Equal(t, "hi\n", out)
}
// TestRunShellCommandAndGetStdoutDoesNotReturnStderr checks that a script
// writing only to stderr yields an empty captured stdout.
func TestRunShellCommandAndGetStdoutDoesNotReturnStderr(t *testing.T) {
	t.Parallel()
	out, err := RunShellCommandAndGetStdout(NewShellOptions(), filepath.Join("test-fixture", "echo_hi_stderr.sh"))
	assert.NoError(t, err)
	assert.Equal(t, "", out)
}
// TestRunShellCommandAndGetStdoutAndStreamOutputDoesNotReturnStderr checks
// the streaming variant also excludes stderr from the returned stdout.
func TestRunShellCommandAndGetStdoutAndStreamOutputDoesNotReturnStderr(t *testing.T) {
	t.Parallel()
	out, err := RunShellCommandAndGetStdoutAndStreamOutput(NewShellOptions(), filepath.Join("test-fixture", "echo_hi_stderr.sh"))
	assert.NoError(t, err)
	assert.Equal(t, "", out)
}
// TestRunShellCommandAndGetAndStreamOutput checks the combined capture
// includes both the stdout and the stderr line of the fixture script.
func TestRunShellCommandAndGetAndStreamOutput(t *testing.T) {
	t.Parallel()
	out, err := RunShellCommandAndGetAndStreamOutput(NewShellOptions(), filepath.Join("test-fixture", "echo_stdoutstderr.sh"))
	assert.NoError(t, err)
	assert.Equal(t, "hello\nworld\n", out)
}
// TestRunShellCommandAndGetOutputStruct checks the output struct exposes
// combined, stdout-only and stderr-only views of the captured output.
func TestRunShellCommandAndGetOutputStruct(t *testing.T) {
	t.Parallel()
	out, err := RunShellCommandAndGetOutputStruct(NewShellOptions(), filepath.Join("test-fixture", "echo_stdoutstderr.sh"))
	assert.NoError(t, err)
	assert.Equal(t, "hello\nworld\n", out.Combined())
	assert.Equal(t, "hello\n", out.Stdout())
	assert.Equal(t, "world\n", out.Stderr())
}
// TestRunShellCommandWithEnv checks custom env vars round-trip through a
// bash subprocess, covering values with spaces, embedded '=', a leading
// '=' and an empty value.
func TestRunShellCommandWithEnv(t *testing.T) {
	t.Parallel()
	envVars := map[string]string{
		"TEST_WITH_SPACES": "test with spaces",
		"TEST_WITH_EQUALS": "test=with=equals",
		"TEST_START_EQUALS": "=teststartequals",
		"TEST_BLANK": "",
	}
	options := NewShellOptions()
	options.Env = envVars
	for k, v := range envVars {
		out, err := RunShellCommandAndGetOutput(options, "bash", "-c", fmt.Sprintf("echo $%s", k))
		assert.NoError(t, err)
		assert.Equal(t, fmt.Sprintf("%s\n", v), out)
	}
}
// TestCommandInstalledOnValidCommand: a ubiquitous command is reported installed.
func TestCommandInstalledOnValidCommand(t *testing.T) {
	t.Parallel()
	assert.True(t, CommandInstalled("echo"))
}
// TestCommandInstalledOnInvalidCommand: a nonexistent command is reported absent.
func TestCommandInstalledOnInvalidCommand(t *testing.T) {
	t.Parallel()
	assert.False(t, CommandInstalled("not-a-real-command"))
}
// TestCommandInstalledEOnValidCommand: the error-returning variant
// returns nil for an installed command.
func TestCommandInstalledEOnValidCommand(t *testing.T) {
	t.Parallel()
	assert.NoError(t, CommandInstalledE("echo"))
}
// TestCommandInstalledEOnInvalidCommand: the error-returning variant
// returns an error for a missing command.
func TestCommandInstalledEOnInvalidCommand(t *testing.T) {
	t.Parallel()
	assert.Error(t, CommandInstalledE("not-a-real-command"))
}
// Test that when SensitiveArgs is true, RunShellCommand logs the command
// name but not its arguments (captured via a buffer-backed logger).
func TestSensitiveArgsTrueHidesOnRunShellCommand(t *testing.T) {
	t.Parallel()
	buffer := bytes.NewBufferString("")
	logger := logging.GetLogger("")
	logger.Out = buffer
	options := NewShellOptions()
	options.SensitiveArgs = true
	options.Logger = logger
	assert.NoError(t, RunShellCommand(options, "echo", "hi"))
	assert.NotContains(t, buffer.String(), "hi")
	assert.Contains(t, buffer.String(), "echo")
}
// Test that when SensitiveArgs is left false (the default), RunShellCommand
// logs both the command name and its arguments.
func TestSensitiveArgsFalseShowsOnRunShellCommand(t *testing.T) {
	t.Parallel()
	buffer := bytes.NewBufferString("")
	logger := logging.GetLogger("")
	logger.Out = buffer
	options := NewShellOptions()
	options.Logger = logger
	assert.NoError(t, RunShellCommand(options, "echo", "hi"))
	assert.Contains(t, buffer.String(), "hi")
	assert.Contains(t, buffer.String(), "echo")
}
// Test that when SensitiveArgs is true, the output-capturing variant also
// logs the command name but not its arguments.
func TestSensitiveArgsTrueHidesOnRunShellCommandAndGetOutput(t *testing.T) {
	t.Parallel()
	buffer := bytes.NewBufferString("")
	logger := logging.GetLogger("")
	logger.Out = buffer
	options := NewShellOptions()
	options.SensitiveArgs = true
	options.Logger = logger
	_, err := RunShellCommandAndGetOutput(options, "echo", "hi")
	assert.NoError(t, err)
	assert.NotContains(t, buffer.String(), "hi")
	assert.Contains(t, buffer.String(), "echo")
}
// Test that when SensitiveArgs is false, the output-capturing variant
// logs both the command name and its arguments.
func TestSensitiveArgsFalseShowsOnRunShellCommandAndGetOutput(t *testing.T) {
	t.Parallel()
	buffer := bytes.NewBufferString("")
	logger := logging.GetLogger("")
	logger.Out = buffer
	options := NewShellOptions()
	options.Logger = logger
	_, err := RunShellCommandAndGetOutput(options, "echo", "hi")
	assert.NoError(t, err)
	assert.Contains(t, buffer.String(), "hi")
	assert.Contains(t, buffer.String(), "echo")
}
|
package main
import (
"log"
"os"
)
// Demonstrates ways a Go program can terminate: os.Exit (commented out;
// note it skips deferred calls) versus log.Fatalln, triggered here by
// opening a file that does not exist.
func main() {
	//Exit
	/*
		os.Exit(1)
		fmt.Println("Start")
	*/
	/*
		// deferred functions are not run either when os.Exit is called
		defer func() {
			fmt.Println("defer")
		}()
		os.Exit(0)
	*/
	//log.Fatal()
	_, err := os.Open("A.txt")
	if err != nil {
		log.Fatalln(err)
	}
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// Premise: interpretation of the game spec.
// Scoring: each player scores 0-100 points; the game-end condition is per
// the margin note; when all games finish, totals are tallied per player.
// Points handed out in one game round:
const gamePoints int = 32
// NOTE(review): "totalraounds" is a typo for totalRounds; renaming would
// also touch main, so it is only flagged here.
const totalraounds int = 10
// player accumulates per-round scores under a display name.
type player struct {
	name string
	scores map[round]score
}
// addScore adds one point for round r, but only while the player's total
// is still below the 100-point cap.
func (p *player) addScore(r round) {
	if p.getTotalScore() < 100 {
		p.scores[r] = p.scores[r] + 1
	}
}
// showScore prints the player's per-round scores and total (labels in
// Japanese) and returns the total. Note: map iteration order is random,
// so the round lines print unordered.
func (p *player) showScore() int {
	var total int
	for k, v := range p.scores {
		fmt.Printf("%s 第%dラウンド:%d\n", p.name, k, v)
		total += int(v)
	}
	fmt.Printf("%s トータル:%d\n\n", p.name, total)
	return total
}
// getTotalScore sums the player's points across all rounds.
func (p *player) getTotalScore() int {
	sum := 0
	for _, points := range p.scores {
		sum += int(points)
	}
	return sum
}
// round is a 1-based game-round number; score is a per-round point count.
type round int32
type score int32
// game is one round together with the shared player roster.
type game struct {
	gameround round
	participant []*player
}
// dogame plays one round: it hands out all gamePoints points one at a
// time to randomly chosen participants (addScore enforces the per-player
// 100-point cap).
func (g *game) dogame() {
	// game logic: award points at random
	fmt.Printf("第%dラウンド開始\n", g.gameround)
	// Seed once per round. The original reseeded inside the loop with
	// time.Now().UnixNano(), which can repeat the same seed within the
	// clock's resolution and skew the point distribution.
	rand.Seed(time.Now().UnixNano())
	for i := 0; i < gamePoints; i++ {
		n := rand.Intn(len(g.participant))
		g.participant[n].addScore(g.gameround)
	}
}
// games records every round played.
type games struct {
	games []*game
}
// start creates the fixed four-player roster, plays the requested number
// of rounds (each recorded in gs.games) and prints the final results.
func (gs *games) start(rounds int) {
	ps := []*player{{"taro", make(map[round]score)}, {"jiro", make(map[round]score)}, {"saburo", make(map[round]score)}, {"shiro", make(map[round]score)}}
	for i := 1; i <= rounds; i++ {
		g := &game{round(i), ps}
		gs.games = append(gs.games, g)
		g.dogame()
	}
	gs.result(ps)
}
// result prints every player's score breakdown and announces the winner
// (highest total; an earlier player keeps the lead on ties because only
// a strictly higher score replaces the current leader).
func (gs *games) result(ps []*player) {
	fmt.Printf("end game------------\n\n")
	var winner *player
	var topScore int = 0
	for _, p := range ps {
		s := p.showScore()
		if topScore < s {
			topScore = s
			winner = p
		}
	}
	// Guard: if every player scored zero (or ps is empty), no leader was
	// ever set and the original code dereferenced a nil winner.
	if winner == nil {
		return
	}
	fmt.Printf("------------------------\n 勝者は%d点で%s!!\n", winner.getTotalScore(), winner.name)
}
// main plays the configured number of rounds and prints the outcome.
func main() {
	gs := games{make([]*game, 0)}
	gs.start(totalraounds)
}
|
package main
import (
"fmt"
"net"
"os"
pb "github.com/semi-technologies/contextionary/contextionary"
core "github.com/semi-technologies/contextionary/contextionary/core"
"github.com/semi-technologies/contextionary/extensions"
"github.com/semi-technologies/contextionary/server/config"
"github.com/sirupsen/logrus"
grpc "google.golang.org/grpc"
)
// Version is the build version string; it is injected at compile time
// through a linker build argument.
var Version string
// main wires up the contextionary gRPC server and serves it on the
// configured port, exiting non-zero on any startup or serve failure.
func main() {
	server := new()
	server.logger.WithField("version", Version).Info()
	grpcServer := grpc.NewServer()
	pb.RegisterContextionaryServer(grpcServer, server)
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", server.config.ServerPort))
	if err != nil {
		server.logger.Errorf("can't listen on port: %s", err)
		os.Exit(1)
	}
	// Serve blocks until the server stops; its error was previously
	// discarded, hiding e.g. an unexpectedly closed listener.
	if err := grpcServer.Serve(lis); err != nil {
		server.logger.Errorf("grpc server stopped: %s", err)
		os.Exit(1)
	}
}
// server implements the contextionary gRPC service.
type server struct {
	// to be used to serve rpc requests, combination of the raw contextionary
	// and the schema
	combinedContextionary core.Contextionary
	// initialized at startup, to be used to build the
	// schema contextionary
	rawContextionary core.Contextionary
	config *config.Config
	logger logrus.FieldLogger
	// use cases backing the RPC endpoints
	extensionStorer *extensions.Storer
	extensionLookerUpper extensionLookerUpper
	stopwordDetector stopwordDetector
	vectorizer *Vectorizer
}
// new builds the gRPC server: it sets up a JSON logrus logger, loads and
// validates the configuration, applies the configured log level and runs
// the server's init sequence, exiting the process on any failure.
// NOTE(review): the name shadows the builtin new(); renaming would also
// touch main, so it is only flagged here.
func new() *server {
	logger := logrus.New()
	logger.SetFormatter(&logrus.JSONFormatter{})
	cfg, err := config.New(logger)
	if err != nil {
		logger.
			WithError(err).
			Errorf("cannot start up")
		os.Exit(1)
	}
	loglevel, err := logrus.ParseLevel(cfg.LogLevel)
	if err != nil {
		logger.
			WithError(err).
			Errorf("cannot start up")
		os.Exit(1)
	}
	logger.SetLevel(loglevel)
	logger.WithField("log_level", loglevel.String()).Info()
	s := &server{
		config: cfg,
		logger: logger,
	}
	err = s.init()
	if err != nil {
		logger.
			WithError(err).
			Errorf("cannot start up")
		os.Exit(1)
	}
	return s
}
|
package unity
import (
"reflect"
"testing"
)
// TestVersionFromString checks parsing of Unity version strings of the
// form "major.minor.update<type><patch>" (e.g. "2020.3.30f1"), plus
// rejection of an unknown version-type letter and a non-numeric field.
func TestVersionFromString(t *testing.T) {
	tests := []struct {
		name string
		input string
		want VersionData
		wantErr bool
	}{
		{
			name: "no_hash",
			input: "2020.3.30f1",
			want: VersionData{
				Major: 2020,
				Minor: 3,
				Update: 30,
				VerType: "f",
				Patch: 1,
			},
			wantErr: false,
		},
		{
			name: "invalid_ver_type",
			input: "2020.3.30g1",
			want: VersionData{},
			wantErr: true,
		},
		{
			name: "invalid_number",
			input: "202f0.3.30f1",
			want: VersionData{},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := VersionFromString(tt.input)
			if (err != nil) != tt.wantErr {
				t.Errorf("VersionFromString() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("VersionFromString() got = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package stemmer
import (
"bufio"
"os"
"strings"
"testing"
)
// Test_stem compares Stem's result for every word in voc.txt against the
// expected stem on the corresponding line of output.txt.
func Test_stem(t *testing.T) {
	//read vocabulary
	fVoc, err := os.Open("voc.txt")
	if err != nil {
		panic(err)
	}
	defer fVoc.Close() // was leaked before
	vocab := bufio.NewReader(fVoc)
	//read output
	fOut, err := os.Open("output.txt")
	if err != nil {
		panic(err)
	}
	defer fOut.Close() // was leaked before
	output := bufio.NewReader(fOut)
	for word, errV := vocab.ReadSlice('\n'); errV == nil; word, errV = vocab.ReadSlice('\n') {
		stem, errO := output.ReadSlice('\n')
		if errO != nil {
			// BUG FIX: this previously panicked with `err`, which is
			// always nil here (the Open above succeeded), so the real
			// read error was lost and the test panicked with nil.
			panic(errO)
		}
		sWord := strings.TrimSpace(string(word))
		sStem := strings.TrimSpace(string(stem))
		stemRes := Stem(sWord)
		if stemRes != string(sStem) {
			t.Error(
				"For", sWord,
				"expected", sStem,
				"got", stemRes,
			)
		}
	}
}
|
package main
import (
"fmt"
// 需要处理json相关操作,则需要导入此包
"encoding/json"
)
// Student1 demonstrates struct-to-JSON marshalling with default field names.
type Student1 struct {
	// Only fields whose names start with an uppercase letter are exported
	// and therefore included in the JSON output.
	Id string
	Name string
	Course []string
	Age int
}
type Student2 struct {
	// Exported fields would appear with uppercase names in the JSON, so
	// struct tags are used to rename them to lowercase.
	Id string `json:"id"` // lowercase "id" in the JSON output
	Name string `json:"name"`
	Course []string `json:"course"`
	Age int `json:"age,string"` // lowercase and encoded as a JSON string
}
// Builds JSON from structs: Student1 marshals with its exported
// (uppercase) field names, Student2 with tag-renamed lowercase names.
func main() {
	s1 := Student1{Id: "001", Name: "neil", Course: []string{"Java", "Go", "JavaScript"}, Age: 28}
	// Marshal returns two values: the JSON byte slice and an error.
	json1, err1 := json.Marshal(s1)
	if err1 == nil {
		fmt.Println(string(json1))
	} else {
		fmt.Println("err = ", err1)
	}
	// Output:
	// {"Id":"001","Name":"neil","Course":["Java","Go","JavaScript"],"Age":28}
	s2 := Student2{Id: "001", Name: "neil", Course: []string{"Java", "Go", "JavaScript"}, Age: 28}
	// Marshal returns two values: the JSON byte slice and an error.
	json2, err2 := json.Marshal(s2)
	if err2 == nil {
		fmt.Println(string(json2))
	} else {
		fmt.Println("err2 = ", err2)
	}
	// Output:
	// {"id":"001","name":"neil","course":["Java","Go","JavaScript"],"age":"28"}
}
|
/******************************************************************************
Online Go Lang Compiler.
Code, Compile, Run and Debug Go Lang program online.
Write your code in this editor and press "Run" button to execute it.
*******************************************************************************/
package main
// imported package(if not used shows error)
import "fmt"
import "math/rand"
import "time"
// Unlike unused local variables, unused package-level declarations do not
// cause a compile error.
var x, y, z = 123, true, "foo"
// init functions are called before main and execute sequentially;
// a file may declare multiple init functions.
func init() {
	fmt.Println("hi,", "Amar")
}
// main walks through Go's basic control flow: if/else with an init
// statement, the for-loop variants, break, and a switch fed by a random
// value. The printed sequence is identical to the original.
func main() {
	// if with a scoped initializer
	if n := 25; n%2 == 0 {
		fmt.Println(n, "is an even number.")
	} else {
		fmt.Println(n, "is an odd number.")
	}
	// conditions need no parentheses (constant condition: prints "No")
	if 25 < 23 {
		println("yes")
	} else {
		println("No")
	}
	// classic three-clause for: prints 0, 1, 2
	for i := 0; i < 3; i++ {
		fmt.Println(i)
	}
	// condition-only (while-style) loops: prints 0, 1 then 2, 3
	var i = 0
	for i < 2 {
		fmt.Println(i)
		i++
	}
	for i < 4 {
		fmt.Println(i)
		i++
	}
	// loop terminated early with break: prints 4, 5, 6
	for i < 10 {
		fmt.Println(i)
		if i >= 6 {
			break
		}
		i++
	}
	// continue and break behave like in C/C++.
	// infinite-loop spellings:
	/*for ; true; {
	}
	for true {
	}
	for ; ; {
	}
	for {
	}*/
	rand.Seed(time.Now().UnixNano())
	switch n := rand.Intn(100); n % 9 {
	case 0:
		fmt.Println(n, "is a multiple of 9.")
	case 1, 2, 3:
		// Go cases do not fall through, so no break is needed here.
		fmt.Println(n, "mod 9 is 1, 2 or 3.")
	case 4, 5, 6:
		fmt.Println(n, "mod 9 is 4, 5 or 6.")
	default:
		fmt.Println(n, "mod 9 is 7 or 8.")
	}
}
// A second init in the same file runs after the first one, before main.
func init() {
	fmt.Println("hi,", "Kumar")
}
|
package processor
import (
"github.com/vmware/govmomi/vim25/types"
)
// Processor handles incoming vCenter events. This enables different FaaS
// implementations for vCenter event processing. Note: in the case of processing
// failure the current behavior is to log but return nil until at-least-once
// semantics are implemented.
type Processor interface {
	// Process handles a batch of events for the given managed object
	// reference (the event source).
	Process(types.ManagedObjectReference, []types.BaseEvent) error
}
|
package main
import (
"fmt"
)
// Caesar-cipher demo: encode a sample sentence with a -12 shift, print
// it, then decode it back and print the round trip.
func main() {
	var shift rune = -12
	encoded := Encode("description more or less taken from Wikipedia", shift)
	fmt.Println(encoded)
	decoded := Decode(encoded, shift)
	fmt.Println(decoded)
}
// Encode applies a Caesar shift of `offset` to every ASCII letter in s,
// preserving case and leaving all other characters untouched.
func Encode(s string, offset rune) string {
	word := []rune(s)
	for i, r := range word {
		switch {
		case r > 64 && r < 91: // uppercase A-Z
			word[i] = encodeOffsetAZ(r, offset)
		case r > 96 && r < 123: // lowercase a-z
			word[i] = encodeOffsetaz(r, offset)
		}
	}
	// The original converted through []byte and back to string; a single
	// string conversion is equivalent and allocation-free by comparison.
	return string(word)
}
// Decode reverses a Caesar shift of `offset` for every ASCII letter in
// s, preserving case and leaving all other characters untouched.
func Decode(s string, offset rune) string {
	word := []rune(s)
	for i, r := range word {
		switch {
		case r > 64 && r < 91: // uppercase A-Z
			word[i] = decodeOffsetAZ(r, offset)
		case r > 96 && r < 123: // lowercase a-z
			word[i] = decodeOffsetaz(r, offset)
		}
	}
	// The original converted through []byte and back; one conversion is enough.
	return string(word)
}
// encodeOffsetaz shifts a lowercase ASCII letter forward by offset,
// wrapping within 'a'..'z'. Go's % keeps the dividend's sign, so the
// shifted 1-based alphabet index `val` lies in (-26, 26); zero or
// negative values wrap around from 'z' downward.
func encodeOffsetaz(s rune, offset rune) rune {
	val := ((s - 96) + offset) % 26
	if val > 0 {
		return 96 + val
	}
	return 122 + val
}
// encodeOffsetAZ shifts an uppercase ASCII letter forward by offset,
// wrapping within 'A'..'Z'. Zero or negative shifted indices wrap around
// from 'Z' downward (Go's % keeps the dividend's sign).
func encodeOffsetAZ(s rune, offset rune) rune {
	val := ((s - 64) + offset) % 26
	if val > 0 {
		return 64 + val
	}
	return 90 + val
}
// decodeOffsetaz shifts a lowercase ASCII letter backward by offset,
// undoing encodeOffsetaz; zero or negative shifted indices wrap around
// from 'z' downward.
func decodeOffsetaz(s rune, offset rune) rune {
	val := ((s - 96) - offset) % 26
	if val > 0 {
		return 96 + val
	}
	return 122 + val
}
// decodeOffsetAZ shifts an uppercase ASCII letter backward by offset,
// undoing encodeOffsetAZ; zero or negative shifted indices wrap around
// from 'Z' downward.
func decodeOffsetAZ(s rune, offset rune) rune {
	val := ((s - 64) - offset) % 26
	if val > 0 {
		return 64 + val
	}
	return 90 + val
}
|
package main
import (
"fmt"
"regexp"
"bufio"
"os"
)
// getNameFromConsole prompts for and reads one line from stdin; the
// returned string keeps its trailing newline.
func getNameFromConsole() string {
	reader := bufio.NewReader(os.Stdin)
	fmt.Print("Input name: ")
	input, _ := reader.ReadString('\n')
	return input
}
// initialsPattern matches the characters to strip: lowercase ASCII
// letters and whitespace. Compiled once at package init instead of on
// every call (regexp compilation is comparatively expensive).
var initialsPattern = regexp.MustCompile(`([a-z])|(\s)`)

// getInitials removes lowercase letters and whitespace from name,
// leaving the uppercase initials (and any other non-matching runes).
func getInitials(name string) string {
	return initialsPattern.ReplaceAllString(name, "")
}
// printInitials writes the initials to stdout with a label.
func printInitials(initials string) {
	fmt.Print("Your initials:")
	fmt.Print(initials)
}
// main reads a name from the console and prints its uppercase initials.
func main() {
	printInitials(getInitials(getNameFromConsole()))
}
|
package rest
import (
id "github.com/jinmukeji/go-pkg/v2/id-gen"
"github.com/kataras/iris/v12"
)
const (
	// ContextCidKey is the key under which the request cid is stored in
	// the iris context values.
	ContextCidKey = "cid"
)
// CidMiddleware is the cid middleware: it generates a fresh xid, stores
// it in the request context under ContextCidKey, and passes control on.
func CidMiddleware(ctx iris.Context) {
	cid := id.NewXid()
	ctx.Values().Set(ContextCidKey, cid)
	ctx.Next()
}
// GetCidFromContext returns the cid previously stored by CidMiddleware
// (empty string if absent).
func GetCidFromContext(ctx iris.Context) string {
	return ctx.Values().GetString(ContextCidKey)
}
|
/*
Gaussian blur is a method used for blurring images smoothly. It involves creating a matrix which will be used by convolving it with the pixels of an image.
In this challenge, your task is to construct that matrix used in Gaussian blur. You will take an input r which will be the radius of the blur and an input σ which will be the standard deviation in order to construct a matrix with dimensions (2 r + 1 × 2 r + 1).
Each value in that matrix will have an (x, y) value that depends on its absolute distance in each direction from the center and will be used to compute G(x, y) where the formula G is
formula
For example, if r = 2, we want to generate a 5 x 5 matrix. First, the matrix of (x, y) values is
(2, 2) (1, 2) (0, 2) (1, 2) (2, 2)
(2, 1) (1, 1) (0, 1) (1, 1) (2, 1)
(2, 0) (1, 0) (0, 0) (1, 0) (2, 0)
(2, 1) (1, 1) (0, 1) (1, 1) (2, 1)
(2, 2) (1, 2) (0, 2) (1, 2) (2, 2)
Then, let σ = 1.5 and apply G to each (x, y)
0.0119552 0.0232856 0.0290802 0.0232856 0.0119552
0.0232856 0.0453542 0.0566406 0.0453542 0.0232856
0.0290802 0.0566406 0.0707355 0.0566406 0.0290802
0.0232856 0.0453542 0.0566406 0.0453542 0.0232856
0.0119552 0.0232856 0.0290802 0.0232856 0.0119552
Normally in image blurring, this matrix would be normalized by taking the sum of all the values in that matrix and dividing by it. For this challenge, that is not needed and the raw values calculated by the formula is what the output should be.
Rules
This is code-golf so the shortest code wins.
The input r will be a nonnegative integer and σ will be a positive real number.
The output must represent a matrix. It can be formatted as a 2d array, a string representing a 2d array, or something similar.
Floating-point inaccuracies will not be counted against you.
Test Cases
(r, σ) = (0, 0.25)
2.54648
(1, 7)
0.00318244 0.00321509 0.00318244
0.00321509 0.00324806 0.00321509
0.00318244 0.00321509 0.00318244
(3, 2.5)
0.00603332 0.00900065 0.0114421 0.012395 0.0114421 0.00900065 0.00603332
0.00900065 0.0134274 0.0170696 0.0184912 0.0170696 0.0134274 0.00900065
0.0114421 0.0170696 0.0216997 0.023507 0.0216997 0.0170696 0.0114421
0.012395 0.0184912 0.023507 0.0254648 0.023507 0.0184912 0.012395
0.0114421 0.0170696 0.0216997 0.023507 0.0216997 0.0170696 0.0114421
0.00900065 0.0134274 0.0170696 0.0184912 0.0170696 0.0134274 0.00900065
0.00603332 0.00900065 0.0114421 0.012395 0.0114421 0.00900065 0.00603332
(4, 3.33)
0.00339074 0.00464913 0.00582484 0.00666854 0.00697611 0.00666854 0.00582484 0.00464913 0.00339074
0.00464913 0.00637454 0.00798657 0.0091434 0.00956511 0.0091434 0.00798657 0.00637454 0.00464913
0.00582484 0.00798657 0.0100063 0.0114556 0.011984 0.0114556 0.0100063 0.00798657 0.00582484
0.00666854 0.0091434 0.0114556 0.013115 0.0137198 0.013115 0.0114556 0.0091434 0.00666854
0.00697611 0.00956511 0.011984 0.0137198 0.0143526 0.0137198 0.011984 0.00956511 0.00697611
0.00666854 0.0091434 0.0114556 0.013115 0.0137198 0.013115 0.0114556 0.0091434 0.00666854
0.00582484 0.00798657 0.0100063 0.0114556 0.011984 0.0114556 0.0100063 0.00798657 0.00582484
0.00464913 0.00637454 0.00798657 0.0091434 0.00956511 0.0091434 0.00798657 0.00637454 0.00464913
0.00339074 0.00464913 0.00582484 0.00666854 0.00697611 0.00666854 0.00582484 0.00464913 0.00339074
*/
package main
import (
"fmt"
"math"
)
// main prints the worked example from the challenge text (r=2, σ=1.5)
// followed by the four listed test cases.
func main() {
	test(2, 1.5)
	test(0, 0.25)
	test(1, 7)
	test(3, 2.5)
	test(4, 3.33)
}
// test prints the Gaussian kernel for radius r and standard deviation s.
func test(r int, s float64) {
	m := gaussian(r, s)
	dump(m)
}
// dump prints the matrix with 8 decimal places, one row per line,
// followed by a blank separator line.
func dump(m [][]float64) {
	for _, row := range m {
		for _, v := range row {
			fmt.Printf("%.8f ", v)
		}
		fmt.Println()
	}
	fmt.Println()
}
// gaussian builds the (2r+1)x(2r+1) Gaussian blur matrix for standard
// deviation s (unnormalized, as the challenge specifies).
func gaussian(r int, s float64) [][]float64 {
	size := 2*r + 1
	// One contiguous backing array; each row is a slice into it.
	backing := make([]float64, size*size)
	m := make([][]float64, size)
	for row := range m {
		m[row] = backing[row*size : (row+1)*size]
	}
	center := size / 2
	for i := range m {
		for j := range m {
			// (x, y) is the signed offset from the matrix center.
			m[i][j] = kernel(float64(j-center), float64(i-center), s)
		}
	}
	return m
}
func kernel(x, y, s float64) float64 {
return math.Exp(-(x*x+y*y)/(2*s*s)) / (2 * math.Pi * s * s)
}
|
package main
import (
"fmt"
"sort"
"strings"
)
// Counts the character changes needed to reconcile two hard-coded
// strings: characters of s absent from t are counted outright; shared
// characters have all their occurrences collected from both strings,
// sorted, and compared position by position, counting each mismatch.
// NOTE(review): the final loop indexes tArr with sArr's index; it only
// stays in range because for these inputs both collections end up the
// same length — verify before reusing with other inputs.
func main() {
	s := "OVGHK"
	t := "RPGUC"
	sMap := make(map[string]int)
	total := 0
	strS := ""
	strT := ""
	// sMap collects the distinct characters of s (values are unused).
	for _, x := range s {
		sMap[string(x)] = 0
	}
	for key, _ := range sMap {
		if !(strings.Contains(t, key)) {
			total += strings.Count(s, key)
		} else {
			numS := strings.Count(s, key)
			numT := strings.Count(t, key)
			strS += strings.Repeat(key, numS)
			strT += strings.Repeat(key, numT)
		}
	}
	fmt.Println(total)
	//fmt.Println(strS)
	//fmt.Println(strT)
	sArr := strings.Split(strS, "")
	tArr := strings.Split(strT, "")
	sort.Strings(sArr)
	sort.Strings(tArr)
	fmt.Println(sArr)
	fmt.Println(tArr)
	for i := 0; i < len(sArr); i++ {
		if string(sArr[i]) != string(tArr[i]) {
			total++
		}
	}
	fmt.Println(total)
}
|
package app
import (
"sync"
"time"
)
// Appl is the application-layer interface: record access plus the
// embedded event-topic interfaces.
type Appl interface {
	// GetNewestRecord returns newest or nil if there are no records.
	GetNewestRecord() *Record
	// AddRecord stores a new record.
	AddRecord(Record) error
	TopicInitialized
	TopicRecordAdded
}
// Record is one tracked activity entry.
type Record struct {
	ID int
	Start time.Time
	Duration time.Duration // 15m…23h45m, step 15m.
	Activity string
	Actors []string
	Customer string
	Details string
}
// App is the in-memory Appl implementation; the embedded mutex guards
// lastID and records, the embedded topics provide event emission.
type App struct {
	sync.Mutex
	lastID int
	records []Record
	topicInitialized
	topicRecordAdded
}
// New returns an empty App; call Initialize before use.
func New() *App {
	return &App{}
}
// Initialize loads fixture data and emits the Initialized event.
func (a *App) Initialize() {
	a.loadFixture()
	a.emitInitialized(EventInitialized{})
}
|
// Copyright 2016 Matthew Endsley
// All rights reserved
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted providing that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
package main
import (
"encoding/json"
"fmt"
"io"
"net"
"os"
"regexp"
"strings"
"github.com/mendsley/parchment/binfmt"
)
// Config is the top-level parchment configuration: input listeners and
// the chain of log outputs.
type Config struct {
	Inputs []*ConfigInput `json:"inputs"`
	Outputs OutputChain `json:"outputs"`
}
// ConfigInput describes one log input listener (tcp:// or unix:// address).
type ConfigInput struct {
	Address string `json:"address"`
	// NOTE(review): the tag "imeoutms" looks like a typo for "timeoutms";
	// fixing it would break configs written against the current tag, so
	// it is only flagged here.
	TimeoutMS int `json:"imeoutms"`
	FileMode string `json:"filemode"`
	User string `json:"user"`
	Group string `json:"group"`
}
// OutputChain is the ordered list of outputs; after Compile, index 0 is
// reserved for the default (pattern-less) output.
type OutputChain []*ConfigOutput
// ConfigOutput describes one log destination; an empty Pattern marks the
// default destination.
type ConfigOutput struct {
	Pattern string `json:"pattern"`
	Type string `json:"type"`
	Format string `json:"format"`
	Path string `json:"path"`
	DirectoryMode os.FileMode `json:"directorymode"`
	FileMode os.FileMode `json:"filemode"`
	Remote string `json:"remote"`
	// expr and processor are populated by Compile, not by the JSON decoder.
	expr *regexp.Regexp
	processor Processor
}
// ParseConfig decodes a JSON configuration from r; the (possibly
// partially decoded) config is returned alongside any decode error.
func ParseConfig(r io.Reader) (*Config, error) {
	cfg := new(Config)
	if err := json.NewDecoder(r).Decode(cfg); err != nil {
		return cfg, err
	}
	return cfg, nil
}
// Compile validates the parsed configuration and prepares it for use:
// input addresses are checked, each output gets its pattern compiled and
// its processor constructed, outputs sharing a pattern are merged into a
// MultiProcessor, and the output list is flattened with index 0 reserved
// for the default (pattern-less) output.
func (config *Config) Compile() error {
	// validate inputs
	for _, input := range config.Inputs {
		switch {
		case strings.HasPrefix(input.Address, "tcp://"):
			_, err := net.ResolveTCPAddr("tcp", input.Address[6:])
			if err != nil {
				return fmt.Errorf("Failed to parse input '%s', %v", input.Address, err)
			}
		case strings.HasPrefix(input.Address, "unix://"):
			// unix sockets need no further validation here
		default:
			return fmt.Errorf("Unknown input address '%s'", input.Address)
		}
	}
	// validate outputs and build their processors
	for _, out := range config.Outputs {
		if out.Pattern != "" {
			re, err := regexp.Compile(out.Pattern)
			if err != nil {
				return fmt.Errorf("Failed to compile output regexp '%s', %v", out.Pattern, err)
			}
			out.expr = re
		}
		if out.Format == "" {
			out.Format = "[%category%] %message%"
		}
		switch out.Type {
		case "stdout":
			out.processor = NewStdoutProcesor(out.Format)
		case "file":
			p, err := NewFileProcessor(out)
			if err != nil {
				return fmt.Errorf("Error processing '%s' - %v", out.Pattern, err)
			}
			out.processor = p
		case "relay":
			p, err := NewRelayProcessor(out)
			if err != nil {
				return fmt.Errorf("Error processing '%s' - %v", out.Pattern, err)
			}
			out.processor = p
		default:
			// typo fix: was "Unkown output type"
			return fmt.Errorf("Unknown output type '%s'", out.Type)
		}
	}
	// go through all outputs, and combine those with matching patterns into a
	// single MultiProcessor.
	m := make(map[string]*ConfigOutput)
	for _, out := range config.Outputs {
		if existing, ok := m[out.Pattern]; ok {
			mp := NewMultiProcessor()
			mp.Add(existing.processor)
			mp.Add(out.processor)
			existing.processor = mp
		} else {
			m[out.Pattern] = out
		}
	}
	// flatten the map of outputs back into an array. Array
	// index zero is reserved for the default processor, and
	// is allowed to be nil. Start by appending at index 1.
	config.Outputs = make(OutputChain, 1, len(m))
	for _, out := range m {
		if out.Pattern == "" {
			if config.Outputs[0] != nil {
				panic("Two default outputs were not properly collapsed into a MultiProcessor")
			}
			config.Outputs[0] = out
		} else {
			config.Outputs = append(config.Outputs, out)
		}
	}
	return nil
}
// FindProcessor returns the processor whose pattern matches category,
// falling back to the default output at index 0 (which may be nil, in
// which case nil is returned).
func (oc OutputChain) FindProcessor(category []byte) Processor {
	// pattern-matched outputs occupy index 1 and beyond
	for ii := 1; ii < len(oc); ii++ {
		if oc[ii].expr.Match(category) {
			return oc[ii].processor
		}
	}

	// no pattern matched; use the default output if one was configured
	if def := oc[0]; def != nil {
		return def.processor
	}
	return nil
}
// SplitForProcessor walks the log chain and truncates it at the first
// entry whose category resolves to a different processor than the
// head's. It returns the processor for the initial portion and the
// remainder of the chain (nil when every entry shares the head's
// processor).
func (oc OutputChain) SplitForProcessor(chain *binfmt.Log) (processor Processor, remaining *binfmt.Log) {
	if chain == nil {
		return nil, nil
	}

	// get the processor for the head node
	processor = oc.FindProcessor(chain.Category)

	for it := chain; it.Next != nil; it = it.Next {
		p := oc.FindProcessor(it.Next.Category)
		if p != processor {
			// detach the tail so the caller can process the head run,
			// then come back for the remainder
			remaining = it.Next
			it.Next = nil
			return processor, remaining
		}
	}

	return processor, nil
}
// Close shuts down every configured processor, reporting (but not
// aborting on) individual close failures.
func (oc OutputChain) Close() {
	for _, out := range oc {
		if out == nil {
			continue // slot 0 may hold no default output
		}
		if err := out.processor.Close(); err != nil {
			fmt.Fprintf(os.Stderr, "ERROR: Failed to close output %s for %s: %v\n", out.Type, out.Pattern, err)
		}
	}
}
|
package mockingjay
import (
"github.com/stretchr/testify/assert"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
// TestItCreatesHTTPRequests checks that a mockingjay Request is
// faithfully converted into an *http.Request: URL, method, headers and
// body must all carry over.
func TestItCreatesHTTPRequests(t *testing.T) {
	headers := make(map[string]string)
	headers["foo"] = "bar"
	uri := "/hello"
	method := "PUT"
	body := "Body body body"
	baseURL := "http://localhost:1234"

	mockingJayRequest := Request{
		URI:     uri,
		Method:  method,
		Headers: headers,
		Body:    body}

	httpRequest, _ := mockingJayRequest.AsHTTPRequest(baseURL)

	// BUG FIX: this previously compared the URL against itself, which
	// could never fail. Assert against the expected base URL + URI.
	assert.Equal(t, baseURL+uri, httpRequest.URL.String())
	assert.Equal(t, httpRequest.Method, method)
	assert.Equal(t, httpRequest.Header.Get("foo"), "bar")

	requestBody, _ := ioutil.ReadAll(httpRequest.Body)
	assert.Equal(t, string(requestBody), body)
}
// TestItMapsHTTPRequestsToMJRequests checks that NewRequest copies the
// HTTP method from a raw *http.Request into the mockingjay Request.
func TestItMapsHTTPRequestsToMJRequests(t *testing.T) {
	req, _ := http.NewRequest(http.MethodPost, "/foo", nil)
	mjRequest := NewRequest(req)
	assert.Equal(t, mjRequest.Method, http.MethodPost)
}
// TestItSendsForms checks that form values set on a mockingjay Request
// are encoded into the outgoing HTTP request and arrive as POST form
// data on the server side.
func TestItSendsForms(t *testing.T) {
	mjReq := Request{
		URI:    "/cat",
		Method: http.MethodPost,
		Form:   map[string]string{"name": "Hudson"},
	}

	req, err := mjReq.AsHTTPRequest("/")
	if err != nil {
		t.Fatal("Couldnt create http request from mj request", err)
	}

	checkForm := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		r.ParseForm()
		if r.PostForm.Get("name") != "Hudson" {
			t.Error("Did not get expected form value from request", r.PostForm)
		}
	})

	checkForm.ServeHTTP(httptest.NewRecorder(), req)
}
// TestItValidatesRequests checks Request.errors for the three cases:
// missing URI, missing method, and a fully valid request.
func TestItValidatesRequests(t *testing.T) {
	cases := []struct {
		request  Request
		expected error
	}{
		{Request{URI: "", Method: "POST"}, errEmptyURI},
		{Request{URI: "/", Method: ""}, errEmptyMethod},
		{Request{URI: "/", Method: "POST"}, nil},
	}

	for _, c := range cases {
		if c.expected == nil {
			assert.Nil(t, c.request.errors())
		} else {
			assert.Equal(t, c.request.errors(), c.expected)
		}
	}
}
|
package reverseStr
// reverseStr implements "reverse string II": within every window of 2k
// characters, the first k are reversed and the next k kept in order; a
// final partial window shorter than k is reversed entirely.
//
// This rewrite works on a byte slice in place instead of growing a
// string with fmt.Sprintf per character (which allocated a fresh string
// for every byte — quadratic work). Like the original, it operates
// byte-wise and so assumes single-byte (ASCII) input.
func reverseStr(s string, k int) string {
	// k <= 1 is an identity transform (and guards the loop step below).
	if s == "" || k <= 1 {
		return s
	}

	b := []byte(s)
	for start := 0; start < len(b); start += 2 * k {
		// reverse b[start:end), capping end at the string's end
		end := start + k
		if end > len(b) {
			end = len(b)
		}
		for i, j := start, end-1; i < j; i, j = i+1, j-1 {
			b[i], b[j] = b[j], b[i]
		}
	}
	return string(b)
}
|
/*
* randmat: random number generation
*
* input:
* nrows, ncols: the number of rows and columns
* s: the seed
*
* output:
* matrix: a nrows x ncols integer matrix
*
*/
package main
import (
"flag"
"fmt"
"math"
"sort"
)
// ByteMatrix is a dense Rows x Cols byte matrix stored row-major in a
// single backing slice.
type ByteMatrix struct {
	Rows, Cols int
	array      []byte
}

// NewByteMatrix allocates a zeroed r x c matrix.
func NewByteMatrix(r, c int) *ByteMatrix {
	backing := make([]byte, r*c)
	return &ByteMatrix{Rows: r, Cols: c, array: backing}
}

// Row returns a mutable slice aliasing row i of the matrix.
func (m *ByteMatrix) Row(i int) []byte {
	start := i * m.Cols
	return m.array[start : start+m.Cols]
}

// WrapBytes builds an r x c matrix view over an existing byte slice
// without copying it.
func WrapBytes(r, c int, bytes []byte) *ByteMatrix {
	return &ByteMatrix{Rows: r, Cols: c, array: bytes}
}

// Bytes returns the backing storage for the whole matrix, row-major.
func (m *ByteMatrix) Bytes() []byte {
	return m.array[:m.Rows*m.Cols]
}
// Linear congruential generator constants (the widely used
// Numerical-Recipes-style multiplier/increment pair).
const (
	LCG_A = 1664525
	LCG_C = 1013904223
)

var (
	// is_bench suppresses result printing when running as a benchmark.
	is_bench = flag.Bool("is_bench", false, "")
)
// Randmat builds an nelts x nelts matrix of pseudo-random values in
// [0, 100). Each row is seeded independently with s + row index, so the
// output is reproducible regardless of row evaluation order.
func Randmat(nelts int, s uint32) *ByteMatrix {
	matrix := NewByteMatrix(nelts, nelts)
	for i := 0; i < nelts; i++ {
		var seed = s + uint32(i)
		row := matrix.Row(i)
		for j := range row {
			seed = LCG_A*seed + LCG_C
			// seed%100 is already in [0, 100) and fits in a byte; the
			// original applied a second, redundant "% 100" after the
			// byte conversion.
			row[j] = byte(seed % 100)
		}
	}
	return matrix
}
// Thresh computes a boolean mask selecting (approximately) the top
// `percent` percent of the matrix's values. It histograms the byte
// values (Randmat keeps them in 0..99), walks the histogram downward
// until the prefix sum exceeds the target count, then marks every cell
// whose value is >= that threshold.
func Thresh(m *ByteMatrix, nelts, percent int) (mask []bool) {
	var hist [100]int
	mask = make([]bool, nelts*nelts)

	for _, v := range m.Bytes() {
		hist[v]++
	}

	// number of cells we aim to select
	count := (nelts * nelts * percent) / 100
	prefixsum := 0
	var threshold int
	for threshold = 99; threshold > 0; threshold-- {
		prefixsum += hist[threshold]
		if prefixsum > count {
			break
		}
	}

	for i := 0; i < nelts; i++ {
		row := m.Row(i)
		for j := range row {
			mask[i*nelts+j] = row[j] >= byte(threshold)
		}
	}
	return
}
// Winnow structure and sorting helpers

// WinnowPoints adapts a set of matrix cell indexes to sort.Interface,
// ordering them by cell value with the raw index as tie-breaker so the
// sort order is fully deterministic.
type WinnowPoints struct {
	m *ByteMatrix
	e []int // indexes into the ByteMatrix 'm'
}

func (p *WinnowPoints) Len() int {
	return len(p.e)
}

func (p *WinnowPoints) Swap(i, j int) {
	p.e[i], p.e[j] = p.e[j], p.e[i]
}

// Less orders by the underlying matrix value, falling back to the raw
// index when two cells hold equal values.
func (p *WinnowPoints) Less(i, j int) bool {
	if p.m.array[p.e[i]] != p.m.array[p.e[j]] {
		return p.m.array[p.e[i]] < p.m.array[p.e[j]]
	}
	return p.e[i] < p.e[j]
}

// Point is a (row, column) matrix coordinate.
type Point struct {
	x, y int
}
// Winnow selects winnow_nelts sample points from the masked cells of m:
// it gathers the indexes of all cells passing the mask, sorts them by
// value (then index), and takes every (len/winnow_nelts)-th entry.
//
// NOTE(review): if fewer than winnow_nelts cells pass the mask, chunk
// becomes 0 and every sample is values.e[0] — and an entirely empty
// selection would panic on the index. Callers appear to guarantee
// enough masked cells; confirm before reusing elsewhere.
func Winnow(m *ByteMatrix, mask []bool, nelts, winnow_nelts int) (points []Point) {
	var values WinnowPoints
	values.m = m

	for i := 0; i < nelts; i++ {
		for j := 0; j < nelts; j++ {
			idx := i*nelts + j
			if mask[idx] {
				values.e = append(values.e, idx)
			}
		}
	}

	sort.Sort(&values)

	// take evenly spaced samples from the sorted candidate list
	chunk := values.Len() / winnow_nelts
	points = make([]Point, winnow_nelts)
	for i := 0; i < winnow_nelts; i++ {
		v := values.e[i*chunk]
		p := Point{v / nelts, v % nelts}
		points[i] = p
	}
	return
}
// Sqr returns x squared.
func Sqr(x float64) float64 {
	result := x * x
	return result
}

// Distance returns the Euclidean distance between the integer points
// (ax, ay) and (bx, by).
func Distance(ax, ay, bx, by int) float64 {
	dx := float64(ax - bx)
	dy := float64(ay - by)
	return math.Sqrt(Sqr(dx) + Sqr(dy))
}
// Outer builds an nelts x nelts distance matrix and a distance vector
// from the winnowed points. Off-diagonal entry (i, j) holds the
// distance between points i and j; each row's diagonal entry is nelts
// times the row's largest off-diagonal distance (making the matrix
// diagonally dominant). vec[i] is point i's distance from the origin.
func Outer(wp []Point, nelts int) (m []float64, vec []float64) {
	m = make([]float64, nelts*nelts)
	vec = make([]float64, nelts)

	for i, v := range wp {
		nmax := float64(0)
		for j, w := range wp {
			if i != j {
				d := Distance(v.x, v.y, w.x, w.y)
				if d > nmax {
					nmax = d
				}
				m[i*nelts+j] = d
			}
		}
		// i*(nelts+1) is the diagonal slot of row i
		m[i*(nelts+1)] = float64(nelts) * nmax
		vec[i] = Distance(0, 0, v.x, v.y)
	}
	return
}
// Product computes the matrix-vector product of the nelts x nelts
// row-major matrix m with vec.
func Product(m, vec []float64, nelts int) (result []float64) {
	result = make([]float64, nelts)
	for row := 0; row < nelts; row++ {
		base := row * nelts
		acc := 0.0
		for col := 0; col < nelts; col++ {
			acc += m[base+col] * vec[col]
		}
		result[row] = acc
	}
	return
}
// main reads nelts, seed, thresh_percent and winnow_nelts from stdin,
// runs the randmat -> thresh -> winnow -> outer -> product pipeline,
// and prints the resulting vector unless -is_bench is set.
func main() {
	flag.Parse()

	var nelts, thresh_percent, seed, winnow_nelts int
	// input order: matrix size, RNG seed, threshold percentage, sample count
	fmt.Scan(&nelts)
	fmt.Scan(&seed)
	fmt.Scan(&thresh_percent)
	fmt.Scan(&winnow_nelts)

	rand_matrix := Randmat(nelts, uint32(seed))
	mask := Thresh(rand_matrix, nelts, thresh_percent)
	win_pts := Winnow(rand_matrix, mask, nelts, winnow_nelts)
	out_matrix, out_vec := Outer(win_pts, winnow_nelts)
	result := Product(out_matrix, out_vec, winnow_nelts)

	if !*is_bench {
		for i := 0; i < winnow_nelts; i++ {
			fmt.Printf("%.3f ", result[i])
		}
		fmt.Printf("\n")
	}
}
|
package adapter
import (
"github.com/mingo-chen/wheel-minirpc/ext"
)
// init registers the minirpc request/response coders and framer with
// the ext registry under the "minirpc" name, making them available to
// the RPC framework by lookup.
func init() {
	ext.RegistCoder("minirpc", MiniRpcReqCoder{}, MiniRpcRspCoder{})
	ext.RegistFramer("minirpc", MiniRpcFramer{})
}
|
package vm
import (
"fmt"
"os"
"github.com/luciancaetano/arch-v/register"
)
// RegisterMap holds the VM's register file: numRegs registers, of which
// indexes up to reservedRegister are reserved for internal use.
type RegisterMap struct {
	regs [numRegs]*register.Register
}
// rx returns the register at index reg for read-only use (reserved
// registers may be read). Exits the VM on an out-of-range index.
func (r *RegisterMap) rx(reg byte) *register.Register {
	// reg is a byte so it can never be negative (the original's
	// "reg < 0" was dead); ">=" catches reg == len(r.regs), which the
	// original's ">" let through to an index panic.
	if int(reg) >= len(r.regs) {
		fmt.Printf("Register %d out of range\n", reg)
		os.Exit(1)
	}
	return r.regs[reg]
}
// reserved returns the reserved register at index reg
// (reg <= reservedRegister). Exits the VM on an out-of-range or
// non-reserved index.
func (r *RegisterMap) reserved(reg byte) *register.Register {
	// byte can't be negative; ">=" fixes the off-by-one in the original
	// ">" bounds check
	if int(reg) >= len(r.regs) {
		fmt.Printf("Register %d out of range\n", reg)
		os.Exit(1)
	}
	if reg > reservedRegister {
		// fixed: message previously lacked its trailing newline
		fmt.Printf("Register %d is not reserved\n", reg)
		os.Exit(1)
	}
	return r.regs[reg]
}
// setReserved replaces the reserved register at index reg with v.
// Exits the VM on an out-of-range or non-reserved index.
func (r *RegisterMap) setReserved(reg byte, v *register.Register) {
	// byte can't be negative; ">=" fixes the off-by-one in the original
	// ">" bounds check
	if int(reg) >= len(r.regs) {
		fmt.Printf("Register %d out of range\n", reg)
		os.Exit(1)
	}
	if reg > reservedRegister {
		// fixed: message previously lacked its trailing newline
		fmt.Printf("Register %d is not reserved\n", reg)
		os.Exit(1)
	}
	r.regs[reg] = v
}
// NewRegisterMap builds a RegisterMap with every slot initialized to a
// fresh register.
func NewRegisterMap() *RegisterMap {
	rm := &RegisterMap{}
	for i := range rm.regs {
		rm.regs[i] = register.NewRegister()
	}
	return rm
}
|
package slb
import (
"context"
"crypto/tls"
"net"
"net/http"
"net/http/httputil"
"net/url"
"time"
"github.com/pkg/errors"
)
// Server is an interface for representing server load balancer implementation.
type Server interface {
	ListenAndServe() error
	Shutdown(context.Context) error
}

// serverLoadBalancer implements Server on top of net/http, delegating
// backend selection to a balancing handler and per-request rewriting to
// RequestDirector.
type serverLoadBalancer struct {
	*Config
	*http.Server

	// RequestDirector builds the httputil.ReverseProxy director that
	// rewrites an incoming request to target a chosen backend URL.
	RequestDirector func(target *url.URL) func(*http.Request)
	// HandlerDirector builds the top-level handler that picks a backend.
	HandlerDirector HandlerDirector
}
// CreateSLB returns Server implementation(*serverLoadBalancer) from the given Config.
// It validates cfg, installs the default request director (which
// rewrites scheme/host/path and merges query strings onto the backend
// target), applies any functional options, then builds the underlying
// http.Server with the balancing handler over the configured backends.
func CreateSLB(cfg *Config, ops ...Option) (Server, error) {
	err := cfg.validate()
	if err != nil {
		return nil, errors.Wrap(err, "invalid configuration")
	}

	sbl := &serverLoadBalancer{
		Config: cfg,
		RequestDirector: func(target *url.URL) func(*http.Request) {
			return func(req *http.Request) {
				req.URL.Scheme = target.Scheme
				req.URL.Host = target.Host
				req.URL.Path = target.Path
				// merge backend and client query strings, avoiding a
				// dangling "&" when either side is empty
				if target.RawQuery == "" || req.URL.RawQuery == "" {
					req.URL.RawQuery = target.RawQuery + req.URL.RawQuery
				} else {
					req.URL.RawQuery = target.RawQuery + "&" + req.URL.RawQuery
				}
				// explicitly blank the User-Agent so net/http does not
				// inject its default one
				if _, ok := req.Header["User-Agent"]; !ok {
					req.Header.Set("User-Agent", "")
				}
			}
		},
		HandlerDirector: cfg.Balancing.CreateHandler,
	}

	// options may override the directors configured above
	sbl.apply(ops...)

	sbl.Server = &http.Server{
		Handler: sbl.HandlerDirector(cfg.BackendServerConfigs.getURLs(), sbl),
	}
	return sbl, nil
}
// apply runs each functional option against the balancer, in order.
func (s *serverLoadBalancer) apply(ops ...Option) {
	for i := range ops {
		ops[i](s)
	}
}
// Proxy forwards req to the backend identified by target via a
// single-use httputil.ReverseProxy configured with RequestDirector.
func (s *serverLoadBalancer) Proxy(target *url.URL, w http.ResponseWriter, req *http.Request) {
	(&httputil.ReverseProxy{
		Director: s.RequestDirector(target),
	}).ServeHTTP(w, req)
}
// ListenAndServe opens a plain or TLS listener on the configured
// host:port (depending on cfg.TLSConfig.Enabled) and serves until the
// server stops. NOTE(review): after a graceful Shutdown, Serve returns
// http.ErrServerClosed, which this method wraps and reports as an
// error — confirm callers expect that.
func (s *serverLoadBalancer) ListenAndServe() error {
	addr := s.Config.Host + ":" + s.Config.Port

	var (
		ls  net.Listener
		err error
	)
	if s.Config.TLSConfig.Enabled {
		ls, err = createTLSListenter(addr, s.Config.TLSConfig.CertKey, s.Config.TLSConfig.KeyKey)
		if err != nil {
			// error strings fixed: previously "faild to create tls lisner"
			return errors.Wrap(err, "failed to create tls listener")
		}
	} else {
		ls, err = createListener(addr)
		if err != nil {
			return errors.Wrap(err, "failed to create listener")
		}
	}

	err = s.Server.Serve(ls)
	if err != nil {
		return errors.Wrap(err, "failed to serve")
	}
	return nil
}
// createListener opens a plain TCP listener on addr.
func createListener(addr string) (net.Listener, error) {
	ls, err := net.Listen("tcp", addr)
	if err != nil {
		// error string fixed: previously "faild to create lisner"
		return nil, errors.Wrapf(err, "failed to create listener, network: tcp, addr: %s", addr)
	}
	return ls, nil
}
// createTLSListenter opens a TLS TCP listener on addr using the given
// certificate/key pair. (The name's "Listenter" misspelling is kept so
// the caller in ListenAndServe is not broken.)
func createTLSListenter(addr string, certFile, keyFile string) (net.Listener, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		// error string fixed: previously "faild to load 509 key parir"
		return nil, errors.Wrapf(err, "failed to load x509 key pair, certFile: %s, keyFile: %s", certFile, keyFile)
	}

	cfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	ls, err := tls.Listen("tcp", addr, cfg)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create listener, network: tcp, addr: %s", addr)
	}
	return ls, nil
}
// Shutdown gracefully stops the server, bounding the supplied context
// with a 60-second timeout.
func (s *serverLoadBalancer) Shutdown(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()

	err := s.Server.Shutdown(ctx)
	if err != nil {
		// error string fixed: previously "faild to shutdown"
		return errors.Wrap(err, "failed to shutdown")
	}
	return nil
}
|
package uploader
import (
	"fmt"
	"mime"
	"net/http"
)

// MaxImageSize caps accepted downloads at ~10MB.
const MaxImageSize = 10 * 1024 * 1024 // ~10MB

// allowedContentTypes lists the media types accepted as images.
var allowedContentTypes = []string{"image/jpeg", "image/png"}

// ValidateURLResponse checks an HTTP response for an allowed image
// content type and acceptable size, returning a file extension (with a
// leading dot) suitable for the detected type.
func ValidateURLResponse(res *http.Response, url string) (string, error) {
	contentType, _, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
	if err != nil {
		return "", err
	}

	// errors.New(fmt.Sprintf(...)) replaced with fmt.Errorf (same
	// messages; the errors import became unnecessary)
	if !IsImage(contentType) {
		return "", fmt.Errorf("Content-Type of %s is not allowed", url)
	}

	if !IsValidImageSize(res.ContentLength) {
		return "", fmt.Errorf("Content-Length of %s is too big", url)
	}

	ext, err := mime.ExtensionsByType(contentType)
	if err != nil {
		return "", err
	}
	// guard the previously-unchecked ext[0]: ExtensionsByType can return
	// an empty list without an error, which would have panicked
	if len(ext) == 0 {
		return "", fmt.Errorf("no known file extension for Content-Type of %s", url)
	}
	return ext[0], nil
}

// IsImage reports whether contentType is one of the allowed image types.
func IsImage(contentType string) bool {
	for _, act := range allowedContentTypes {
		if contentType == act {
			return true
		}
	}
	return false
}

// IsValidImageSize reports whether contentLength fits under MaxImageSize.
func IsValidImageSize(contentLength int64) bool {
	return contentLength <= MaxImageSize
}
|
package repositories
import (
"context"
"database/sql"
"github.com/syncromatics/kafmesh/internal/graph/loaders"
"github.com/syncromatics/kafmesh/internal/graph/model"
"github.com/pkg/errors"
)
// Compile-time check that Query satisfies the loaders repository contract.
var _ loaders.QueryRepository = &Query{}

// Query is the repository for root queries
type Query struct {
	db *sql.DB // shared database handle; Query itself holds no other state
}
// GetAllServices returns all services in the datastore.
func (r *Query) GetAllServices(ctx context.Context) ([]*model.Service, error) {
	rows, err := r.db.QueryContext(ctx, `select id, name, description from services`)
	if err != nil {
		return nil, errors.Wrap(err, "failed to query services")
	}
	defer rows.Close()

	services := []*model.Service{}
	for rows.Next() {
		service := &model.Service{}
		err = rows.Scan(&service.ID, &service.Name, &service.Description)
		if err != nil {
			return nil, errors.Wrap(err, "failed to scan service")
		}
		services = append(services, service)
	}
	// iteration itself can fail (e.g. connection loss); the original
	// silently returned a truncated result set in that case
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "failed to iterate services")
	}
	return services, nil
}
// GetAllPods returns all pods in the datastore.
func (r *Query) GetAllPods(ctx context.Context) ([]*model.Pod, error) {
	rows, err := r.db.QueryContext(ctx, `select id, name from pods`)
	if err != nil {
		return nil, errors.Wrap(err, "failed to query pods")
	}
	defer rows.Close()

	pods := []*model.Pod{}
	for rows.Next() {
		pod := &model.Pod{}
		err = rows.Scan(&pod.ID, &pod.Name)
		if err != nil {
			return nil, errors.Wrap(err, "failed to scan pod")
		}
		pods = append(pods, pod)
	}
	// iteration itself can fail; don't silently return a truncated set
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "failed to iterate pods")
	}
	return pods, nil
}
// GetAllTopics returns all topics in the datastore.
func (r *Query) GetAllTopics(ctx context.Context) ([]*model.Topic, error) {
	rows, err := r.db.QueryContext(ctx, `select id, name, message from topics`)
	if err != nil {
		return nil, errors.Wrap(err, "failed to query topics")
	}
	defer rows.Close()

	results := []*model.Topic{}
	for rows.Next() {
		topic := &model.Topic{}
		err = rows.Scan(&topic.ID, &topic.Name, &topic.Message)
		if err != nil {
			return nil, errors.Wrap(err, "failed to scan topic")
		}
		results = append(results, topic)
	}
	// iteration itself can fail; don't silently return a truncated set
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "failed to iterate topics")
	}
	return results, nil
}
// ServiceByID gets a service by id. It returns (nil, nil) when no
// matching row exists.
func (r *Query) ServiceByID(ctx context.Context, id int) (*model.Service, error) {
	row := r.db.QueryRowContext(ctx, `
select
id,
name,
description
from
services
where
id=$1`, id)

	service := &model.Service{}
	err := row.Scan(&service.ID, &service.Name, &service.Description)
	// QueryRowContext defers errors to Scan; ErrNoRows means "not found"
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, errors.Wrap(err, "failed to scan for service")
	}
	return service, nil
}
// ComponentByID gets a component by id (the original comment wrongly
// said "service"). It returns (nil, nil) when no matching row exists.
func (r *Query) ComponentByID(ctx context.Context, id int) (*model.Component, error) {
	row := r.db.QueryRowContext(ctx, `
select
id,
name,
description
from
components
where
id=$1`, id)

	component := &model.Component{}
	err := row.Scan(&component.ID, &component.Name, &component.Description)
	// QueryRowContext defers errors to Scan; ErrNoRows means "not found"
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, errors.Wrap(err, "failed to scan for component")
	}
	return component, nil
}
|
package helpers
import (
"github.com/veandco/go-sdl2/sdl"
"github.com/veandco/go-sdl2/img"
"io/ioutil"
"math"
"bytes"
)
// Node is one grid cell of the map, linked to its four orthogonal
// neighbours (cells are 20px apart) and carrying the sprite used to
// draw it.
type Node struct {
	X     int32
	Y     int32
	Type  NodeType
	Right *Node
	Left  *Node
	Up    *Node
	Down  *Node
	Image *sdl.Surface
}

// Nodes is the global list of map nodes built by CreatePath.
var Nodes []*Node
// linkNodes wires up the Right/Left/Up/Down pointers between every pair
// of nodes that sit exactly one 20px cell apart.
func linkNodes() {
	for _, node := range Nodes {
		for _, other := range Nodes {
			switch {
			case node.Y == other.Y && other.X == node.X+20:
				node.Right = other
			case node.Y == other.Y && other.X == node.X-20:
				node.Left = other
			case node.X == other.X && other.Y == node.Y+20:
				node.Down = other
			case node.X == other.X && other.Y == node.Y-20:
				node.Up = other
			}
		}
	}
}
// LoadImage loads an image file into an SDL surface, panicking if the
// file cannot be loaded. The surface is intentionally not freed here —
// callers keep it alive for rendering.
func LoadImage(file string) *sdl.Surface {
	image, err := img.Load(file)
	if err != nil {
		panic(err)
	}
	//defer image.Free()
	return image
}
func CreatePath() {
b, err := ioutil.ReadFile("./helpers/map_sim.txt")
if err != nil {
panic(err)
}
b = bytes.Trim(b, "\n")
for i, byte := range b {
switch string(byte) {
case "O": // Ordinary node
n := Node{int32((i%25)*20), int32((math.Floor(float64(i/25)))*20), Normal, nil, nil, nil, nil, LoadImage(Stone_image)}
Nodes = append(Nodes, &n)
break
case "$": // Treasure node
n := Node{int32((i%25)*20), int32((math.Floor(float64(i/25)))*20), Treasure, nil, nil, nil, nil, LoadImage(Treasure_image)}
Nodes = append(Nodes, &n)
break
case "D": // Door
var n Node
if int32((i%25)*20) == 0 {
n = Node{int32((i%25)*20), int32((math.Floor(float64(i/25)))*20), Door, nil, nil, nil, nil, LoadImage(Door_left_image)}
} else {
n = Node{int32((i%25)*20), int32((math.Floor(float64(i/25)))*20), Door, nil, nil, nil, nil, LoadImage(Door_right_image)}
}
Nodes = append(Nodes, &n)
break
case "I":
n := Node{int32((i%25)*20), int32((math.Floor(float64(i/25)))*20), InventorySpot, nil, nil, nil, nil, LoadImage(Inventory_spot_empty_image)}
Nodes = append(Nodes, &n)
break
case "#": // Wall
break
}
}
linkNodes()
}
|
// Package process holds a Transport implementation that runs local, unsandboxed processes
package process
import (
"bufio"
"errors"
"fmt"
"os/exec"
"path"
"sync"
"time"
"github.com/anmitsu/go-shlex"
"awesome-dragon.science/go/goGoGameBot/internal/config/tomlconf"
"awesome-dragon.science/go/goGoGameBot/internal/process"
"awesome-dragon.science/go/goGoGameBot/internal/transport/util"
"awesome-dragon.science/go/goGoGameBot/pkg/log"
)
// New creates a new ProcessTransport for use with a process, applying
// transportConfig immediately via Update and giving the transport a
// "|PT"-suffixed log prefix.
func New(transportConfig tomlconf.ConfigHolder, logger *log.Logger) (*ProcessTransport, error) {
	p := ProcessTransport{log: logger.SetPrefix(logger.Prefix() + "|" + "PT")}
	if err := p.Update(transportConfig); err != nil {
		return nil, err
	}
	return &p, nil
}
// ProcessTransport is a transport implementation that works with a process.Process to
// provide local-to-us game servers
type ProcessTransport struct {
	process *process.Process
	log     *log.Logger

	// lazily-created line channels for the child's stdout/stderr;
	// see getStdioChan
	stdout chan []byte
	stderr chan []byte
}
// GetStatus returns the current state of the game the transport manages
func (p *ProcessTransport) GetStatus() util.TransportStatus {
	if p.process.IsRunning() {
		return util.Running
	}
	return util.Stopped
}

// GetHumanStatus returns the status of the transport that is human readable
func (p *ProcessTransport) GetHumanStatus() string {
	return p.process.GetStatus()
}
// monitorStdIO spawns two goroutines that scan the child process's
// stdout and stderr line by line, forwarding each line over the
// corresponding channel and closing that channel when its stream ends.
//
// NOTE(review): s.Bytes() aliases the scanner's internal buffer, which
// is reused on the next Scan — confirm channel consumers copy the slice
// before the sender scans again.
func (p *ProcessTransport) monitorStdIO() error {
	if !p.process.IsRunning() {
		return errors.New("cannot watch stdio on a non-running game")
	}

	go func() {
		s := bufio.NewScanner(p.process.Stdout)
		last := "" // most recent stdout line, logged on exit for debugging
		for s.Scan() {
			b := s.Bytes()
			p.getStdioChan(true) <- b
			last = string(b)
		}
		close(p.getStdioChan(true))
		p.log.Infof("stdout exit: %q", last)
	}()

	go func() {
		s := bufio.NewScanner(p.process.Stderr)
		for s.Scan() {
			p.getStdioChan(false) <- s.Bytes()
		}
		close(p.getStdioChan(false))
		p.log.Info("stderr exit")
	}()

	return nil
}
// getStdioChan returns the stdout (true) or stderr (false) line
// channel, creating it on first use.
//
// NOTE(review): the lazy creation is not synchronized; if the monitor
// goroutines and a consumer both call this before the channel exists,
// that is a data race — confirm callers serialize the first use.
func (p *ProcessTransport) getStdioChan(stdout bool) chan []byte {
	if stdout {
		if p.stdout == nil {
			p.stdout = make(chan []byte)
		}
		return p.stdout
	}

	if p.stderr == nil {
		p.stderr = make(chan []byte)
	}
	return p.stderr
}
// Stdout returns a channel that will have lines from stdout sent over it.
// The channel is closed when the process's stdout stream ends.
func (p *ProcessTransport) Stdout() <-chan []byte {
	return p.getStdioChan(true)
}

// Stderr returns a channel that will have lines from stderr sent over it.
// The channel is closed when the process's stderr stream ends.
func (p *ProcessTransport) Stderr() <-chan []byte {
	return p.getStdioChan(false)
}
// Update updates the Transport with a TransportConfig: it unmarshals
// the raw TOML config, infers the working directory from the binary
// path when unset, splits the argument string shell-style, and either
// creates the underlying process or updates the existing one's command.
func (p *ProcessTransport) Update(rawConf tomlconf.ConfigHolder) error {
	conf := new(Config)
	if err := rawConf.RealConf.Unmarshal(conf); err != nil {
		return fmt.Errorf("could not unmarshal config: %w", err)
	}

	workingDir := conf.WorkingDirectory
	if workingDir == "" {
		// default to the directory containing the game binary
		workingDir = path.Dir(conf.Path)
		p.log.Infof("working directory inferred to %q from binary path %q", workingDir, conf.Path)
	}

	procArgs, err := shlex.Split(conf.Args, true)
	if err != nil {
		return fmt.Errorf("could not parse arguments: %w", err)
	}

	if p.process == nil {
		l := p.log.Clone().SetPrefix(p.log.Prefix() + "|" + "P")
		proc, err := process.NewProcess(conf.Path, procArgs, workingDir, l, conf.Environment, conf.CopyEnv)
		if err != nil {
			return err
		}
		p.process = proc
	} else {
		p.process.UpdateCmd(conf.Path, procArgs, workingDir, conf.Environment, conf.CopyEnv)
	}
	return nil
}
// StopOrKill attempts to stop the process with SIGINT, and after 30 seconds stops it with SIGKILL
func (p *ProcessTransport) StopOrKill() error {
	return p.StopOrKillTimeout(time.Second * 30)
}

// StopOrKillTimeout is like StopOrKill, but allows you to specify the timeout.
// It returns util.ErrorNotRunning when the process is not running.
func (p *ProcessTransport) StopOrKillTimeout(duration time.Duration) error {
	if !p.IsRunning() {
		return util.ErrorNotRunning
	}
	return p.process.StopOrKillTimeout(duration)
}
// StopOrKillWaitgroup calls StopOrKill, and marks a waitgroup as Done once it has completed.
// The waitgroup is incremented automatically before the StopOrKill call
func (p *ProcessTransport) StopOrKillWaitgroup(group *sync.WaitGroup) {
	group.Add(1)
	// deferred so the waitgroup is released even if StopOrKill panics;
	// the original would have leaked the Add in that case
	defer group.Done()
	if err := p.StopOrKill(); err != nil {
		p.log.Warnf("error while stopping game: %s", err)
	}
}
// Run runs the process once, if it is not already running. It blocks until the process exits.
// The start channel is closed as soon as the process has been started
// (or unconditionally on an early-return error path).
func (p *ProcessTransport) Run(start chan struct{}) (exitCode int, exitString string, exitError error) {
	closed := false
	defer func() {
		if !closed {
			close(start)
		}
	}()

	if p.IsRunning() {
		return -1, "", fmt.Errorf("could not start game: %w", util.ErrorAlreadyRunning)
	}

	if err := p.process.Reset(); err != nil {
		return -1, "", fmt.Errorf("could not reset process: %w", err)
	}

	// drop stale channels so a fresh monitorStdIO recreates them
	p.stdout = nil
	p.stderr = nil

	if err := p.process.Start(); err != nil {
		return -1, "", fmt.Errorf("could not start process: %w", err)
	}

	close(start)
	closed = true

	if err := p.monitorStdIO(); err != nil {
		go func() { _ = p.StopOrKill() }()
		return -1, "", fmt.Errorf("could not begin monitoring standard i/o. Aborting: %w", err)
	}

	// A non-zero exit surfaces as *exec.ExitError and is reported via
	// the return code/status rather than as an error. BUG FIX: the
	// original used errors.Is(err, &exec.ExitError{}), which compares
	// against a freshly allocated pointer and can never match; errors.As
	// is the correct check for an error *type*.
	var exitErr *exec.ExitError
	if err := p.process.WaitForCompletion(); err != nil && !errors.As(err, &exitErr) {
		return p.process.GetReturnCode(), p.process.GetReturnStatus(), err
	}
	return p.process.GetReturnCode(), p.process.GetReturnStatus(), nil
}
// IsRunning returns whether or not the process is currently running
func (p *ProcessTransport) IsRunning() bool {
	return p.process.IsRunning()
}

// Write writes the given bytes to the process's stdin.
func (p *ProcessTransport) Write(b []byte) (n int, err error) {
	return p.process.Write(b)
}

// WriteString writes the given string to the process's stdin
func (p *ProcessTransport) WriteString(s string) (n int, err error) {
	return p.process.WriteString(s)
}
|
package ants
import (
"errors"
"math/rand"
"strconv"
"time"
"github.com/radekwlsk/go-travel/gotravel/gotravelservice/trip"
"gonum.org/v1/gonum/floats"
)
// Sentinel errors describing why a place could not be scheduled or why
// path construction had to stop.
var (
	ErrPlaceClosed         = errors.New("place closed at that day")
	ErrPlaceClosesTooEarly = errors.New("place closes too early")
	ErrTripEndsTooEarly    = errors.New("trip time ends before place departure")
	ErrCantReachEndPlace   = errors.New("can't reach set end place in time")
	ErrTripEnded           = errors.New("no place reachable before trip time end")
	ErrMustReturnToStart   = errors.New("must return to start place before trip ends")
	ErrMustReachEndPlace   = errors.New("must get to end place before trip ends")
)
// Used tracks which place indexes have already been visited on the
// current walk.
type Used map[int]bool

// Ant is a single ant-colony walker: it constructs one candidate path
// through the trip's places, guided by the shared pheromone matrix, and
// reports the outcome on resultChannel.
type Ant struct {
	trip       *trip.Trip
	visitTimes VisitTimes
	startPlace *trip.Place
	endPlace   *trip.Place
	n          int       // number of places in the trip
	path       trip.Path // the path under construction
	at         int       // index of the place the ant currently stands at
	used       Used

	currentTime   time.Time     // simulated clock while walking
	totalTime     time.Duration // accumulated trip time
	totalDistance int64

	distances     *TimesMappedDistancesMatrix
	durations     *TimesMappedDurationsMatrix
	pheromones    *PheromonesMatrix
	random        *rand.Rand
	resultChannel chan Result
}
// NewAnt builds an Ant sharing the given trip data, distance/duration
// lookups and pheromone matrix, reporting results on resultChannel.
func NewAnt(
	trip *trip.Trip,
	distances *TimesMappedDistancesMatrix,
	durations *TimesMappedDurationsMatrix,
	pheromones *PheromonesMatrix,
	resultChannel chan Result,
) (a *Ant) {
	a = &Ant{
		trip:          trip,
		distances:     distances,
		durations:     durations,
		pheromones:    pheromones,
		resultChannel: resultChannel,
	}
	a.init()
	return a
}

// SetPheromones swaps in a new pheromone matrix for subsequent walks.
func (a *Ant) SetPheromones(p *PheromonesMatrix) {
	a.pheromones = p
}
// FindFood performs one complete walk: it resets per-walk state, builds
// a path, and always publishes a Result on resultChannel. Unexpected
// errors panic.
func (a *Ant) FindFood() {
	err := a.before()
	switch err {
	case nil:
		if genErr := a.generatePath(); genErr != nil && genErr != ErrTripEnded {
			panic(genErr.Error())
		}
	case ErrTripEnded:
		// nothing was reachable; report the minimal path as-is
	default:
		panic(err.Error())
	}

	a.resultChannel <- NewResult(
		a.path,
		a.totalTime,
		a.totalDistance,
		a.sumPriorities(),
		a.visitTimes,
	)
}
// setStart chooses the walk's starting place. With no configured start
// place, it picks a random place that is reachable at trip start
// (excluding the end place), falls back to the end place itself, and
// reports ErrTripEnded when nothing is reachable at all.
func (a *Ant) setStart() error {
	if a.trip.StartPlace == nil {
		var reachable []*trip.Place
		for _, p := range a.trip.Places {
			// position the ant at the candidate before the reachability check
			a.at = p.Index
			if ok, _ := a.placeReachable(p); ok && p != a.endPlace {
				reachable = append(reachable, p)
			}
		}
		if n := len(reachable); n > 0 {
			i := a.random.Intn(n)
			a.startPlace = reachable[i]
		} else if a.endPlace != nil {
			a.startPlace = a.endPlace
		} else {
			return ErrTripEnded
		}
	} else {
		a.startPlace = a.trip.StartPlace
	}
	return nil
}
// init caches the place count and seeds this ant's private RNG.
func (a *Ant) init() {
	a.n = len(a.trip.Places)
	a.random = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// setStep records visiting place as step i of the path, advancing the
// simulated clock and accumulating travel time/distance. Step 0 is the
// starting place and involves no travel; a later return to the start
// place adds travel but no stay.
func (a *Ant) setStep(i int, place *trip.Place) {
	var dist int64
	var dur time.Duration

	arrival, departure, err := a.placeArrivalDeparture(place, i == 0)
	if err != nil {
		panic(err.Error())
	}

	if i > 0 {
		dist = a.distances.At(a.at, place.Index, a.currentTime)
		dur = arrival.Sub(a.currentTime)
		a.path.SetStep(i, place.Index, dur, dist)
		a.totalTime += dur
		a.currentTime = arrival
		a.totalDistance += dist
	} else {
		a.path.Set(0, place.Index)
	}

	// stay at the place unless this is a final return to the start
	if place != a.startPlace || i == 0 {
		a.visitTimes.Arrivals[place.Index] = arrival
		a.totalTime += departure.Sub(a.currentTime)
		a.currentTime = departure
		a.visitTimes.Departures[place.Index] = departure
	}

	a.at = place.Index
	a.used[a.at] = true
}
// isUsed reports whether the ant has already visited place on this walk.
func (a *Ant) isUsed(place *trip.Place) bool {
	return a.used[place.Index]
}
// before resets all per-walk state (clock, totals, visit bookkeeping),
// chooses a start place, and records it as step 0 of a fresh path.
func (a *Ant) before() error {
	a.endPlace = a.trip.EndPlace
	a.visitTimes = NewVisitTimes(a.n)
	a.used = make(Used, a.n)
	a.currentTime = a.trip.TripStart
	a.totalTime = time.Duration(0)
	a.totalDistance = 0

	if err := a.setStart(); err != nil {
		return err
	}

	// a round trip (start == end) gets an extra slot to close the loop
	a.path = trip.NewPath(a.n, a.startPlace == a.endPlace)
	a.setStep(0, a.startPlace)
	return nil
}
// generatePath repeatedly picks a next place until all places are used
// or nothing further is reachable, trimming the path to its realized
// length. Every early-termination case (including the forced hop to a
// required end place) returns ErrTripEnded; errors pickNextPlace is not
// documented to return cause a panic.
func (a *Ant) generatePath() error {
	for i := 1; i < a.n; i++ {
		switch next, err := a.pickNextPlace(); err {
		case ErrMustReachEndPlace:
			// forced final hop to the configured end place
			a.setStep(i, next)
			if i+1 < a.path.Size()-1 {
				a.path.Cut(i + 1)
			}
			return ErrTripEnded
		case ErrTripEnded:
			a.path.Cut(i)
			return ErrTripEnded
		case ErrMustReturnToStart:
			// round trip: close the loop back at the start place
			if i < a.path.Size()-1 {
				a.path.Cut(i)
			}
			a.setStep(i, next)
			return ErrTripEnded
		case nil:
			a.setStep(i, next)
		default:
			panic("unexpected error returned from Ant.pickNextPlace()")
		}
	}

	// every place was visited; close the loop if this is a round trip
	switch a.endPlace {
	case nil:
		return ErrTripEnded
	case a.startPlace:
		a.setStep(a.n, a.startPlace)
		return ErrTripEnded
	default:
		panic("end place != start place left to visit after loop")
	}
}
// pickNextPlace chooses the next place to visit by pheromone-weighted
// roulette selection over the currently reachable, unvisited places.
// When nothing is reachable it signals how the walk must end instead:
// ErrMustReturnToStart / ErrMustReachEndPlace (with the forced place),
// or ErrTripEnded when there is no end place to force.
func (a *Ant) pickNextPlace() (place *trip.Place, err error) {
	var available []*trip.Place
	for _, p := range a.trip.Places {
		if !a.isUsed(p) && p != a.endPlace {
			available = append(available, p)
		}
	}

	var reachable []*trip.Place
	var pheromones []float64
	for _, p := range available {
		if ok, _ := a.placeReachable(p); ok {
			reachable = append(reachable, p)
			pheromone := a.pheromones.At(a.at, p.Index)
			pheromones = append(pheromones, pheromone)
		}
	}

	l := len(reachable)
	if l == 0 {
		if a.startPlace == a.endPlace {
			return a.endPlace, ErrMustReturnToStart
		} else if a.endPlace != nil {
			return a.endPlace, ErrMustReachEndPlace
		}
		return nil, ErrTripEnded
	}

	total := floats.Sum(pheromones)
	for {
		// BUG FIX: the original wrote "for i := range a.random.Perm(l)",
		// which iterates the *indexes* of the permutation (0..l-1 in
		// order), making the shuffle a no-op and biasing selection
		// toward lower-indexed places. Iterate the permuted values.
		for _, i := range a.random.Perm(l) {
			if a.random.Float64() <= pheromones[i]/total {
				return reachable[i], nil
			}
		}
	}
}
// placeArrivalDeparture computes when the ant would arrive at and leave
// place. For the first step the arrival is the current time; otherwise
// it is the current time plus travel. The stay lasts StayDuration
// minutes, postponed to opening time if the place has not opened yet.
// Errors flag a closed place, a closing time before the computed
// departure, or a departure past the trip's end. A mid-path return to
// the start place incurs no stay.
func (a *Ant) placeArrivalDeparture(place *trip.Place, first bool) (arrival, departure time.Time, err error) {
	var opn, cls time.Time

	if first {
		arrival = a.currentTime
	} else {
		dur := a.durations.At(a.at, place.Index, a.currentTime)
		arrival = a.currentTime.Add(dur)
	}

	if !first && place == a.startPlace {
		// closing the loop: no stay at the start place
		return arrival, arrival, nil
	}

	departure = arrival.Add(time.Duration(place.StayDuration) * time.Minute)

	// opening hours for the day; empty Open and Close mean closed all day.
	// NOTE(review): the lookup uses a.currentTime's weekday even though
	// arrival may fall on the next day — confirm this is intended.
	oc := place.Details.OpeningHoursPeriods[a.currentTime.Weekday()]
	if oc.Open == "" && oc.Close == "" {
		return arrival, departure, ErrPlaceClosed
	}

	{
		// parse the "HHMM" opening time in the place's own time zone
		o := oc.Open
		y, m, d := arrival.In(place.Details.Location).Date()
		hh, _ := strconv.Atoi(o[:2])
		mm, _ := strconv.Atoi(o[2:])
		opn = time.Date(y, m, d, hh, mm, 0, 0, place.Details.Location).In(arrival.Location())
	}

	if opn.After(arrival) {
		// wait for opening; the stay starts then
		departure = opn.Add(time.Duration(place.StayDuration) * time.Minute)
	}

	{
		// parse the "HHMM" closing time in the place's own time zone
		c := oc.Close
		y, m, d := departure.In(place.Details.Location).Date()
		hh, _ := strconv.Atoi(c[:2])
		mm, _ := strconv.Atoi(c[2:])
		cls = time.Date(y, m, d, hh, mm, 0, 0, place.Details.Location).In(arrival.Location())
	}

	if cls.Before(departure) {
		return arrival, departure, ErrPlaceClosesTooEarly
	}

	if a.trip.TripEnd.Before(departure) {
		return arrival, departure, ErrTripEndsTooEarly
	}

	return
}
// placeReachable reports whether place can still be visited: it must
// not be permanently closed, must admit a valid arrival/departure, and
// — when an end place is configured — must leave enough time to then
// reach (and, if distinct from the start, stay at) that end place
// before the trip ends.
func (a *Ant) placeReachable(place *trip.Place) (ok bool, err error) {
	if place.Details.PermanentlyClosed {
		return false, ErrPlaceClosed
	}

	_, dprt, err := a.placeArrivalDeparture(place, a.currentTime.Equal(a.trip.TripStart))
	if err != nil {
		return false, err
	}

	if a.endPlace != nil {
		// could we still make it to the end place after visiting?
		fin := dprt.Add(a.durations.At(place.Index, a.endPlace.Index, dprt))
		if a.endPlace != a.startPlace {
			stay := time.Duration(a.endPlace.StayDuration) * time.Minute
			fin = fin.Add(stay)
		}
		if a.trip.TripEnd.Before(fin) {
			return false, ErrCantReachEndPlace
		}
	}

	return true, nil
}
// sumPriorities totals the priority of every place on the current path;
// this is the walk's score reported in its Result.
func (a *Ant) sumPriorities() (sum int) {
	for _, i := range a.path.Path() {
		sum += a.trip.Places[i].Priority
	}
	return
}
|
package main

// Prints the numbers from 1 to 10000, one per line.
// (Translated from the original Portuguese comment: "escreva um
// programa que coloque na tela os numeros de 1 a 10000".)

import "fmt"

func main() {
	// start at 1: the original loop started at 0, contradicting its own
	// stated spec of printing 1 through 10000
	for x := 1; x <= 10000; x++ {
		fmt.Println(x)
	}
}
|
package xlsx
// xlsxWorksheet directly maps the worksheet element in the namespace
// http://schemas.openxmlformats.org/spreadsheetml/2006/main -
// currently I have not checked it for completeness - it does as much
// as I need.
type xlsxWorksheet struct {
	SheetFormatPr xlsxSheetFormatPr `xml:"sheetFormatPr"` // sheet-wide format defaults
	Dimension xlsxDimension `xml:"dimension"` // declared used range of the sheet
	SheetData xlsxSheetData `xml:"sheetData"` // the rows and cells themselves
	SheetProtection xlsxSheetProtection `xml:"sheetProtection"` // protection settings
}
// xlsxSheetFormatPr maps the sheetFormatPr element (sheet-wide format
// defaults).
type xlsxSheetFormatPr struct {
	DefaultRowHeight float64 `xml:"defaultRowHeight,attr"` // height used when a row has no explicit ht
}
// xlsxDimension directly maps the dimension element in the namespace
// http://schemas.openxmlformats.org/spreadsheetml/2006/main -
// currently I have not checked it for completeness - it does as much
// as I need.
type xlsxDimension struct {
	Ref string `xml:"ref,attr"` // cell-range reference string (the "ref" attribute)
}
// xlsxSheetData directly maps the sheetData element in the namespace
// http://schemas.openxmlformats.org/spreadsheetml/2006/main -
// currently I have not checked it for completeness - it does as much
// as I need.
type xlsxSheetData struct {
	Row []xlsxRow `xml:"row"` // all rows of the sheet
}
// xlsxRow directly maps the row element in the namespace
// http://schemas.openxmlformats.org/spreadsheetml/2006/main -
// currently I have not checked it for completeness - it does as much
// as I need.
type xlsxRow struct {
	R int `xml:"r,attr"` // row number (the "r" attribute)
	Spans string `xml:"spans,attr"`
	C []xlsxC `xml:"c"` // cells in this row
	Ht float64 `xml:"ht,attr"` // explicit row height
	CustomHeight int `xml:"customHeight,attr"`
}
// xlsxSheetProtection directly maps the sheetProtection element in the namespace
// http://schemas.openxmlformats.org/spreadsheetml/2006/main -
// currently I have not checked it for completeness - it does as much
// as I need.
type xlsxSheetProtection struct {
	Sheet bool `xml:"sheet,attr"`
}
// xlsxSharedFormula appears to record a shared formula: its text, the
// range it covers, and anchor cell coordinates.
// NOTE(review): semantics inferred from field names — confirm against the
// code that populates it.
type xlsxSharedFormula struct {
	F string
	Ref string
	cellX int
	cellY int
}
// xlsxF maps the f (formula) element of a cell.
type xlsxF struct {
	F string `xml:",innerxml"` // raw formula text
	Si string `xml:"si,attr"` // shared-formula index (the "si" attribute)
	Ref string `xml:"ref,attr"`
	T string `xml:"t,attr"` // formula type (the "t" attribute)
}
// xlsxC directly maps the c element in the namespace
// http://schemas.openxmlformats.org/spreadsheetml/2006/main -
// currently I have not checked it for completeness - it does as much
// as I need.
type xlsxC struct {
	F xlsxF `xml:"f"` // optional formula child element
	R string `xml:"r,attr"` // cell reference (the "r" attribute)
	S int `xml:"s,attr"`
	T string `xml:"t,attr"` // cell type (the "t" attribute)
	V string `xml:"v"` // cell value
}
// Cell returns a pointer to the cell stored at (row, col), or nil when no
// such cell exists. NOTE(review): the pointer refers to a copy of the map
// value, so mutations through it do not change the sheet's stored cell —
// confirm callers expect this.
func (sh *Sheet) Cell(row, col int) *Cell {
	if c, ok := sh.Cells[CellCoord{col, row}]; ok {
		return &c
	}
	return nil
}
|
package smallNet
import "net"
// sessionError enumerates session close/failure causes; the numeric value
// indexes into _closeCaseMessage to obtain a human-readable message.
type sessionError int16
// netLibErrNone means "no error" at the network-library level.
const netLibErrNone = 0
// Close causes. Declaration order is significant: each constant's iota
// value must line up with its entry in _closeCaseMessage below.
const (
	sessionErrStart = sessionError(iota) // sentinel; maps to an empty message
	sessionCloseForce
	sessionCloseAllSession
	sessionCloseRecvGoroutineEnd
	sessionCloseForceTerminateRecvGoroutine
	sessionCloseCloseRemote
	sessionCloseSocketError
	sessionCloseSocketReadTimeout
	sessionCloseRecvMakePacketTooLargePacketSize
	sessionCloseRecvTooSmallData
	sessionDisablePacketProcess
	ringBufferRecvInitFail
)
// Error implements the error interface by looking the value up in
// _closeCaseMessage. NOTE(review): a value outside the table's range
// panics with an index-out-of-range — confirm values only come from the
// constants above.
func (s sessionError) Error() string {
	return _closeCaseMessage[s]
}
// _closeCaseMessage maps each sessionError constant to its message text.
// NOTE(review): the last entry's style ("ringBufferRecvInitFail") is
// inconsistent with the spaced wording of the others.
var _closeCaseMessage = [...]string{
	sessionErrStart: "",
	sessionCloseForce: "session close force",
	sessionCloseAllSession: "session close all session",
	sessionCloseRecvGoroutineEnd: "session close recv goroutine end",
	sessionCloseForceTerminateRecvGoroutine: "session close force terminate recv goroutine",
	sessionCloseCloseRemote: "session close - close remote",
	sessionCloseSocketError: "session close socket error",
	sessionCloseSocketReadTimeout: "session close socket read timeout",
	sessionCloseRecvMakePacketTooLargePacketSize: "session close recv make packet too large packet size",
	sessionCloseRecvTooSmallData: "session close recv too small data",
	sessionDisablePacketProcess: "session disable packet process",
	ringBufferRecvInitFail: "ringBufferRecvInitFail",
}
// Message type codes carried in NetMsg.Type.
// NOTE(review): declared as vars rather than consts — confirm nothing
// mutates or takes the address of these; a const block would be safer.
var NetMsg_None int8 = 0
var NetMsg_Connect int8 = 1
var NetMsg_Close int8 = 2
var NetMsg_Receive int8 = 3
// NetMsg is an event delivered from the network layer: a connect, close,
// or received-data notification for a session.
type NetMsg struct {
	Type int8 // one of the NetMsg_* codes above
	SessionIndex int
	Data []byte
	TcpConn *net.TCPConn
}
// PacketReceivceFunctors bundles the callbacks and packet-framing
// parameters used on the receive path. (The "Receivce" spelling is an
// existing typo kept for API compatibility.)
type PacketReceivceFunctors struct {
	AddNetMsgOnCloseFunc func(int)
	AddNetMsgOnReceiveFunc func(int, []byte)
	// Parses the data and returns the total packet size.
	PacketTotalSizeFunc func([]byte) int16
	// Size of the packet header.
	PacketHeaderSize int16
	// True when this session is connected to a client.
	IsClientSession bool
}
|
package dockerfile
import (
"bytes"
"testing"
"github.com/mitchellh/packer/packer"
"github.com/mitchellh/packer/post-processor/docker-import"
)
// testConfig returns a fixture configuration exercising every Dockerfile
// directive the post-processor supports. Literals are gofmt-formatted
// (the original had non-standard spacing inside composite literals).
func testConfig() map[string]interface{} {
	return map[string]interface{}{
		"maintainer": "foo",
		"cmd":        []interface{}{"/foo/bar"},
		"label":      map[string]string{"foo": "bar"},
		"expose":     []string{"1234"},
		"env":        map[string]string{"foo": "bar"},
		"entrypoint": []interface{}{"/foo/bar"},
		"volume":     []string{"/foo/bar"},
		"user":       "foo",
		"workdir":    "/foo/bar",
	}
}
// testPP builds a PostProcessor configured with the test fixture,
// failing the test immediately on a configuration error.
func testPP(t *testing.T) *PostProcessor {
	p := new(PostProcessor)
	err := p.Configure(testConfig())
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	return p
}
// testUi returns a packer UI backed by in-memory buffers so tests run
// silently.
func testUi() *packer.BasicUi {
	ui := new(packer.BasicUi)
	ui.Reader = new(bytes.Buffer)
	ui.Writer = new(bytes.Buffer)
	return ui
}
// TestPostProcessor_ImplementsPostProcessor is a compile-time assertion
// that PostProcessor satisfies the packer.PostProcessor interface.
func TestPostProcessor_ImplementsPostProcessor(t *testing.T) {
	var _ packer.PostProcessor = &PostProcessor{}
}
// TestPostProcessor_PostProcess drives the full post-process flow with a
// mock driver and verifies the returned artifact, the keep flag, that the
// driver was invoked, and the exact Dockerfile rendered from testConfig.
func TestPostProcessor_PostProcess(t *testing.T) {
	driver := &MockDriver{}
	p := &PostProcessor{Driver: driver}
	if err := p.Configure(testConfig()); err != nil {
		t.Fatalf("err: %s", err)
	}
	// Mock artifact whose builder ID marks it as a docker-import result;
	// its ID becomes the FROM image below.
	artifact := &packer.MockArtifact{
		BuilderIdValue: dockerimport.BuilderId,
		IdValue: "1234567890abcdef",
	}
	result, keep, err := p.PostProcess(testUi(), artifact)
	if _, ok := result.(packer.Artifact); !ok {
		t.Fatal("should be instance of Artifact")
	}
	if !keep {
		t.Fatal("should keep")
	}
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if !driver.BuildImageCalled {
		t.Fatal("should call BuildImage")
	}
	// Expected rendering: one line per directive from testConfig, in the
	// post-processor's fixed order. The raw string must stay byte-exact.
	dockerfile := `FROM 1234567890abcdef
MAINTAINER foo
CMD ["/foo/bar"]
LABEL "foo"="bar"
EXPOSE 1234
ENV foo bar
ENTRYPOINT ["/foo/bar"]
VOLUME ["/foo/bar"]
USER foo
WORKDIR /foo/bar`
	if driver.BuildImageDockerfile.String() != dockerfile {
		t.Fatalf("should render Dockerfile correctly: %s", driver.BuildImageDockerfile.String())
	}
}
// TestPostProcessor_processVar checks that processVar renders a plain
// string verbatim, encodes string/interface slices as JSON arrays, and
// accepts nil without error. (Also removes the stray trailing semicolons
// and non-gofmt literal spacing the original had.)
func TestPostProcessor_processVar(t *testing.T) {
	driver := &MockDriver{}
	p := &PostProcessor{Driver: driver}
	if err := p.Configure(testConfig()); err != nil {
		t.Fatalf("err: %s", err)
	}
	// A plain string passes through unchanged.
	res, err := p.processVar("foo")
	if err != nil {
		t.Fatalf("failed to process variable: %s", err)
	}
	if res != "foo" {
		t.Fatalf("should be foo: %s", res)
	}
	// A []string renders as a JSON array.
	res, err = p.processVar([]string{"foo", "bar"})
	if err != nil {
		t.Fatalf("failed to process variable: %s", err)
	}
	if res != `["foo","bar"]` {
		t.Fatalf(`should be ["foo","bar"]: %s`, res)
	}
	// A []interface{} renders the same way.
	res, err = p.processVar([]interface{}{"foo", "bar"})
	if err != nil {
		t.Fatalf("failed to process variable: %s", err)
	}
	if res != `["foo","bar"]` {
		t.Fatalf(`should be ["foo","bar"]: %s`, res)
	}
	// nil must be tolerated without error.
	if _, err = p.processVar(nil); err != nil {
		t.Fatalf("failed to process variable: %s", err)
	}
}
|
package data
type Data struct {
Result interface{} `json: result`
Infomation Infomation `json: infomation`
}
type Infomation struct {
Profile string `json: profile`
Region string `json: region`
}
type Infomations []Infomation
|
package types
import (
"github.com/openshift/installer/pkg/types/alibabacloud"
"github.com/openshift/installer/pkg/types/aws"
"github.com/openshift/installer/pkg/types/azure"
"github.com/openshift/installer/pkg/types/baremetal"
"github.com/openshift/installer/pkg/types/gcp"
"github.com/openshift/installer/pkg/types/ibmcloud"
"github.com/openshift/installer/pkg/types/libvirt"
"github.com/openshift/installer/pkg/types/nutanix"
"github.com/openshift/installer/pkg/types/openstack"
"github.com/openshift/installer/pkg/types/ovirt"
"github.com/openshift/installer/pkg/types/powervs"
"github.com/openshift/installer/pkg/types/vsphere"
)
// ClusterMetadata contains information
// regarding the cluster that was created by installer.
type ClusterMetadata struct {
	// ClusterName is the name for the cluster.
	ClusterName string `json:"clusterName"`
	// ClusterID is a globally unique ID that is used to identify an Openshift cluster.
	ClusterID string `json:"clusterID"`
	// InfraID is an ID that is used to identify cloud resources created by the installer.
	InfraID string `json:"infraID"`
	// ClusterPlatformMetadata is embedded inline so the platform-specific
	// fields serialize at the top level of the JSON document.
	ClusterPlatformMetadata `json:",inline"`
}
// ClusterPlatformMetadata contains metadata for platform. One field per
// supported platform; Platform() reports the first non-nil entry.
type ClusterPlatformMetadata struct {
	AlibabaCloud *alibabacloud.Metadata `json:"alibabacloud,omitempty"`
	AWS *aws.Metadata `json:"aws,omitempty"`
	OpenStack *openstack.Metadata `json:"openstack,omitempty"`
	Libvirt *libvirt.Metadata `json:"libvirt,omitempty"`
	Azure *azure.Metadata `json:"azure,omitempty"`
	GCP *gcp.Metadata `json:"gcp,omitempty"`
	IBMCloud *ibmcloud.Metadata `json:"ibmcloud,omitempty"`
	BareMetal *baremetal.Metadata `json:"baremetal,omitempty"`
	Ovirt *ovirt.Metadata `json:"ovirt,omitempty"`
	PowerVS *powervs.Metadata `json:"powervs,omitempty"`
	VSphere *vsphere.Metadata `json:"vsphere,omitempty"`
	Nutanix *nutanix.Metadata `json:"nutanix,omitempty"`
}
// Platform returns a string representation of the platform
// (e.g. "aws" if AWS is non-nil). It returns an empty string if no
// platform is configured.
func (cpm *ClusterPlatformMetadata) Platform() string {
	if cpm == nil {
		return ""
	}
	// Every platform reports the Name constant exported by its package.
	// The BareMetal case previously returned the literal "baremetal";
	// baremetal.Name keeps it consistent with every other platform.
	switch {
	case cpm.AlibabaCloud != nil:
		return alibabacloud.Name
	case cpm.AWS != nil:
		return aws.Name
	case cpm.Libvirt != nil:
		return libvirt.Name
	case cpm.OpenStack != nil:
		return openstack.Name
	case cpm.Azure != nil:
		return azure.Name
	case cpm.GCP != nil:
		return gcp.Name
	case cpm.IBMCloud != nil:
		return ibmcloud.Name
	case cpm.BareMetal != nil:
		return baremetal.Name
	case cpm.Ovirt != nil:
		return ovirt.Name
	case cpm.PowerVS != nil:
		return powervs.Name
	case cpm.VSphere != nil:
		return vsphere.Name
	case cpm.Nutanix != nil:
		return nutanix.Name
	}
	return ""
}
|
package _303_Range_Sum_Query_Immutable
// NumArray answers range-sum queries in O(1) using a prefix-sum table.
type NumArray struct {
	result []int // result[i] holds the sum of nums[0..i]
}

// Constructor builds a NumArray from nums. The input is copied first:
// the original wrote the prefix sums back into the caller's slice,
// silently mutating it.
func Constructor(nums []int) NumArray {
	prefix := make([]int, len(nums))
	copy(prefix, nums)
	for i := 1; i < len(prefix); i++ {
		prefix[i] += prefix[i-1]
	}
	return NumArray{result: prefix}
}

// SumRange returns the sum of the elements with indices i..j inclusive.
// It returns -1 for an invalid range (i > j), preserving the original
// contract.
func (na *NumArray) SumRange(i int, j int) int {
	if i > j {
		return -1
	}
	if i == 0 {
		return na.result[j]
	}
	return na.result[j] - na.result[i-1]
}
/**
* Your NumArray object will be instantiated and called as such:
* obj := Constructor(nums);
* param_1 := obj.SumRange(i,j);
*/
|
package main
import (
"errors"
"log"
"math"
)
// main computes the square root of a sample value and logs the result,
// terminating the program when the input is invalid.
func main() {
	input := 9.0
	root, err := sqrt(input)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("Sqrt of %v = %f", input, root)
}
// sqrt returns the square root of f, or an error when f is negative
// (the real square root of a negative number is undefined).
func sqrt(f float64) (float64, error) {
	if f < 0 {
		return 0, errors.New("Square root of negative number")
	}
	// math.Sqrt is the direct, idiomatic way to take a square root;
	// the original computed the same value via math.Pow(f, 0.5).
	return math.Sqrt(f), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.