text stringlengths 11 4.05M |
|---|
package xbase
import (
"encoding/hex"
"log"
"os"
"sync"
"github.com/syndtr/goleveldb/leveldb"
)
// MustHexEncode returns the lowercase hexadecimal encoding of src as a
// freshly allocated byte slice.
func MustHexEncode(src []byte) []byte {
	out := make([]byte, hex.EncodedLen(len(src)))
	hex.Encode(out, src)
	return out
}
// MustHexEncodeWithEOF hex-encodes src and appends the indicesEOF
// terminator byte, producing a key ready for trie insertion.
func MustHexEncodeWithEOF(src []byte) []byte {
	return append(MustHexEncode(src), indicesEOF)
}
// MustHexDecode decodes the hex-encoded src, panicking on malformed
// input (hence the Must prefix).
func MustHexDecode(src []byte) []byte {
	out := make([]byte, hex.DecodedLen(len(src)))
	n, err := hex.Decode(out, src)
	if err != nil {
		panic(err)
	}
	return out[:n]
}
var (
// indicesMap maps each hex digit — plus the 'g' EOF sentinel — to its
// child-slot index; 'g' deliberately sits just past the hex alphabet
// at slot 16.
indicesMap map[byte]int = map[byte]int{
'0': 0,
'1': 1,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
'7': 7,
'8': 8,
'9': 9,
'a': 10,
'b': 11,
'c': 12,
'd': 13,
'e': 14,
'f': 15,
'g': 16,
}
// indicesEOF terminates every encoded key. It is outside the hex
// alphabet, so no encoded key byte can collide with it.
indicesEOF byte = 'g'
)
// XBase is a trie-backed key/value store persisted in leveldb.
type XBase struct {
// root is the in-memory root node of the trie.
root *Node
// db is the backing leveldb handle.
db *leveldb.DB
// log receives diagnostic output (prefixed "[XBase]").
log *log.Logger
// lock guards root for concurrent readers/writers.
lock sync.RWMutex
}
// NewXBase opens (or creates) the leveldb database at path and returns
// a store rooted at the node stored under the `root` key. A nil root
// starts an empty trie. It panics if the database cannot be opened or
// the root node cannot be loaded/decoded.
func NewXBase(path string, root []byte) *XBase {
// set root as a NOD, root is always a NOD
n := newNode(nil)
xbase := &XBase{}
db, err := leveldb.OpenFile(path, nil)
if err != nil {
panic(err)
}
xbase.db = db
xbase.log = log.New(os.Stderr, "[XBase]", 0)
if root != nil {
// Load the serialized root node from the database.
rootBytes, err := db.Get(root, nil)
if err != nil {
panic(err)
}
xbase.log.Printf("get root: %x\n", rootBytes)
//get root from
if err := n.FromBytes(rootBytes); err != nil {
panic(err)
}
}
xbase.root = n
return xbase
}
// Put stores value under key. The key is hex-encoded and EOF-terminated
// before insertion into the in-memory trie; it always reports success.
func (xbase *XBase) Put(key, value []byte) error {
	encoded := MustHexEncodeWithEOF(key)
	xbase.lock.Lock()
	defer xbase.lock.Unlock()
	xbase.ins(encoded, value)
	return nil
}
// Get looks up the value stored under key, holding the read lock for
// the duration of the trie walk.
func (xbase *XBase) Get(key []byte) ([]byte, error) {
	encoded := MustHexEncodeWithEOF(key)
	xbase.lock.RLock()
	defer xbase.lock.RUnlock()
	return xbase.get(encoded)
}
// Delete removes the entry stored under key, if any.
func (xbase *XBase) Delete(key []byte) error {
	encoded := MustHexEncodeWithEOF(key)
	xbase.lock.Lock()
	defer xbase.lock.Unlock()
	return xbase.del(encoded)
}
// Commit walks the trie, batches all dirty nodes, writes them to
// leveldb in one batch, and returns the resulting root hash.
// NOTE(review): only the read lock is taken while commit() runs and the
// batch is written — confirm commit() does not mutate shared node
// state, otherwise this needs the write lock.
func (xbase *XBase) Commit() []byte {
xbase.lock.RLock()
defer xbase.lock.RUnlock()
batchWrite := new(leveldb.Batch)
var isdirty bool
hash := xbase.commit(xbase.root, batchWrite, &isdirty)
// Only touch the database when something actually changed.
if isdirty {
xbase.log.Printf("dirty root, begin to write batch")
if err := xbase.db.Write(batchWrite, nil); err != nil {
panic(err)
}
}
return hash
}
// Close releases the underlying leveldb handle, if one was opened.
func (xbase *XBase) Close() {
	if xbase.db == nil {
		return
	}
	xbase.db.Close()
}
|
package cmd
import (
"flag"
"fmt"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/klog"
"github.com/magratheaguide/cybertron/pkg/theme"
)
var (
// version and versionCommit record build metadata, set by Execute.
version string
versionCommit string
// inputFile is the file parsed by the read subcommand.
inputFile string
// Flags for the build subcommand.
stylesheet string
name string
wrapper string
macros string
templates string
rootDir string
outputDir string
)
func init() {
rootCmd.AddCommand(buildCmd)
buildCmd.PersistentFlags().StringVarP(&name, "name", "n", "theme", "The name of the theme. Will be used as the filename.")
buildCmd.PersistentFlags().StringVarP(&stylesheet, "stylesheet", "s", "assets/stylesheet.css", "The path to the stylesheet.")
buildCmd.PersistentFlags().StringVarP(&wrapper, "wrapper", "w", "wrapper.html", "The path to the wrapper.html.")
buildCmd.PersistentFlags().StringVarP(¯os, "macros-folder", "m", "macros", "The name of the folder with the macros in it.")
buildCmd.PersistentFlags().StringVarP(&templates, "templates-folder", "t", "html-templates", "The name of the folder with the templates in it.")
buildCmd.PersistentFlags().StringVarP(&rootDir, "directory", "d", "./", "Where to run cybertron. Defaults to current directory.")
buildCmd.PersistentFlags().StringVarP(&outputDir, "output-directory", "o", "./", "Directory to output the theme xml file to.")
rootCmd.AddCommand(readCmd)
readCmd.PersistentFlags().StringVarP(&inputFile, "input-file", "f", "", "The file to input")
klog.InitFlags(nil)
pflag.CommandLine.AddGoFlag(flag.CommandLine.Lookup("v"))
}
// rootCmd is the bare "cybertron" command; invoked without a
// subcommand it prints help and exits non-zero.
var rootCmd = &cobra.Command{
Use: "cybertron",
Short: "cybertron",
Long: `A tool to compile Jcink XML files`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("You must specify a sub-command.")
err := cmd.Help()
if err != nil {
klog.Error(err)
}
os.Exit(1)
},
}
// buildCmd compiles a Jcink XML theme from the configured stylesheet,
// wrapper, macro folder, and template folder, writing <name>.xml into
// the output directory. Any failure aborts via klog.Fatal.
var buildCmd = &cobra.Command{
	Use:   "build",
	Short: "build",
	Long:  `Builds a jcink xml theme from a set of files`,
	Run: func(cmd *cobra.Command, args []string) {
		// Resolve input paths relative to the configured root directory.
		config := theme.Config{
			Name:           name,
			StylesSheet:    filepath.Join(rootDir, stylesheet),
			Wrapper:        filepath.Join(rootDir, wrapper),
			MacroFolder:    macros,
			TemplateFolder: templates,
			RootDir:        rootDir,
		}
		output, err := config.Construct()
		if err != nil {
			klog.Fatal(err)
		}
		if output == nil {
			// klog.Fatal already exits; the old trailing os.Exit(1)
			// was unreachable and has been dropped.
			klog.Fatal("got a nil return. something has gone very wrong.")
		}
		// filepath.Join replaces fmt.Sprintf("%s/%s.xml", ...) so the
		// path is well-formed on every OS and with trailing slashes.
		outputFile := filepath.Join(outputDir, name+".xml")
		f, err := os.Create(outputFile)
		if err != nil {
			klog.Fatal(err)
		}
		defer f.Close()
		if _, err = f.WriteString(*output); err != nil {
			klog.Fatal(err)
		}
		fmt.Printf("Successfully exported to %s\n", outputFile)
	},
}
// readCmd parses an existing theme file (given via --input-file) and
// exits non-zero on failure.
var readCmd = &cobra.Command{
Use: "read",
Short: "read",
Long: `Parses a theme file.`,
Run: func(cmd *cobra.Command, args []string) {
err := theme.Read(inputFile)
if err != nil {
klog.Error(err)
os.Exit(1)
}
os.Exit(0)
},
}
// Execute records the build version metadata and runs the root
// command, exiting non-zero if cobra reports an error.
func Execute(VERSION string, COMMIT string) {
	version, versionCommit = VERSION, COMMIT
	if err := rootCmd.Execute(); err != nil {
		klog.Error(err)
		os.Exit(1)
	}
}
|
package main
import (
"io"
"strings"
"testing"
"unicode"
)
// NewTestWriter returns an io.Writer that forwards each write to
// t.Log, with trailing whitespace trimmed from the chunk.
func NewTestWriter(t *testing.T) io.Writer {
	return &TestWriter{t: t}
}

// TestWriter adapts a *testing.T to the io.Writer interface.
type TestWriter struct {
	t *testing.T
}

// Write logs p (minus trailing whitespace) and reports the full input
// length as written so callers never see a short write.
func (w *TestWriter) Write(p []byte) (n int, err error) {
	w.t.Helper()
	w.t.Log(strings.TrimRightFunc(string(p), unicode.IsSpace))
	return len(p), nil
}
|
package qiwi
import (
"bytes"
"compress/zlib"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"testing"
)
// Examplezlibzompress demonstrates zlibzompress by printing the raw
// zlib-compressed bytes of "Hello world"; the Output comment pins the
// expected stream.
func Examplezlibzompress() {
// eJzzSM3JyVcozy/KSQEAGKsEPQ== -- compressed and base64 encoded
// if string(res) != string(result) {
// t.Errorf("Wrong result of compress function %+q", res)
// }
data := []byte("Hello world")
res := zlibzompress(data)
fmt.Println([]byte(res))
// Output: [120 156 242 72 205 201 201 87 40 207 47 202 73 1 4 0 0 255 255 24 171 4 61]
}
// TestGooglePay exercises Payment.GooglePay against a mock of the QIWI
// endpoint. The mock validates the submitted token envelope: it must
// arrive as JSON, carry a base64-encoded, zlib-compressed payment
// token, and that token's protocolVersion must be "ECv2".
func TestGooglePay(t *testing.T) {
	type GooglePayToken struct {
		ProtoVer string `json:"protocolVersion"`
	}
	google_pay_token := []byte(`{
"protocolVersion":"ECv2",
"signature":"MEQCIH6Q4OwQ0jAceFEkGF0JID6sJNXxOEi4r+mA7biRxqBQAiAondqoUpU/bdsrAOpZIsrHQS9nwiiNwOrr24RyPeHA0Q\u003d\u003d",
"intermediateSigningKey":{
"signedKey": "{\"keyExpiration\":\"1542323393147\",\"keyValue\":\"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE/1+3HBVSbdv+j7NaArdgMyoSAM43yRydzqdg1TxodSzA96Dj4Mc1EiKroxxunavVIvdxGnJeFViTzFvzFRxyCw\\u003d\\u003d\"}",
"signatures": ["MEYCIQCO2EIi48s8VTH+ilMEpoXLFfkxAwHjfPSCVED/QDSHmQIhALLJmrUlNAY8hDQRV/y1iKZGsWpeNmIP+z+tCQHQxP0v"]
},
"signedMessage":"{\"tag\":\"jpGz1F1Bcoi/fCNxI9n7Qrsw7i7KHrGtTf3NrRclt+U\\u003d\",\"ephemeralPublicKey\":\"BJatyFvFPPD21l8/uLP46Ta1hsKHndf8Z+tAgk+DEPQgYTkhHy19cF3h/bXs0tWTmZtnNm+vlVrKbRU9K8+7cZs\\u003d\",\"encryptedMessage\":\"mKOoXwi8OavZ\"}"
}`)
	// HTTP MOCK
	serv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var err error
		var buf, b bytes.Buffer
		var p Payment
		var tk GooglePayToken
		io.Copy(&buf, r.Body)
		err = json.Unmarshal(buf.Bytes(), &p)
		if err != nil {
			fmt.Fprintln(w, `{
"serviceName" : "payin-core",
"errorCode" : "validation.json",
"description" : "`+err.Error()+`",
"userMessage" : "Validation error",
"dateTime" : "2018-11-13T16:49:59.166+03:00",
"traceId" : "fd0e2a08c63ace83"
}`)
			return
		}
		// Unwrap the token: base64 -> zlib -> JSON.
		dec, err := base64.StdEncoding.DecodeString(p.PaymentMethod.GooglePaymentToken)
		if err != nil {
			fmt.Fprintln(w, `{
"serviceName" : "payin-core",
"errorCode" : "validation.base64",
"description" : "`+err.Error()+`",
"userMessage" : "Validation error",
"dateTime" : "2018-11-13T16:49:59.166+03:00",
"traceId" : "fd0e2a08c63ace83"
}`)
			return
		}
		br := bytes.NewReader(dec)
		data, err := zlib.NewReader(br)
		if err != nil {
			fmt.Fprintln(w, `{
"serviceName" : "payin-core",
"errorCode" : "validation.zlib",
"description" : "`+err.Error()+`",
"userMessage" : "Validation error",
"dateTime" : "2018-11-13T16:49:59.166+03:00",
"traceId" : "fd0e2a08c63ace83"
}`)
			return
		}
		defer data.Close()
		io.Copy(&b, data)
		err = json.Unmarshal(b.Bytes(), &tk)
		if err != nil {
			fmt.Fprintln(w, `{
"serviceName" : "payin-core",
"errorCode" : "validation.tokenjson",
"description" : "`+err.Error()+`",
"userMessage" : "Validation error",
"dateTime" : "2018-11-13T16:49:59.166+03:00",
"traceId" : "fd0e2a08c63ace83"
}`)
			return
		}
		if tk.ProtoVer != "ECv2" {
			// BUG FIX: err is nil on this branch, so the original
			// err.Error() call panicked inside the handler. Report the
			// actual mismatch instead.
			fmt.Fprintln(w, `{
"serviceName" : "payin-core",
"errorCode" : "validation.tokenversionpayload",
"description" : "unsupported protocolVersion: `+tk.ProtoVer+`",
"userMessage" : "Validation error",
"dateTime" : "2018-11-13T16:49:59.166+03:00",
"traceId" : "fd0e2a08c63ace83"
}`)
			return
		}
		fmt.Fprintln(w, `{
"siteId": "23044",
"billId": "893794793973",
"amount": {
"value": 100.00,
"currency": "RUB"
},
"status": {
"value": "CREATED",
"changedDateTime": "2018-03-05T11:27:41"
},
"comment": "Text comment",
"customFields": {
"cf1": "Some data",
"FROM_MERCHANT_CONTRACT_ID": "1234"
},
"creationDateTime": "2018-03-05T11:27:41",
"expirationDateTime": "2018-04-13T14:30:00",
"payUrl": "https://oplata.qiwi.com/form/?invoice_uid=d875277b-6f0f-445d-8a83-f62c7c07be77"
}`)
	}))
	serv.Start()
	defer serv.Close()
	// Route request to mocked http server
	pay := New("billId", "SiteID", "TOKEN", serv.URL)
	amount := 500 // 5.00RUB
	err := pay.GooglePay(context.TODO(), amount, google_pay_token)
	if err != nil {
		t.Errorf("GooglePay method error: %s", err)
	}
}
|
package main
import (
"fmt"
"mygithub/twogocode/d01工厂模式解决私有结构体引入/student"
)
func main() {
// NewStudent returns a pointer; the factory pattern gives access to a
// struct whose definition is private to the student package.
stu := student.NewStudent("tom", 88.8)
// Dereference to print the underlying struct value.
fmt.Println(*stu)
// (*stu).Field can be written as stu.Field — Go auto-dereferences.
//fmt.Println("name=",(*stu).Name,"score=",(*stu).Score)
//fmt.Println("name=",stu.Name,"score=",stu.Score)
// When a struct field is private, use an accessor method to read it.
fmt.Println("name=", stu.Name, "score=", stu.GetScore())
}
|
package luxafor
import (
"time"
"github.com/karalabe/hid"
"github.com/pkg/errors"
)
// Luxafor is used to access the devices.
type Luxafor struct {
// deviceInfo identifies one enumerated HID device; a fresh handle is
// opened per command and closed afterwards.
deviceInfo hid.DeviceInfo
}
const (
// USB vendor/product IDs used to locate Luxafor devices during HID
// enumeration (0x04d8 is a Microchip-assigned vendor ID).
vendorID uint16 = 0x04d8
deviceID uint16 = 0xf372
)
// Enumerate returns a slice of attached Luxafors, one per matching HID
// device.
func Enumerate() []Luxafor {
	infos := hid.Enumerate(vendorID, deviceID)
	// BUG FIX: the original used make([]Luxafor, len(infos)) and then
	// appended, producing a slice of 2*len(infos) whose first half was
	// zero-valued Luxafors with unusable deviceInfo. Allocate capacity
	// only.
	luxs := make([]Luxafor, 0, len(infos))
	for _, info := range infos {
		luxs = append(luxs, Luxafor{deviceInfo: info})
	}
	return luxs
}
// sendCommand opens the device, writes one command report, and closes
// the handle again. The report layout is: command, target LED, R, G,
// B, then the transition speed.
func (lux Luxafor) sendCommand(command byte, led LED, r, g, b, speed uint8) (err error) {
	info := lux.deviceInfo
	device, err := info.Open()
	if err != nil {
		return errors.Wrap(err, "open lux")
	}
	defer func() { _ = device.Close() }() // Best effort.
	// BUG FIX: the speed byte was previously dropped from the payload,
	// so Fade behaved like an instant Set. Static commands ignore the
	// trailing byte (callers pass 0).
	if _, err := device.Write([]byte{command, byte(led), r, g, b, speed}); err != nil {
		return errors.Wrap(err, "device write")
	}
	return nil
}
// Solid turns the specified luxafor into a solid RGB color.
// It is shorthand for Set(All, r, g, b).
func (lux Luxafor) Solid(r, g, b uint8) (err error) {
return lux.Set(All, r, g, b)
}
// Set sets a golux.LED to the specific RGB value.
// The static command has no transition, so speed is passed as 0.
func (lux Luxafor) Set(led LED, r, g, b uint8) (err error) {
return lux.sendCommand(static, led, r, g, b, 0) // speed isn't used
}
// Sets applies one RGB value to every LED in leds, stopping at the
// first failure.
func (lux Luxafor) Sets(leds []LED, r, g, b uint8) (err error) {
	for i := range leds {
		if err := lux.Set(leds[i], r, g, b); err != nil {
			return errors.Wrap(err, "set led")
		}
	}
	return nil
}
// Fade sets the led to rgb at speed.
// Higher speed values mean a longer transition.
func (lux Luxafor) Fade(led LED, r, g, b, speed uint8) (err error) {
return lux.sendCommand(fade, led, r, g, b, speed)
}
// Police alternates the front and back LEDs between red and blue for
// the given number of loops, half a second per phase.
func (lux Luxafor) Police(loops int) (err error) {
	for i := 0; i < loops; i++ {
		// BUG FIX: Fade errors were silently discarded even though the
		// signature promises an error; surface the first failure.
		if err := lux.Fade(FrontAll, 255, 0, 0, 255); err != nil {
			return errors.Wrap(err, "fade front red")
		}
		if err := lux.Fade(BackAll, 0, 0, 255, 255); err != nil {
			return errors.Wrap(err, "fade back blue")
		}
		time.Sleep(500 * time.Millisecond)
		if err := lux.Fade(FrontAll, 0, 0, 255, 255); err != nil {
			return errors.Wrap(err, "fade front blue")
		}
		if err := lux.Fade(BackAll, 255, 0, 0, 255); err != nil {
			return errors.Wrap(err, "fade back red")
		}
		time.Sleep(500 * time.Millisecond)
	}
	return nil
}
// Off turns off every LED on the luxafor by writing a static
// all-LEDs-black command.
func (lux Luxafor) Off() (err error) {
	device, openErr := lux.deviceInfo.Open()
	if openErr != nil {
		return errors.Wrap(openErr, "open lux")
	}
	defer func() { _ = device.Close() }() // Best effort.
	payload := []byte{static, byte(All), 0, 0, 0}
	if _, writeErr := device.Write(payload); writeErr != nil {
		return errors.Wrap(writeErr, "device write")
	}
	return nil
}
|
// Copyright 2019 Kuei-chun Chen. All rights reserved.
package analytics
import (
"io/ioutil"
"os"
"strings"
"testing"
)
// DiagnosticDataDirectory holds the FTDC test fixtures; the filename
// below points at one known metrics capture inside it.
const DiagnosticDataDirectory = "../diagnostic.data"
const DiagnosticDataFilename = DiagnosticDataDirectory + "/metrics.2017-10-12T20-08-53Z-00000"
// TestReadDiagnosticFiles collects every metrics.* / keyhole_stats.*
// file in the fixture directory and feeds them through
// readDiagnosticFiles, failing on any parse error.
func TestReadDiagnosticFiles(t *testing.T) {
	var err error
	var files []os.FileInfo
	var filenames []string
	if files, err = ioutil.ReadDir(DiagnosticDataDirectory); err != nil {
		t.Fatal(err)
	}
	for _, f := range files {
		// strings.HasPrefix replaces the old strings.Index(...) != 0
		// idiom (same behavior, no full scan, vet-clean).
		if !strings.HasPrefix(f.Name(), "metrics.") && !strings.HasPrefix(f.Name(), "keyhole_stats.") {
			continue
		}
		filename := DiagnosticDataDirectory + "/" + f.Name()
		filenames = append(filenames, filename)
	}
	d := NewDiagnosticData()
	if err = d.readDiagnosticFiles(filenames); err != nil {
		t.Fatal(err)
	}
}
|
package tx
import (
"time"
"incognito-chain/common"
"incognito-chain/privacy"
"incognito-chain/privacy/privacy_v1/schnorr"
"incognito-chain/privacy/privacy_v1/zeroknowledge/serialnumbernoprivacy"
"incognito-chain/privacy/privacy_v2"
)
// SignNoPrivacy signs hashedMessage with a Schnorr key derived from
// privKey and returns the signature bytes together with the serialized
// signing public key.
// NOTE(review): the randomness component r is fixed to 0, which looks
// intentional for the "no privacy" scheme — confirm against the
// schnorr package's contract.
func SignNoPrivacy(privKey *privacy.PrivateKey, hashedMessage []byte) (signatureBytes []byte, sigPubKey []byte, err error) {
sk := new(privacy.Scalar).FromBytesS(*privKey)
r := new(privacy.Scalar).FromUint64(0)
sigKey := new(schnorr.SchnorrPrivateKey)
sigKey.Set(sk, r)
signature, err := sigKey.Sign(hashedMessage)
if err != nil {
return nil, nil, err
}
signatureBytes = signature.Bytes()
sigPubKey = sigKey.GetPublicKey().GetPublicKey().ToBytesS()
return signatureBytes, sigPubKey, nil
}
// initializeTxConversion fills in the fixed header fields of a
// conversion transaction (fee, version 2, conversion type, sender
// shard byte, lock time, info) and rebalances the payment list via
// updateParamsWhenOverBalance.
func initializeTxConversion(tx *Tx, params *TxParams, paymentsPtr *[]PaymentReader) error {
var err error
// Get Keyset from param
skBytes := *params.SenderSK
senderPaymentAddress := privacy.GeneratePaymentAddress(skBytes)
// Tx: initialize some values
tx.Fee = params.Fee
tx.Version = 2
tx.Type = common.TxConversionType
tx.pubKeyLastByteSender = common.GetShardIDFromLastByte(senderPaymentAddress.Pk[len(senderPaymentAddress.Pk)-1])
// non-zero means it was set before
if tx.LockTime == 0 {
tx.LockTime = time.Now().Unix()
}
tx.Info = params.Info
// Params: update balance if overbalance
if err = updateParamsWhenOverBalance(paymentsPtr, params, senderPaymentAddress); err != nil {
return err
}
return nil
}
// getOutputcoinsFromPaymentInfo builds one CoinV2 per payment info.
// A nil tokenID (or the PRV coin ID) yields plain PRV coins; any other
// token yields confidential-asset coins tagged with that token ID.
func getOutputcoinsFromPaymentInfo(paymentInfos []*privacy.PaymentInfo, tokenID *common.Hash) ([]*privacy.CoinV2, error) {
var err error
isPRV := (tokenID == nil) || (*tokenID == common.PRVCoinID)
c := make([]*privacy.CoinV2, len(paymentInfos))
for i := 0; i < len(paymentInfos); i += 1 {
if isPRV {
c[i], _, err = privacy.NewCoinFromPaymentInfo(privacy.NewCoinParams().FromPaymentInfo(paymentInfos[i]))
if err != nil {
return nil, err
}
} else {
createdCACoin, _, _, err := privacy.NewCoinCA(privacy.NewCoinParams().FromPaymentInfo(paymentInfos[i]), tokenID)
if err != nil {
return nil, err
}
// Record the plain token ID on the CA coin before use.
createdCACoin.SetPlainTokenID(tokenID)
c[i] = createdCACoin
}
}
return c, nil
}
// proveConversion builds the conversion proof for tx: it derives the
// output coins, constructs a serial-number witness per input coin,
// proves the conversion, then signs the transaction hash with the
// no-privacy Schnorr signature.
func proveConversion(tx *Tx, params *TxParams) error {
lenInputs := len(params.InputCoins)
inputCoins := params.InputCoins
var err error
outputCoins, err := getOutputcoinsFromPaymentInfo(params.PaymentInfo, params.TokenID)
if err != nil {
return err
}
serialnumberWitness := make([]*serialnumbernoprivacy.SNNoPrivacyWitness, lenInputs)
for i := 0; i < len(inputCoins); i++ {
/***** Build witness for proving that serial number is derived from the committed derivator *****/
serialnumberWitness[i] = new(serialnumbernoprivacy.SNNoPrivacyWitness)
serialnumberWitness[i].Set(inputCoins[i].GetKeyImage(), inputCoins[i].GetPublicKey(),
inputCoins[i].GetSNDerivator(), new(privacy.Scalar).FromBytesS(*params.SenderSK))
}
tx.Proof, err = privacy_v2.ProveConversion(inputCoins, outputCoins, serialnumberWitness)
if err != nil {
return err
}
// Sign over the transaction hash so the proof is covered by the
// signature.
if tx.Sig, tx.SigPubKey, err = SignNoPrivacy(params.SenderSK, tx.Hash()[:]); err != nil {
return err
}
return nil
}
func Convert(tx *Tx, params *ExtendedParams, theirTime int64) error {
gParams, err := params.GetGenericParams()
if err != nil {
return err
}
if err := initializeTxConversion(tx, gParams, ¶ms.PaymentInfo); err != nil {
return err
}
if theirTime > 0 {
tx.LockTime = theirTime
}
if err := proveConversion(tx, gParams); err != nil {
return err
}
return nil
}
func (txToken *TxToken) initTokenConversion(txNormal *Tx, params *ExtendedParams) error {
txToken.TokenData.Type = CustomTokenTransfer
txToken.TokenData.PropertyName = ""
txToken.TokenData.PropertySymbol = ""
txToken.TokenData.Mintable = false
propertyID, _ := common.TokenStringToHash(params.TokenParams.TokenID)
txToken.TokenData.PropertyID = *propertyID
temp, err := params.TokenParams.GetTokenParams()
if err != nil {
return err
}
txConvertParams := NewTxParams(
¶ms.SenderSK,
temp.Receiver,
temp.TokenInput,
0,
false,
propertyID,
nil,
params.Info,
)
if err := initializeTxConversion(txNormal, txConvertParams, ¶ms.TokenParams.TokenPaymentInfo); err != nil {
return err
}
txNormal.Type = TxTokenConversionType
if err := proveConversion(txNormal, txConvertParams); err != nil {
return err
}
return txToken.SetTxNormal(txNormal)
}
// initPRVFeeConversion proves the PRV fee leg of a token conversion on
// feeTx, stores it on the token transaction, and returns the inputs,
// input indexes, and outputs needed later for signing.
func (txToken *TxToken) initPRVFeeConversion(feeTx *Tx, params *ExtendedParams) ([]privacy.PlainCoin, []uint64, []*privacy.CoinV2, error) {
feeTx.Version = 2
feeTx.Type = common.TxTokenConversionType
inps, inputIndexes, outs, err := feeTx.provePRV(params)
if err != nil {
return nil, nil, nil, err
}
txToken.Tx = *feeTx
return inps, inputIndexes, outs, nil
}
func ConvertToken(txToken *TxToken, params *ExtendedParams, theirTime int64) error {
params.HasPrivacy = false
txPrivacyParams, err := params.GetGenericParams()
if err != nil {
return err
}
// Init tx and params (tx and params will be changed)
tx := &Tx{}
if err := tx.initializeTxAndParams(txPrivacyParams, ¶ms.PaymentInfo); err != nil {
return err
}
if theirTime > 0 {
tx.LockTime = theirTime
}
// Init PRV Fee
inps, inputIndexes, outs, err := txToken.initPRVFeeConversion(tx, params)
if err != nil {
return err
}
txn := makeTxToken(&txToken.Tx, nil, nil, nil)
// Init Token
if err := txToken.initTokenConversion(txn, params); err != nil {
return err
}
tdh, err := txToken.TokenData.Hash()
if err != nil {
return err
}
message := common.HashH(append(txToken.Tx.Hash()[:], tdh[:]...))
err = txToken.Tx.sign(inps, inputIndexes, outs, params, message[:])
if err != nil {
return err
}
return nil
}
|
package semt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00600102 is the XML document wrapper for the ISO 20022
// semt.006.001.02 message (StatementOfInvestmentFundTransactions).
type Document00600102 struct {
XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:semt.006.001.02 Document"`
Message *StatementOfInvestmentFundTransactionsV02 `xml:"StmtOfInvstmtFndTxsV02"`
}
// AddMessage allocates the document's message payload and returns it
// for population.
func (d *Document00600102) AddMessage() *StatementOfInvestmentFundTransactionsV02 {
d.Message = new(StatementOfInvestmentFundTransactionsV02)
return d.Message
}
// Scope
// An account servicer, for example, a transfer agent sends the StatementOfInvestmentFundTransactions message to the account owner, for example, an investment manager or its authorised representative to provide detailed transactions (increases and decreases) of holdings which occurred during a specified period of time.
// Usage
// The StatementOfInvestmentFundTransactions message is used to list the holdings transactions of a single (master) account or several sub-accounts.
// This message should be used at a frequency agreed bi-laterally between the account servicer and the account owner.
// This message must not be used in place of confirmation messages.
// The struct is typically populated via the Add* helper methods.
type StatementOfInvestmentFundTransactionsV02 struct {
// Reference that uniquely identifies a message from a business application standpoint.
MessageIdentification *iso20022.MessageIdentification1 `xml:"MsgId"`
// Reference to a linked message that was previously sent.
PreviousReference []*iso20022.AdditionalReference2 `xml:"PrvsRef,omitempty"`
// Reference to a linked message that was previously received.
RelatedReference []*iso20022.AdditionalReference2 `xml:"RltdRef,omitempty"`
// Pagination of the message.
MessagePagination *iso20022.Pagination `xml:"MsgPgntn"`
// General information related to the investment fund statement of transactions.
StatementGeneralDetails *iso20022.Statement8 `xml:"StmtGnlDtls"`
// Information related to an investment account.
InvestmentAccountDetails *iso20022.InvestmentAccount25 `xml:"InvstmtAcctDtls"`
// Creation/cancellation of investment units on the books of the fund or its designated agent, as a result of executing an investment fund order.
TransactionOnAccount []*iso20022.InvestmentFundTransactionsByFund2 `xml:"TxOnAcct,omitempty"`
// The sub-account of the safekeeping or investment account.
SubAccountDetails []*iso20022.SubAccountIdentification6 `xml:"SubAcctDtls,omitempty"`
// Additional information that cannot be captured in the structured elements and/or any other specific block.
Extension []*iso20022.Extension1 `xml:"Xtnsn,omitempty"`
}
// AddMessageIdentification allocates the message identification block
// and returns it for population.
func (s *StatementOfInvestmentFundTransactionsV02) AddMessageIdentification() *iso20022.MessageIdentification1 {
s.MessageIdentification = new(iso20022.MessageIdentification1)
return s.MessageIdentification
}
// AddPreviousReference appends a new previous-reference entry and
// returns it for population.
func (s *StatementOfInvestmentFundTransactionsV02) AddPreviousReference() *iso20022.AdditionalReference2 {
newValue := new(iso20022.AdditionalReference2)
s.PreviousReference = append(s.PreviousReference, newValue)
return newValue
}
// AddRelatedReference appends a new related-reference entry and
// returns it for population.
func (s *StatementOfInvestmentFundTransactionsV02) AddRelatedReference() *iso20022.AdditionalReference2 {
newValue := new(iso20022.AdditionalReference2)
s.RelatedReference = append(s.RelatedReference, newValue)
return newValue
}
// AddMessagePagination allocates the pagination block and returns it
// for population.
func (s *StatementOfInvestmentFundTransactionsV02) AddMessagePagination() *iso20022.Pagination {
s.MessagePagination = new(iso20022.Pagination)
return s.MessagePagination
}
// AddStatementGeneralDetails allocates the statement-general-details
// block and returns it for population.
func (s *StatementOfInvestmentFundTransactionsV02) AddStatementGeneralDetails() *iso20022.Statement8 {
s.StatementGeneralDetails = new(iso20022.Statement8)
return s.StatementGeneralDetails
}
// AddInvestmentAccountDetails allocates the investment-account block
// and returns it for population.
func (s *StatementOfInvestmentFundTransactionsV02) AddInvestmentAccountDetails() *iso20022.InvestmentAccount25 {
s.InvestmentAccountDetails = new(iso20022.InvestmentAccount25)
return s.InvestmentAccountDetails
}
// AddTransactionOnAccount appends a new transactions-by-fund entry and
// returns it for population.
func (s *StatementOfInvestmentFundTransactionsV02) AddTransactionOnAccount() *iso20022.InvestmentFundTransactionsByFund2 {
newValue := new(iso20022.InvestmentFundTransactionsByFund2)
s.TransactionOnAccount = append(s.TransactionOnAccount, newValue)
return newValue
}
// AddSubAccountDetails appends a new sub-account entry and returns it
// for population.
func (s *StatementOfInvestmentFundTransactionsV02) AddSubAccountDetails() *iso20022.SubAccountIdentification6 {
newValue := new(iso20022.SubAccountIdentification6)
s.SubAccountDetails = append(s.SubAccountDetails, newValue)
return newValue
}
// AddExtension appends a new extension entry and returns it for
// population.
func (s *StatementOfInvestmentFundTransactionsV02) AddExtension() *iso20022.Extension1 {
newValue := new(iso20022.Extension1)
s.Extension = append(s.Extension, newValue)
return newValue
}
|
package pid
import (
"errors"
"os"
"strconv"
"syscall"
)
var (
ErrProcessExists = errors.New("Process already exists.")
)
type PidFile struct {
path string
Pid int
}
func New(path string) (pf *PidFile, err error) {
pf = &PidFile{path, os.Getpid()}
var f *os.File
f, err = os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600)
if err != nil { // file exists
f, err = os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0600)
if err != nil {
return
}
var pidstr [10]byte
var n int
// read pid
n, err = f.Read(pidstr[:])
if err != nil {
return
}
var pid int
pid, err = strconv.Atoi(string(pidstr[:n]))
if err != nil {
return
}
// find pid
if err = syscall.Kill(pid, 0); err == nil {
err = ErrProcessExists
return
}
f.Truncate(int64(n))
}
defer f.Close()
_, err = f.WriteString(strconv.Itoa(pf.Pid))
return
}
func (pf *PidFile) Close() error {
return os.Remove(pf.path)
}
|
package postgres
import _ "github.com/lib/pq"
|
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/apigee/beta/apigee_beta_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta"
)
// EnvgroupServer implements the gRPC interface for Envgroup.
// It is stateless; each RPC builds its own client from the request's
// service account file.
type EnvgroupServer struct{}
// ProtoToApigeeBetaEnvgroupStateEnum converts a EnvgroupStateEnum enum
// from its proto representation. The zero enum value (unspecified)
// maps to nil, as does any value missing from the proto name table.
func ProtoToApigeeBetaEnvgroupStateEnum(e betapb.ApigeeBetaEnvgroupStateEnum) *beta.EnvgroupStateEnum {
if e == 0 {
return nil
}
if n, ok := betapb.ApigeeBetaEnvgroupStateEnum_name[int32(e)]; ok {
// Strip the proto name prefix to recover the bare enum string.
e := beta.EnvgroupStateEnum(n[len("ApigeeBetaEnvgroupStateEnum"):])
return &e
}
return nil
}
// ProtoToEnvgroup converts a Envgroup resource from its proto
// representation.
func ProtoToEnvgroup(p *betapb.ApigeeBetaEnvgroup) *beta.Envgroup {
	obj := &beta.Envgroup{
		Name:               dcl.StringOrNil(p.GetName()),
		CreatedAt:          dcl.Int64OrNil(p.GetCreatedAt()),
		LastModifiedAt:     dcl.Int64OrNil(p.GetLastModifiedAt()),
		State:              ProtoToApigeeBetaEnvgroupStateEnum(p.GetState()),
		ApigeeOrganization: dcl.StringOrNil(p.GetApigeeOrganization()),
	}
	obj.Hostnames = append(obj.Hostnames, p.GetHostnames()...)
	return obj
}
// ApigeeBetaEnvgroupStateEnumToProto converts a EnvgroupStateEnum enum
// to its proto representation; nil or unknown values map to the zero
// (unspecified) enum.
// NOTE(review): the lookup prefix here is "EnvgroupStateEnum" while the
// reverse conversion strips "ApigeeBetaEnvgroupStateEnum" — confirm
// this matches the generated proto value table.
func ApigeeBetaEnvgroupStateEnumToProto(e *beta.EnvgroupStateEnum) betapb.ApigeeBetaEnvgroupStateEnum {
if e == nil {
return betapb.ApigeeBetaEnvgroupStateEnum(0)
}
if v, ok := betapb.ApigeeBetaEnvgroupStateEnum_value["EnvgroupStateEnum"+string(*e)]; ok {
return betapb.ApigeeBetaEnvgroupStateEnum(v)
}
return betapb.ApigeeBetaEnvgroupStateEnum(0)
}
// EnvgroupToProto converts a Envgroup resource to its proto
// representation.
func EnvgroupToProto(resource *beta.Envgroup) *betapb.ApigeeBetaEnvgroup {
	p := &betapb.ApigeeBetaEnvgroup{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetCreatedAt(dcl.ValueOrEmptyInt64(resource.CreatedAt))
	p.SetLastModifiedAt(dcl.ValueOrEmptyInt64(resource.LastModifiedAt))
	p.SetState(ApigeeBetaEnvgroupStateEnumToProto(resource.State))
	p.SetApigeeOrganization(dcl.ValueOrEmptyString(resource.ApigeeOrganization))
	// copy replaces the element-by-element loop (same result, lowers
	// to memmove).
	sHostnames := make([]string, len(resource.Hostnames))
	copy(sHostnames, resource.Hostnames)
	p.SetHostnames(sHostnames)
	return p
}
// applyEnvgroup handles the gRPC request by passing it to the underlying Envgroup Apply() method.
// It round-trips the resource through the DCL types: proto -> beta ->
// Apply -> proto.
func (s *EnvgroupServer) applyEnvgroup(ctx context.Context, c *beta.Client, request *betapb.ApplyApigeeBetaEnvgroupRequest) (*betapb.ApigeeBetaEnvgroup, error) {
p := ProtoToEnvgroup(request.GetResource())
res, err := c.ApplyEnvgroup(ctx, p)
if err != nil {
return nil, err
}
r := EnvgroupToProto(res)
return r, nil
}
// ApplyApigeeBetaEnvgroup handles the gRPC request by building a client
// from the request's service account file and delegating to
// applyEnvgroup.
func (s *EnvgroupServer) ApplyApigeeBetaEnvgroup(ctx context.Context, request *betapb.ApplyApigeeBetaEnvgroupRequest) (*betapb.ApigeeBetaEnvgroup, error) {
cl, err := createConfigEnvgroup(ctx, request.GetServiceAccountFile())
if err != nil {
return nil, err
}
return s.applyEnvgroup(ctx, cl, request)
}
// DeleteApigeeBetaEnvgroup handles the gRPC request by passing it to
// the underlying Envgroup Delete() method; the Empty reply carries the
// delete call's error, if any.
func (s *EnvgroupServer) DeleteApigeeBetaEnvgroup(ctx context.Context, request *betapb.DeleteApigeeBetaEnvgroupRequest) (*emptypb.Empty, error) {
cl, err := createConfigEnvgroup(ctx, request.GetServiceAccountFile())
if err != nil {
return nil, err
}
return &emptypb.Empty{}, cl.DeleteEnvgroup(ctx, ProtoToEnvgroup(request.GetResource()))
}
// ListApigeeBetaEnvgroup handles the gRPC request by passing it to the
// underlying EnvgroupList() method and converting each result back to
// its proto form.
func (s *EnvgroupServer) ListApigeeBetaEnvgroup(ctx context.Context, request *betapb.ListApigeeBetaEnvgroupRequest) (*betapb.ListApigeeBetaEnvgroupResponse, error) {
cl, err := createConfigEnvgroup(ctx, request.GetServiceAccountFile())
if err != nil {
return nil, err
}
resources, err := cl.ListEnvgroup(ctx, request.GetApigeeOrganization())
if err != nil {
return nil, err
}
var protos []*betapb.ApigeeBetaEnvgroup
for _, r := range resources.Items {
rp := EnvgroupToProto(r)
protos = append(protos, rp)
}
p := &betapb.ListApigeeBetaEnvgroupResponse{}
p.SetItems(protos)
return p, nil
}
// createConfigEnvgroup builds a beta API client authenticated with the
// given service account file.
func createConfigEnvgroup(ctx context.Context, serviceAccountFile string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return beta.NewClient(conf), nil
}
|
package download
import(
"config"
"fmt"
)
// StockRtDownloader fetches realtime stock quotes from the service
// endpoint described by its config.
type StockRtDownloader struct {
// config holds the URI template for the realtime quote API.
config config.ServiceAPI
}
// GetData fetches the realtime quote for code on the given exchange
// identifier and returns the raw HTTP response body.
// NOTE(review): an unrecognized exchange leaves excode empty, which
// yields a malformed URL — confirm callers only pass the two EX$$$$
// codes below.
func (s *StockRtDownloader) GetData(code, exchange string) string {
var excode string
switch exchange {
case "EX$$$$XSHG":
excode = "sh" // Shanghai
case "EX$$$$XSHE":
excode = "sz" // Shenzhen
}
url := fmt.Sprintf(s.config.Uri, excode, code)
resp := HttpGet(url)
return resp
}
// NewStockRtDownloader builds a downloader wired to the
// "sina-realtime"/"realtime" service API from the service config
// manager.
func NewStockRtDownloader() *StockRtDownloader{
cm := config.NewServiceConfigManager()
downloader := new(StockRtDownloader)
downloader.config = cm.GetApi("sina-realtime", "realtime")
return downloader
}
|
package kaniko
import (
"io"
"strings"
)
type kanikoLogger struct {
out io.Writer
}
// Implement the io.Writer interface
func (k kanikoLogger) Write(p []byte) (n int, err error) {
str := string(p)
lines := strings.Split(str, "\n")
newLines := make([]string, 0, len(lines))
for _, line := range lines {
trimmedLine := strings.TrimSpace(line)
if strings.HasSuffix(trimmedLine, ", because it was changed.") {
continue
}
if strings.HasSuffix(trimmedLine, "No matching credentials were found, falling back on anonymous") {
continue
}
if strings.HasPrefix(trimmedLine, "ERROR: logging before flag.Parse:") {
continue
}
if strings.HasSuffix(trimmedLine, "Taking snapshot of full filesystem...") {
continue
}
if strings.HasSuffix(trimmedLine, "Taking snapshot of files...") {
continue
}
if strings.HasSuffix(trimmedLine, "No files changed in this command, skipping snapshotting.") {
continue
}
if strings.Index(trimmedLine, "Error while retrieving image from cache: getting file info") != -1 {
continue
}
newLines = append(newLines, line)
}
i, err := k.out.Write([]byte(strings.Join(newLines, "\n")))
if err != nil {
return i, err
}
return len(p), nil
}
|
package main
import (
"errors"
"fmt"
"strconv"
"time"
)
// ErrInput - If inputs are invalid.
// NOTE(review): currently declared but never returned by AddTwoNumbers.
var ErrInput = errors.New("INPUT ERROR")
// ListNode - a single node that composes the list.
type ListNode struct {
	Val  int
	Next *ListNode
}

// LinkedListNode is a singly linked list with a cached size.
type LinkedListNode struct {
	Head *ListNode
	Size int
}

// String renders the list values in order, each followed by a single
// space (e.g. "1 2 3 "). An empty list renders as "".
func (ll *LinkedListNode) String() string {
	// BUG FIX: the original dereferenced ll.Head inside the
	// `ll.Head == nil` branch, panicking on every empty list.
	if ll.Head == nil {
		return ""
	}
	stringNodes := ""
	for n := ll.Head; n != nil; n = n.Next {
		stringNodes += strconv.Itoa(n.Val) + " "
	}
	return stringNodes
}
// timeTrack prints a labelled timing report for a call, along with the
// two input lists; intended to be invoked via defer with time.Now().
func timeTrack(start time.Time, name string, l1 LinkedListNode, l2 LinkedListNode) {
elapsed := time.Since(start)
fmt.Println(name)
fmt.Println("l1: ", l1.String())
fmt.Println("l2: ", l2.String())
fmt.Println("Run Time: " + elapsed.String())
fmt.Println("------------------------------")
}
// Append adds a node holding t to the tail of the list and increments
// the size counter.
func (ll *LinkedListNode) Append(t int) {
	node := &ListNode{Val: t}
	if ll.Head == nil {
		ll.Head = node
	} else {
		tail := ll.Head
		for tail.Next != nil {
			tail = tail.Next
		}
		tail.Next = node
	}
	ll.Size++
}
// AddTwoNumbers - Solution lives here.
//
// Adds two numbers whose digits are stored in the lists (least-significant
// digit first) and returns the digit list of the sum in the same order.
//
// Fixes over the original: the carry is now reset each digit (it was set to
// 1 and never cleared), lists of unequal length are consumed to the end
// (the loop previously stopped at the shorter list), and a final carry emits
// a trailing digit (e.g. 5+5 -> [0 1] instead of a single node 10).
func AddTwoNumbers(l1 LinkedListNode, l2 LinkedListNode, tc string) (LinkedListNode, error) {
	defer timeTrack(time.Now(), "AddTwoNumbers - Test Case: "+tc, l1, l2)
	var (
		n1     = l1.Head
		n2     = l2.Head
		carry  int
		result LinkedListNode
	)
	// Keep producing digits while either list has digits left or a carry
	// remains to be emitted.
	for n1 != nil || n2 != nil || carry > 0 {
		sum := carry
		if n1 != nil {
			sum += n1.Val
			n1 = n1.Next
		}
		if n2 != nil {
			sum += n2.Val
			n2 = n2.Next
		}
		carry = sum / 10
		result.Append(sum % 10)
	}
	return result, nil
}
// main prints the exercise banner; the solution itself is AddTwoNumbers.
func main() {
	banner := "Add Two Numbers"
	fmt.Println(banner)
}
|
package random
import (
crand "crypto/rand"
"encoding/binary"
"log"
)
// Source is a math/rand-compatible source backed by crypto/rand.
type Source struct{}

// Seed is a no-op: the underlying CSPRNG requires no seeding.
func (s Source) Seed(seed int64) {}

// Int63 returns a non-negative random value with the top bit cleared.
func (s Source) Int63() int64 {
	return int64(s.Uint64() &^ (1 << 63))
}

// Uint64 reads eight cryptographically random bytes, big-endian, into v.
// A read failure is fatal: this source has no way to degrade safely.
func (s Source) Uint64() (v uint64) {
	if err := binary.Read(crand.Reader, binary.BigEndian, &v); err != nil {
		log.Fatal(err)
	}
	return v
}
|
package gcp
import (
"context"
"github.com/pkg/errors"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"github.com/openshift/installer/pkg/types/gcp"
)
// listHealthChecks returns the health checks whose name matches the
// cluster ID filter, fetching only the fields needed for deletion.
func (o *ClusterUninstaller) listHealthChecks(ctx context.Context) ([]cloudResource, error) {
	const fields = "items(name),nextPageToken"
	return o.listHealthChecksWithFilter(ctx, fields, o.clusterIDFilter(), nil)
}
// listHealthChecksWithFilter lists health checks in the project that satisfy the filter criteria.
// The fields parameter specifies which fields should be returned in the result, the filter string contains
// a filter string passed to the API to filter results. The filterFunc is a client-side filtering function
// that determines whether a particular result should be returned or not.
func (o *ClusterUninstaller) listHealthChecksWithFilter(ctx context.Context, fields string, filter string, filterFunc func(*compute.HealthCheck) bool) ([]cloudResource, error) {
	o.Logger.Debugf("Listing health checks")
	ctx, cancel := context.WithTimeout(ctx, defaultTimeout)
	defer cancel()
	result := []cloudResource{}
	req := o.computeSvc.HealthChecks.List(o.ProjectID).Fields(googleapi.Field(fields))
	if len(filter) > 0 {
		req = req.Filter(filter)
	}
	err := req.Pages(ctx, func(list *compute.HealthCheckList) error {
		for _, item := range list.Items {
			// Keep the item when there is no client-side filter, or when the
			// filter accepts it. (The original "filterFunc == nil ||
			// filterFunc != nil && filterFunc(item)" carried a redundant
			// non-nil re-check.)
			if filterFunc == nil || filterFunc(item) {
				o.Logger.Debugf("Found health check: %s", item.Name)
				result = append(result, cloudResource{
					key:      item.Name,
					name:     item.Name,
					typeName: "healthcheck",
					quota: []gcp.QuotaUsage{{
						Metric: &gcp.Metric{
							Service: gcp.ServiceComputeEngineAPI,
							Limit:   "health_checks",
						},
						Amount: 1,
					}},
				})
			}
		}
		return nil
	})
	if err != nil {
		return nil, errors.Wrapf(err, "failed to list health checks")
	}
	return result, nil
}
// deleteHealthCheck issues a delete for the health check named by item,
// using a stable per-item request ID so retries are idempotent on the API
// side. The request ID is reset on failure so the next attempt is fresh.
func (o *ClusterUninstaller) deleteHealthCheck(ctx context.Context, item cloudResource) error {
	o.Logger.Debugf("Deleting health check %s", item.name)
	ctx, cancel := context.WithTimeout(ctx, defaultTimeout)
	defer cancel()
	op, err := o.computeSvc.HealthChecks.Delete(o.ProjectID, item.name).RequestId(o.requestID(item.typeName, item.name)).Context(ctx).Do()
	if err != nil && !isNoOp(err) {
		// Real failure: drop the cached request ID so the next attempt
		// issues a new request instead of replaying this failed one.
		o.resetRequestID(item.typeName, item.name)
		return errors.Wrapf(err, "failed to delete health check %s", item.name)
	}
	if op != nil && op.Status == "DONE" && isErrorStatus(op.HttpErrorStatusCode) {
		// Operation finished but the API reported an HTTP error status.
		o.resetRequestID(item.typeName, item.name)
		return errors.Errorf("failed to delete health check %s with error: %s", item.name, operationErrorMessage(op))
	}
	if (err != nil && isNoOp(err)) || (op != nil && op.Status == "DONE") {
		// Either the resource was already gone (no-op error) or the delete
		// finished cleanly: clear bookkeeping and stop tracking the item.
		o.resetRequestID(item.typeName, item.name)
		o.deletePendingItems(item.typeName, []cloudResource{item})
		o.Logger.Infof("Deleted health check %s", item.name)
	}
	return nil
}
// destroyHealthChecks removes all health check resources that have a name prefixed
// with the cluster's infra ID. Deletion failures are suppressed as warnings;
// an error is returned only while items remain pending.
func (o *ClusterUninstaller) destroyHealthChecks(ctx context.Context) error {
	found, err := o.listHealthChecks(ctx)
	if err != nil {
		return err
	}
	pending := o.insertPendingItems("healthcheck", found)
	for _, hc := range pending {
		if deleteErr := o.deleteHealthCheck(ctx, hc); deleteErr != nil {
			o.errorTracker.suppressWarning(hc.key, deleteErr, o.Logger)
		}
	}
	if remaining := o.getPendingItems("healthcheck"); len(remaining) > 0 {
		return errors.Errorf("%d items pending", len(remaining))
	}
	return nil
}
|
package main
import (
	"fmt"
	"runtime"
	"strconv"
	"sync"
	"time"

	"github.com/colefan/gsgo/console"
	"github.com/colefan/gsgo/gameprotocol/login"
	"github.com/colefan/gsgo/netio"
	"github.com/colefan/gsgo/netio/iobuffer"
	"github.com/colefan/gsgo/netio/packet"
)
// MyClient is a load-test client; it embeds *netio.Client and adds a
// packet handler (HandleMsg).
type MyClient struct {
	*netio.Client
}
// NewMyClient wraps a freshly constructed TCP client in a MyClient.
func NewMyClient() *MyClient {
	return &MyClient{Client: netio.NewTcpClient()}
}
// HandleMsg re-encodes an incoming packet (header plus raw payload) into an
// outbound buffer and patches the length prefix. The echo write back to the
// connection stays disabled, matching the original.
func (c *MyClient) HandleMsg(cmdid uint16, pack *packet.Packet, conn netio.ConnInf) {
	//fmt.Println("[C]...read a msg, id = ", cmdid)
	out := iobuffer.NewOutBuffer(int(pack.PackLen + packet.PACKET_PROXY_HEADER_LEN))
	out = pack.Header.Encode(out)
	for _, b := range pack.RawData {
		out.PutByte(b)
	}
	// Rewrite the length field at offset 0 now that the payload is appended.
	payloadLen := out.GetLen() - packet.PACKET_PROXY_HEADER_LEN
	out.SetUint16(uint16(payloadLen), 0)
	// if conn != nil {
	// 	conn.Write(out.GetData())
	// }
}
// main spins up 3000 concurrent load-test clients that connect, send a login
// packet, and then resend it every 500ms ("see if the ping-pong gets going").
//
// Fixes: the goroutine previously captured the shared loop variable i
// (pre-Go 1.22 this races, so most goroutines observed the wrong ID), and
// 3000 goroutines wrote clientList concurrently — an unguarded map write is
// a data race/crash. The ID is now passed as an argument and the map is
// guarded by a mutex.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU() - 1)
	// Create N clients.
	var mu sync.Mutex
	clientList := make(map[int]*MyClient)
	for i := 0; i < 3000; i++ {
		go func(id int) {
			c1 := NewMyClient()
			c1.SetPackDispatcher(netio.NewDefaultPackDispatcher())
			c1.SetPackParser(netio.NewDefaultParser())
			c1.SetServerAddress("192.168.15.26")
			c1.SetServerPort(12000)
			c1.GetPackDispatcher().AddPackEventListener(strconv.Itoa(id), c1)
			err := c1.Connect()
			if err != nil {
				fmt.Println("client error,", err)
			}
			mu.Lock()
			clientList[id] = c1
			mu.Unlock()
			login := protocol_login.Login_Req{}
			login.Packet = packet.NewEmptyPacket()
			login.CmdID = 0x01
			login.UserName = "yjx"
			login.PWD = "1q2323"
			buf := login.EncodePacket(512)
			time.Sleep(20 * time.Millisecond)
			//fmt.Println("client send data :", buf.GetData())
			for {
				time.Sleep(time.Millisecond * 500)
				c1.Write(buf.GetData())
			}
		}(i + 1)
	}
	console.CheckInput()
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
datastorepb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/datastore/datastore_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/datastore"
)
// IndexServer implements the gRPC interface for Index. It is stateless; a
// client is constructed per request from the supplied service account file.
type IndexServer struct{}
// ProtoToDatastoreIndexAncestorEnum converts a IndexAncestorEnum enum from its proto representation.
func ProtoToDatastoreIndexAncestorEnum(e datastorepb.DatastoreIndexAncestorEnum) *datastore.IndexAncestorEnum {
	// 0 is the proto "unspecified" value; represent it as nil (absent).
	if e == 0 {
		return nil
	}
	// Strip the proto enum name prefix to recover the bare DCL enum value.
	if n, ok := datastorepb.DatastoreIndexAncestorEnum_name[int32(e)]; ok {
		e := datastore.IndexAncestorEnum(n[len("DatastoreIndexAncestorEnum"):])
		return &e
	}
	return nil
}
// ProtoToDatastoreIndexPropertiesDirectionEnum converts a IndexPropertiesDirectionEnum enum from its proto representation.
func ProtoToDatastoreIndexPropertiesDirectionEnum(e datastorepb.DatastoreIndexPropertiesDirectionEnum) *datastore.IndexPropertiesDirectionEnum {
	// 0 is the proto "unspecified" value; represent it as nil (absent).
	if e == 0 {
		return nil
	}
	// Strip the proto enum name prefix to recover the bare DCL enum value.
	if n, ok := datastorepb.DatastoreIndexPropertiesDirectionEnum_name[int32(e)]; ok {
		e := datastore.IndexPropertiesDirectionEnum(n[len("DatastoreIndexPropertiesDirectionEnum"):])
		return &e
	}
	return nil
}
// ProtoToDatastoreIndexStateEnum converts a IndexStateEnum enum from its proto representation.
func ProtoToDatastoreIndexStateEnum(e datastorepb.DatastoreIndexStateEnum) *datastore.IndexStateEnum {
	// 0 is the proto "unspecified" value; represent it as nil (absent).
	if e == 0 {
		return nil
	}
	// Strip the proto enum name prefix to recover the bare DCL enum value.
	if n, ok := datastorepb.DatastoreIndexStateEnum_name[int32(e)]; ok {
		e := datastore.IndexStateEnum(n[len("DatastoreIndexStateEnum"):])
		return &e
	}
	return nil
}
// ProtoToDatastoreIndexProperties converts a IndexProperties resource from its proto representation.
// A nil input passes through as nil so absent sub-messages are preserved.
func ProtoToDatastoreIndexProperties(p *datastorepb.DatastoreIndexProperties) *datastore.IndexProperties {
	if p == nil {
		return nil
	}
	obj := &datastore.IndexProperties{
		Name:      dcl.StringOrNil(p.Name),
		Direction: ProtoToDatastoreIndexPropertiesDirectionEnum(p.GetDirection()),
	}
	return obj
}
// ProtoToIndex converts a Index resource from its proto representation.
//
// Returns nil when p is nil so callers can forward absent resources (for
// example request.GetResource() on an empty request) without panicking —
// the direct field accesses below (p.IndexId etc.) would otherwise
// dereference a nil pointer; the sibling ProtoToDatastoreIndexProperties
// already guards nil the same way.
func ProtoToIndex(p *datastorepb.DatastoreIndex) *datastore.Index {
	if p == nil {
		return nil
	}
	obj := &datastore.Index{
		Ancestor: ProtoToDatastoreIndexAncestorEnum(p.GetAncestor()),
		IndexId:  dcl.StringOrNil(p.IndexId),
		Kind:     dcl.StringOrNil(p.Kind),
		Project:  dcl.StringOrNil(p.Project),
		State:    ProtoToDatastoreIndexStateEnum(p.GetState()),
	}
	for _, r := range p.GetProperties() {
		obj.Properties = append(obj.Properties, *ProtoToDatastoreIndexProperties(r))
	}
	return obj
}
// DatastoreIndexAncestorEnumToProto converts a IndexAncestorEnum enum to its proto representation.
func DatastoreIndexAncestorEnumToProto(e *datastore.IndexAncestorEnum) datastorepb.DatastoreIndexAncestorEnum {
	if e == nil {
		return datastorepb.DatastoreIndexAncestorEnum(0)
	}
	// The proto enum value names carry the full "DatastoreIndexAncestorEnum"
	// prefix (ProtoToDatastoreIndexAncestorEnum strips exactly that prefix),
	// so the lookup key must use the same prefix; the previous
	// "IndexAncestorEnum" key could never match and always fell through to 0.
	if v, ok := datastorepb.DatastoreIndexAncestorEnum_value["DatastoreIndexAncestorEnum"+string(*e)]; ok {
		return datastorepb.DatastoreIndexAncestorEnum(v)
	}
	return datastorepb.DatastoreIndexAncestorEnum(0)
}
// DatastoreIndexPropertiesDirectionEnumToProto converts a IndexPropertiesDirectionEnum enum to its proto representation.
func DatastoreIndexPropertiesDirectionEnumToProto(e *datastore.IndexPropertiesDirectionEnum) datastorepb.DatastoreIndexPropertiesDirectionEnum {
	if e == nil {
		return datastorepb.DatastoreIndexPropertiesDirectionEnum(0)
	}
	// Use the full proto enum name prefix (matching what
	// ProtoToDatastoreIndexPropertiesDirectionEnum strips); the previous
	// "IndexPropertiesDirectionEnum" key could never match.
	if v, ok := datastorepb.DatastoreIndexPropertiesDirectionEnum_value["DatastoreIndexPropertiesDirectionEnum"+string(*e)]; ok {
		return datastorepb.DatastoreIndexPropertiesDirectionEnum(v)
	}
	return datastorepb.DatastoreIndexPropertiesDirectionEnum(0)
}
// DatastoreIndexStateEnumToProto converts a IndexStateEnum enum to its proto representation.
func DatastoreIndexStateEnumToProto(e *datastore.IndexStateEnum) datastorepb.DatastoreIndexStateEnum {
	if e == nil {
		return datastorepb.DatastoreIndexStateEnum(0)
	}
	// Use the full proto enum name prefix (matching what
	// ProtoToDatastoreIndexStateEnum strips); the previous "IndexStateEnum"
	// key could never match.
	if v, ok := datastorepb.DatastoreIndexStateEnum_value["DatastoreIndexStateEnum"+string(*e)]; ok {
		return datastorepb.DatastoreIndexStateEnum(v)
	}
	return datastorepb.DatastoreIndexStateEnum(0)
}
// DatastoreIndexPropertiesToProto converts a IndexProperties resource to its proto representation.
// A nil input passes through as nil so absent sub-messages are preserved.
func DatastoreIndexPropertiesToProto(o *datastore.IndexProperties) *datastorepb.DatastoreIndexProperties {
	if o == nil {
		return nil
	}
	p := &datastorepb.DatastoreIndexProperties{
		Name:      dcl.ValueOrEmptyString(o.Name),
		Direction: DatastoreIndexPropertiesDirectionEnumToProto(o.Direction),
	}
	return p
}
// IndexToProto converts a Index resource to its proto representation.
func IndexToProto(resource *datastore.Index) *datastorepb.DatastoreIndex {
	p := &datastorepb.DatastoreIndex{
		Ancestor: DatastoreIndexAncestorEnumToProto(resource.Ancestor),
		IndexId:  dcl.ValueOrEmptyString(resource.IndexId),
		Kind:     dcl.ValueOrEmptyString(resource.Kind),
		Project:  dcl.ValueOrEmptyString(resource.Project),
		State:    DatastoreIndexStateEnumToProto(resource.State),
	}
	for _, r := range resource.Properties {
		// &r is consumed synchronously by the converter (no alias is
		// retained), so taking the loop variable's address here is safe.
		p.Properties = append(p.Properties, DatastoreIndexPropertiesToProto(&r))
	}
	return p
}
// applyIndex converts the request resource, applies it through the supplied
// client, and converts the applied result back to its proto representation.
func (s *IndexServer) applyIndex(ctx context.Context, c *datastore.Client, request *datastorepb.ApplyDatastoreIndexRequest) (*datastorepb.DatastoreIndex, error) {
	applied, err := c.ApplyIndex(ctx, ProtoToIndex(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return IndexToProto(applied), nil
}
// ApplyDatastoreIndex handles the gRPC request by passing it to the
// underlying Index Apply() method via a per-request client.
func (s *IndexServer) ApplyDatastoreIndex(ctx context.Context, request *datastorepb.ApplyDatastoreIndexRequest) (*datastorepb.DatastoreIndex, error) {
	client, err := createConfigIndex(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyIndex(ctx, client, request)
}
// DeleteDatastoreIndex handles the gRPC request by passing it to the
// underlying Index Delete() method via a per-request client.
func (s *IndexServer) DeleteDatastoreIndex(ctx context.Context, request *datastorepb.DeleteDatastoreIndexRequest) (*emptypb.Empty, error) {
	client, err := createConfigIndex(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	deleteErr := client.DeleteIndex(ctx, ProtoToIndex(request.GetResource()))
	return &emptypb.Empty{}, deleteErr
}
// ListDatastoreIndex handles the gRPC request by passing it to the
// underlying IndexList() method and converting each item to proto form.
func (s *IndexServer) ListDatastoreIndex(ctx context.Context, request *datastorepb.ListDatastoreIndexRequest) (*datastorepb.ListDatastoreIndexResponse, error) {
	client, err := createConfigIndex(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	list, err := client.ListIndex(ctx, request.Project)
	if err != nil {
		return nil, err
	}
	var protos []*datastorepb.DatastoreIndex
	for _, item := range list.Items {
		protos = append(protos, IndexToProto(item))
	}
	return &datastorepb.ListDatastoreIndexResponse{Items: protos}, nil
}
// createConfigIndex builds a datastore client configured with the given
// service account credentials file. (Parameter renamed from snake_case
// service_account_file to Go-idiomatic mixedCaps; callers are positional,
// so this is interface-compatible.)
func createConfigIndex(ctx context.Context, serviceAccountFile string) (*datastore.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return datastore.NewClient(conf), nil
}
|
package net
import "net/http"
import "bytes"
import "io/ioutil"
import "github.com/purstal/go-tieba-modules/operation-analyser/old/log"
//http://www.crifan.com/go_language_http_do_post_pass_post_data/
// Get issues a GET request to srcUrl with the encoded query parameters and
// returns the response body as a string.
//
// Fixes: the stray println(httpReq) (which printed a raw pointer to stderr
// on every call) is removed, and the previously ignored NewRequest/ReadAll
// errors are now propagated.
func Get(srcUrl string, parameters Parameters) (string, error) { //url,parameters
	httpClient := http.Client{}
	dstUrl := srcUrl + "?" + parameters.Encode()
	httpReq, err := http.NewRequest("GET", dstUrl, nil)
	if err != nil {
		return "", err
	}
	httpResp, err := httpClient.Do(httpReq)
	if err != nil {
		return "", err
	}
	defer httpResp.Body.Close()
	respBytes, err := ioutil.ReadAll(httpResp.Body)
	if err != nil {
		return "", err
	}
	return string(respBytes), nil
}
// GetWithCookies issues a GET request with the encoded query parameters and
// a Cookie header, retrying on transport errors.
//
// Retry policy (package globals): a negative 重试次数 retries without bound;
// otherwise at most 重试次数 additional attempts are made before giving up.
//
// Fix: the original mutated http.DefaultClient.Timeout, silently imposing
// this package's timeout on every other user of the default client (and
// racing with them); a dedicated client is used instead.
func GetWithCookies(srcUrl string, parameters Parameters, cookies Parameters) (string, error) {
	dstUrl := srcUrl + "?" + parameters.Encode()
	httpReq, err := http.NewRequest("GET", dstUrl, nil)
	if err != nil {
		return "", err
	}
	httpReq.Header.Add("Cookie", cookies.CookieEncode())
	httpClient := &http.Client{Timeout: 最长允许响应时间}
	var httpResp *http.Response
	var i int
	for {
		httpResp, err = httpClient.Do(httpReq)
		if err == nil {
			break
		} else if 重试次数 < 0 {
			log.Loglog("第", i+1, "次获取响应失败,无重试次数上限.", err.Error())
			i++
		} else if i == 重试次数 {
			log.Loglog("第", i+1, "次获取响应失败,到达重试次数上限.返回空响应.", err.Error())
			break
		} else {
			log.Loglog("第", i+1, "次获取响应失败,最多重试", 重试次数, "次.ERROR:", err.Error())
			i++
		}
	}
	if err != nil {
		return "", err
	}
	defer httpResp.Body.Close()
	respBytes, err := ioutil.ReadAll(httpResp.Body)
	if err != nil {
		return "", err
	}
	return string(respBytes), nil
}
// Post sends the parameters form-encoded in the body of a POST request and
// returns the response body as a string.
func Post(Url string, parameters Parameters) (string, error) {
	body := bytes.NewReader([]byte(parameters.Encode()))
	req, _ := http.NewRequest("POST", Url, body)
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	client := http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	data, _ := ioutil.ReadAll(resp.Body)
	return string(data), nil
}
// PostWithCookies sends the parameters form-encoded in a POST body and also
// mirrors them into the Cookie header (matching the original behavior of
// cookie-encoding the same parameter set), returning the response body.
func PostWithCookies(Url string, parameters Parameters) (string, error) {
	body := bytes.NewReader([]byte(parameters.Encode()))
	req, _ := http.NewRequest("POST", Url, body)
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Add("Cookie", parameters.CookieEncode())
	client := http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	data, _ := ioutil.ReadAll(resp.Body)
	return string(data), nil
}
/*
func GetResp(postUrl string, parameters Parameters) string {
println(postUrl)
println(parameters.Encode())
httpClient := http.Client{}
var httpReq *http.Request
if parameters == nil {
httpReq, _ = http.NewRequest("GET", postUrl, nil)
} else {
httpReq, _ = http.NewRequest("POST", postUrl, bytes.NewReader([]byte(parameters.Encode())))
httpReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
}
var err error
httpResp, err := httpClient.Do(httpReq)
var str string
if err == nil {
defer httpResp.Body.Close()
respBytes, _ := ioutil.ReadAll(httpResp.Body)
str = string(respBytes)
println(str)
return str
}
str = "error"
misc.Log(err.Error())
return str
}
func GetRespByteSliceWithCookies(postUrl string, parameters Parameters, cookies Parameters) []byte {
httpClient := http.Client{}
var httpReq *http.Request
if parameters == nil {
httpReq, _ = http.NewRequest("GET", postUrl, nil)
} else {
httpReq, _ = http.NewRequest("POST", postUrl, bytes.NewReader([]byte(parameters.Encode())))
httpReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
}
if cookies != nil {
httpReq.Header.Add("Cookie", "")
for _, cookie := range cookies {
httpReq.AddCookie(&http.Cookie{
Name: cookie.Key,
Value: cookie.Value,
})
}
}
var err error
httpResp, err := httpClient.Do(httpReq)
if err == nil {
defer httpResp.Body.Close()
respBytes, _ := ioutil.ReadAll(httpResp.Body)
return respBytes
}
misc.Log(err.Error())
return []byte("error")
}
func GetRespWithCookies(postUrl string, parameters Parameters, cookies Parameters) string {
httpClient := http.Client{}
var httpReq *http.Request
if parameters == nil {
httpReq, _ = http.NewRequest("GET", postUrl, nil)
} else {
httpReq, _ = http.NewRequest("POST", postUrl, bytes.NewReader([]byte(parameters.Encode())))
httpReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
}
httpReq.Header.Add("Referer", "http://tieba.baidu.com/bawu2/platform/listMember?word=%D4%B5%D6%AE%BB")
if cookies != nil {
httpReq.Header.Add("Cookie", "")
for _, cookie := range cookies {
httpReq.AddCookie(&http.Cookie{
Name: cookie.Key,
Value: cookie.Value,
})
}
}
var err error
httpResp, err := httpClient.Do(httpReq)
var str string
if err == nil {
defer httpResp.Body.Close()
respBytes, _ := ioutil.ReadAll(httpResp.Body)
str = string(respBytes)
return str
}
str = "error"
misc.Log(err.Error())
return str
}
func GetRespByteSlice(postUrl string, parameters Parameters) []byte {
httpClient := http.Client{}
var httpReq *http.Request
if parameters == nil {
httpReq, _ = http.NewRequest("GET", postUrl, nil)
} else {
httpReq, _ = http.NewRequest("POST", postUrl, bytes.NewReader([]byte(parameters.Encode())))
httpReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
}
var err error
httpResp, err := httpClient.Do(httpReq)
if err == nil {
defer httpResp.Body.Close()
respBytes, _ := ioutil.ReadAll(httpResp.Body)
return respBytes
}
misc.Log(err.Error())
return []byte("error")
}
*/
|
// test-input project doc.go
/*
test-input document
*/
package main
|
package main // import "github.com/janberktold/redis-autopilot"
import (
"fmt"
"os"
"os/signal"
"time"
log "github.com/sirupsen/logrus"
)
// main wires configuration, the redis watcher, consul registration, and the
// pilot together, then loops until interrupted.
//
// Fixes: the watcher-creation failure message was a copy-paste of the
// RedisInstanceProvider message (it now names the RedisWatcher), and the
// local variable `signal` no longer shadows the imported os/signal package.
func main() {
	if len(os.Args) != 2 {
		log.Info("Invoke as redis-autopilot [path to config file]")
		os.Exit(1)
	}
	logger := log.StandardLogger()
	configurationFilePath := os.Args[1]
	configManager := NewConfigurationManager()
	config, changeSignal, err := configManager.Load(configurationFilePath)
	if err != nil {
		logger.Panicf("Failed to load runtime configuration file: %v", err)
	}
	fmt.Printf("%+v \n", *config)
	redisInstanceProvider, err := NewRedisInstanceProvider(func() string {
		return config.RedisAddress
	}, changeSignal, logger)
	if err != nil {
		logger.Panicf("Failed to create RedisInstanceProvider: %v.", err.Error())
	}
	watcher, err := NewRedisWatcher(redisInstanceProvider, logger, func() time.Duration {
		return config.RedisMonitorInterval
	})
	if err != nil {
		logger.Panicf("Failed to create RedisWatcher: %v.", err.Error())
	}
	interruptChannel := make(chan os.Signal, 1)
	signal.Notify(interruptChannel, os.Interrupt)
	consulClientProvider, err := NewConsulClientProvider(func() string {
		return config.ConsulURL
	}, func() string {
		return config.ConsulACL
	}, changeSignal, logger)
	if err != nil {
		logger.Panicf("Failed to create ConsulClientProvider: %v.", err.Error())
	}
	consulServiceRegistrar, err := NewConsulServiceRegistrar(consulClientProvider, func() string {
		return config.ConsulServiceName
	}, func() time.Duration {
		return config.ConsulServiceRegistrationTTL
	}, changeSignal)
	if err != nil {
		logger.Panicf("Failed to create ConsulServiceRegistrar: %v.", err.Error())
	}
	// TODO: Should load pilot based off config file.
	pilot := NewSingleMasterWithSlavesPilot(redisInstanceProvider, consulServiceRegistrar)
loop:
	for {
		select {
		case redisStatus := <-watcher.ChangeChannel():
			log.Infof("Interrupt from redis watcher, new status: %v", redisStatus)
			pilot.Execute()
		case <-time.After(config.PilotExecuteTimeInterval):
			log.Info("Interrupt from time delay")
			pilot.Execute()
		case sig := <-interruptChannel:
			log.Infof("We were asked to shutdown %v", sig)
			break loop
		}
	}
	pilot.Shutdown()
}
|
package parser
import (
"fmt"
"go/token"
"ligo/lexer"
"ligo/typ"
)
// Node is the interface implemented by every AST node kind produced by the
// parser.
type Node interface {
	Type() NodeType // discriminator for the concrete node kind
	String() string // printable, source-like representation
	Copy() Node     // deep copy of the node
	Val() typ.Val   // runtime value the node evaluates to
}
// NodeType discriminates the concrete kinds of AST nodes. Embedding it in a
// node struct gives that struct a ready-made Type() method.
type NodeType int

// Type returns the receiver itself, satisfying part of the Node interface.
func (t NodeType) Type() NodeType {
	return t
}
// Enumeration of AST node kinds.
const (
	NodeIdent  NodeType = iota // bare identifier / symbol
	NodeString                 // string literal
	NodeNumber                 // int, float or imaginary literal
	NodeCons                   // cons cell (list form)
	NodeVector                 // vector form
)
// IdentNode is an AST node holding a bare identifier (symbol).
type IdentNode struct {
	NodeType
	Ident string
}

// Val returns the identifier as a symbol value.
func (node *IdentNode) Val() typ.Val {
	return typ.NewSymbol(node.Ident)
}

// Copy returns a fresh IdentNode carrying the same identifier.
func (node *IdentNode) Copy() Node {
	return NewIdentNode(node.Ident)
}

// String renders the identifier; the special identifier "nil" prints as the
// empty list "()".
func (node *IdentNode) String() string {
	if node.Ident != "nil" {
		return node.Ident
	}
	return "()"
}
// StringNode is an AST node holding a string literal.
type StringNode struct {
	NodeType
	Value string
}

// Val wraps the literal text in a string value.
func (n *StringNode) Val() typ.Val {
	return typ.NewString(n.Value)
}

// Copy returns a fresh StringNode carrying the same literal.
func (n *StringNode) Copy() Node {
	return newStringNode(n.Value)
}

// String returns the raw literal text.
func (n *StringNode) String() string {
	return n.Value
}
// NumberNode is an AST node holding a numeric literal; NumberType records
// which lexical class it came from (token.INT, token.FLOAT or token.IMAG).
type NumberNode struct {
	NodeType
	Value      string
	NumberType token.Token
}

// Val converts the literal text to an integer value.
func (n *NumberNode) Val() typ.Val {
	return typ.NewInt(n.Value)
}

// Copy returns a fresh NumberNode with the same literal and class.
func (n *NumberNode) Copy() Node {
	clone := *n
	return &clone
}

// String returns the raw literal text.
func (n *NumberNode) String() string {
	return n.Value
}
// VectorNode is an AST node holding a vector of child nodes.
type VectorNode struct {
	NodeType
	Nodes []Node
}

// Val evaluates every child and wraps the results in a vector value.
func (n *VectorNode) Val() typ.Val {
	vals := make([]typ.Val, 0, len(n.Nodes))
	for _, child := range n.Nodes {
		vals = append(vals, child.Val())
	}
	return typ.NewVect(vals)
}

// Copy deep-copies the vector and every one of its children.
func (n *VectorNode) Copy() Node {
	children := make([]Node, len(n.Nodes))
	for i := range n.Nodes {
		children[i] = n.Nodes[i].Copy()
	}
	return &VectorNode{NodeType: n.Type(), Nodes: children}
}

// String renders the children with fmt's default slice formatting.
func (n *VectorNode) String() string {
	return fmt.Sprint(n.Nodes)
}
// ConsNode is an AST node holding a cons cell (list form).
type ConsNode struct {
	NodeType
	Nodes []Node
}

// Copy deep-copies the cons cell and every one of its children.
func (n *ConsNode) Copy() Node {
	children := make([]Node, len(n.Nodes))
	for i := range n.Nodes {
		children[i] = n.Nodes[i].Copy()
	}
	return &ConsNode{NodeType: n.Type(), Nodes: children}
}

// Val evaluates every child and wraps the results in a cons value.
func (n *ConsNode) Val() typ.Val {
	vals := make([]typ.Val, 0, len(n.Nodes))
	for _, child := range n.Nodes {
		vals = append(vals, child.Val())
	}
	return typ.NewCons(vals)
}

// Cons returns the evaluated cell with its concrete *typ.Cons type.
func (n *ConsNode) Cons() *typ.Cons {
	return n.Val().(*typ.Cons)
}

// String renders the cell via its evaluated value.
func (n *ConsNode) String() string {
	return n.Val().String()
}
// nilNode is the shared sentinel returned for the empty list ("nil" / "()").
var nilNode = NewIdentNode("nil")
// Parse consumes the lexer until EOF and returns the list of top-level
// nodes. The space delimiter means "no closing delimiter expected".
func Parse(l *lexer.Lexer) []Node {
	top := make([]Node, 0)
	return parser(l, top, ' ')
}
// parser accumulates nodes until EOF or until the closing delimiter
// lookingFor (')' or ']') is seen, recursing for nested forms. It panics on
// lexer errors and on mismatched closing delimiters.
//
// Fix: the former "case lexer.ItemError" branch in the switch was
// unreachable — the guard at the top of the loop panics on ItemError before
// the switch runs — so it has been removed.
func parser(l *lexer.Lexer, tree []Node, lookingFor rune) []Node {
	for item := l.NextItem(); item.Type != lexer.ItemEOF; {
		if item.Type == lexer.ItemError {
			panic(fmt.Sprintf("Lexer error %s", item.Value))
		}
		switch t := item.Type; t {
		case lexer.ItemIdent:
			tree = append(tree, NewIdentNode(item.Value))
		case lexer.ItemString:
			tree = append(tree, newStringNode(item.Value))
		case lexer.ItemInt:
			tree = append(tree, newIntNode(item.Value))
		case lexer.ItemFloat:
			tree = append(tree, newFloatNode(item.Value))
		case lexer.ItemComplex:
			tree = append(tree, newComplexNode(item.Value))
		case lexer.ItemLeftParen:
			// Recurse for the nested form; the recursion returns at ')'.
			tree = append(tree, newConsNode(parser(l, make([]Node, 0), ')')))
		case lexer.ItemLeftVect:
			tree = append(tree, newVectNode(parser(l, make([]Node, 0), ']')))
		case lexer.ItemRightParen:
			if lookingFor != ')' {
				panic(fmt.Sprintf("unexpected \")\" [%d]", item.Pos))
			}
			return tree
		case lexer.ItemRightVect:
			if lookingFor != ']' {
				panic(fmt.Sprintf("unexpected \"]\" [%d]", item.Pos))
			}
			return tree
		default:
			panic("Bad Item type")
		}
		item = l.NextItem()
	}
	return tree
}
// NewIdentNode builds an identifier (symbol) node.
func NewIdentNode(name string) *IdentNode {
	return &IdentNode{NodeType: NodeIdent, Ident: name}
}

// newStringNode builds a string-literal node.
func newStringNode(val string) *StringNode {
	return &StringNode{NodeType: NodeString, Value: val}
}

// newIntNode builds a number node tagged as an integer literal.
func newIntNode(val string) *NumberNode {
	return &NumberNode{NodeType: NodeNumber, Value: val, NumberType: token.INT}
}

// newFloatNode builds a number node tagged as a floating-point literal.
func newFloatNode(val string) *NumberNode {
	return &NumberNode{NodeType: NodeNumber, Value: val, NumberType: token.FLOAT}
}

// newComplexNode builds a number node tagged as an imaginary literal.
func newComplexNode(val string) *NumberNode {
	return &NumberNode{NodeType: NodeNumber, Value: val, NumberType: token.IMAG}
}

// We return Node here, because it could be that it's nil:
// an empty argument list yields the shared nilNode sentinel.
func newConsNode(args []Node) Node {
	if len(args) > 0 {
		return &ConsNode{NodeType: NodeCons, Nodes: args}
	} else {
		return nilNode
	}
}

// newVectNode builds a vector node over content.
func newVectNode(content []Node) *VectorNode {
	return &VectorNode{NodeType: NodeVector, Nodes: content}
}
|
// Merging
//
// There are several functions available that handle different kinds of merging:
//
// - MergeNodes(left, right Node) Node: returns a new node that merges children
// from both nodes.
//
// - MergeNodeSlices(left, right Nodes, mergeFn MergeFunction) Nodes: merges
// two slices based on the mergeFn. This allows more advanced merging when
// dealing with slices of nodes.
//
// - MergeDocuments(left, right *Document, mergeFn MergeFunction) *Document:
// creates a new document with their respective nodes merged. You can use
// IndividualBySurroundingSimilarityMergeFunction with this to merge
// individuals, rather than just appending them all.
//
// The MergeFunction is a type that can be received in some of the merging
// functions. The closure determines if two nodes should be merged and what the
// result would be. Alternatively it can also describe when two nodes should not
// be merged.
//
// You may certainly create your own MergeFunction, but there are some that are
// already included:
//
// - IndividualBySurroundingSimilarityMergeFunction creates a MergeFunction that
// will merge individuals if their surrounding similarity is at least
// minimumSimilarity.
//
// - EqualityMergeFunction is a MergeFunction that will return a merged node if
// the node are considered equal (with Equals).
package gedcom
import (
"errors"
"fmt"
)
// MergeFunction will do one of two things:
//
// 1. If the nodes should be merged, it must return a new node.
//
// 2. If the nodes should not or could not be merged, then nil is returned.
//
// A MergeFunction can be used with MergeNodeSlices.
//
// The document parameter is used as the destination for merged nodes that need
// to be attached to a document (such as individuals).
type MergeFunction func(left, right Node, document *Document) Node
// MergeNodes returns a new node that merges children from both nodes.
//
// If either of the nodes are nil, or they are not the same tag an error will be
// returned and the result node will be nil.
//
// The node returned and all of the merged children will be created as new
// nodes as to not interfere with the original input.
//
// The document must not be nil and will be used to attach the new nodes to
// (since some nodes require a document, such as individuals). You may supply
// the same document.
func MergeNodes(left, right Node, document *Document) (Node, error) {
	if IsNil(left) {
		return nil, errors.New("left is nil")
	}
	if IsNil(right) {
		return nil, errors.New("right is nil")
	}
	leftTag := left.Tag()
	rightTag := right.Tag()
	// We can only proceed if the nodes can be merged.
	if !leftTag.Is(rightTag) {
		return nil, fmt.Errorf("cannot merge %s and %s nodes",
			leftTag.Tag(), rightTag.Tag())
	}
	// Work on a deep copy of left so the original inputs stay untouched.
	r := DeepCopy(left, document)
	for _, child := range right.Nodes() {
		for _, n := range r.Nodes() {
			if n.Equals(child) {
				// Equal children: merge their subtrees in place rather than
				// appending a duplicate child below.
				newNodes := MergeNodeSlices(child.Nodes(), n.Nodes(), document,
					EqualityMergeFunction)
				n.SetNodes(newNodes)
				// Skip the AddNode below; this child is already merged.
				goto next
			}
		}
		r.AddNode(child)
	next:
	}
	return r, nil
}
// MergeNodeSlices merges two slices based on the mergeFn.
//
// The MergeFunction must not be nil, but may return nil. See MergeFunction for
// usage.
//
// The left and right may contain zero elements or be nil, these mean the same
// thing.
//
// MergeNodeSlices makes some guarantees about the result:
//
// 1. The result slice will contain at least the length of the greatest length
// of left and right. So if len(left) = 3 and len(right) = 5 then the result
// slice will have a minimum of 5 items. If both slices are empty or nil the
// minimum length will also be zero.
//
// 2. The result slice will not contain more elements than the sum of the
// lengths of the left and right. So if len(left) = 3 and len(right) = 5 then
// the largest possible slice returned is 8.
//
// 3. All of the nodes returned will be deep copies of the original nodes so it
// is safe to manipulate the result in any way without affecting the original
// input slices.
//
// 4. Any element from the left or right may only be merged once. That is to say
// that if a merge happens between a left and right node that the result node
// cannot be merged again.
//
// 5. Merges can only happen between a node on the left with a node on the
// right. Even if two nodes in the left could be merged they will not be. The
// same goes for all of the elements in the right slice.
//
// The document must not be nil and will be used to attach the new nodes to
// (since some nodes require a document, such as individuals). You may supply
// the same document.
func MergeNodeSlices(left, right Nodes, document *Document, mergeFn MergeFunction) Nodes {
	newSlice := Nodes{}
	// Be careful to duplicate the right slice. I'm not sure why this is
	// necessary but the unit tests that reuse mergeDocumentsTests will fail if
	// we do not have this.
	newRight := make(Nodes, len(right))
	copy(newRight, right)
	right = newRight
	// We start by adding all of the items on the left.
	for _, node := range left {
		newSlice = append(newSlice, DeepCopy(node, document))
	}
	// Each of the items on the right must be compared with all of the items in
	// the slice, which starts off with all the items from the left but will
	// grow.
	//
	// If the right item does not match anything previously seen then it is
	// appended to the end. Otherwise the left node is removed and the new
	// merged node is place on the end of the new slice.
	//
	// We can guarantee that all the items on the left will be inserted once.
	// However, one obvious problem is that the items on the right may be merged
	// multiple times into a single left element, or even be merged into an
	// element previously appended from the right.
	//
	// To get around this behavior we have to keep track of when a node has
	// already been replaced with a merged one. This gives us the same
	// guarantees for the items on the right.
	alreadyMerged := NodeSet{}
	for len(right) > 0 {
		found := false
		for i := 0; i < len(newSlice); i++ {
			node := newSlice[i]
			// Skip any nodes that were previously marked as merged.
			if alreadyMerged.Has(node) {
				goto next
			}
			for j, node2 := range right {
				merged := mergeFn(node, node2, document)
				if !IsNil(merged) {
					// Remove the current node, and append the new merged one.
					// This will change the order, but the order of nodes
					// doesn't matter in GEDCOM files.
					newSlice = append(newSlice[:i], newSlice[i+1:]...)
					newSlice = append(newSlice, merged)
					// We also have to remove the matching right node, otherwise
					// we may get stuck into an infinite loop.
					right = append(right[:j], right[j+1:]...)
					// Record the fact that this new node has been merged so it
					// will be avoided next time, even in the case of a merge
					// candidate.
					alreadyMerged.Add(merged)
					found = true
					break
				}
			}
		next:
		}
		if !found {
			// See the comment above about why we need to mark the new right
			// node as already merged.
			newNode := DeepCopy(right[0], document)
			alreadyMerged.Add(newNode)
			newSlice = append(newSlice, newNode)
			right = right[1:]
		}
	}
	return newSlice
}
// MergeDocuments creates a new document with their respective nodes merged.
//
// The MergeFunction must not be nil, but may return nil. See MergeFunction
// for usage.
//
// A nil left or right is treated as an empty document.
//
// All nodes in the result are deep copies, so they can be manipulated freely
// without affecting the originals.
//
// Individuals are never merged with each other here, only appended; use
// MergeDocumentsAndIndividuals for that.
//
// The document (third parameter) must not be nil. It is the document the new
// nodes are attached to (some nodes, such as individuals, require one). It
// may be the same document.
func MergeDocuments(left, right *Document, document *Document, mergeFn MergeFunction) *Document {
	var leftNodes Nodes
	var rightNodes Nodes

	if left != nil {
		leftNodes = left.Nodes()
	}
	if right != nil {
		rightNodes = right.Nodes()
	}

	merged := MergeNodeSlices(leftNodes, rightNodes, document, mergeFn)

	return NewDocumentWithNodes(merged)
}
// MergeDocumentsAndIndividuals merges two documents while also merging
// similar individuals. A new document is returned.
//
// The MergeFunction must not be nil, but may return nil. It is only applied
// to nodes that are not individuals. See MergeFunction for usage.
//
// The options must be provided.
func MergeDocumentsAndIndividuals(left, right *Document, mergeFn MergeFunction, options *IndividualNodesCompareOptions) (*Document, error) {
	document := NewDocument()

	// Individuals use the dedicated similarity-aware merge.
	mergedIndividuals, err := individuals(left).Merge(individuals(right), document, options)
	if err != nil {
		return nil, err
	}

	// Everything else goes through the supplied mergeFn.
	mergedOther := MergeNodeSlices(nonIndividuals(left), nonIndividuals(right), document, mergeFn)

	allNodes := append(mergedIndividuals.Nodes(), mergedOther...)

	return NewDocumentWithNodes(allNodes), nil
}
// IndividualBySurroundingSimilarityMergeFunction creates a MergeFunction
// that merges two individuals when their surrounding similarity exceeds
// minimumSimilarity.
//
// Nodes that are not IndividualNode instances are never merged (nil is
// returned).
//
// minimumSimilarity should be a value between 0.0 and 1.0. The options must
// not be nil; NewSimilarityOptions() provides sensible defaults.
//
// The document must not be nil and determines where the merged nodes end up,
// which matters for nodes that need to be attached to a document.
func IndividualBySurroundingSimilarityMergeFunction(minimumSimilarity float64, options SimilarityOptions) MergeFunction {
	return func(left, right Node, document *Document) Node {
		leftIndividual, leftOK := left.(*IndividualNode)
		rightIndividual, rightOK := right.(*IndividualNode)

		// Guard: both sides must be individuals.
		if !leftOK || !rightOK {
			return nil
		}

		// Guard: not similar enough — do not merge.
		similarity := leftIndividual.SurroundingSimilarity(rightIndividual, options, false)
		if similarity.WeightedSimilarity() <= minimumSimilarity {
			return nil
		}

		// The error can be ignored because left and right are known to be
		// the same type.
		merged, _ := MergeNodes(left, right, document)

		return merged
	}
}
// EqualityMergeFunction is a MergeFunction that returns a merged node when
// the two nodes are considered equal (via Equals). It is the most generic
// choice for general merges.
//
// The document must not be nil; it is used to attach merged nodes, since
// some nodes (such as individuals) require a document to be created.
func EqualityMergeFunction(left, right Node, document *Document) Node {
	if !left.Equals(right) {
		return nil
	}

	merged, err := MergeNodes(left, right, document)
	if err != nil {
		return nil
	}

	return merged
}
|
/*
* @lc app=leetcode.cn id=17 lang=golang
*
* [17] 电话号码的字母组合
*/
// @lc code=start
package main
import "fmt"
import "strings"
// main runs letterCombinations on two sample inputs and prints the results.
func main() {
	for _, digits := range []string{"29", "239"} {
		combos := letterCombinations(digits)
		fmt.Printf("digits is %s, result is %v, len is %d\n", digits, combos, len(combos))
	}
}
// letterCombinations returns every letter string the digits could represent
// on a phone keypad (digits '2'-'9'). The order matches a breadth-first
// expansion: all combinations starting with the first letter of the first
// digit come first, and so on.
//
// An empty input yields an empty (non-nil) slice. A digit with no letters
// ('0', '1' or any non-digit) yields an empty result, matching the original
// behavior. The leftover debug Printfs have been removed so the function is
// side-effect free.
func letterCombinations(digits string) []string {
	if len(digits) == 0 {
		return []string{}
	}

	keypad := map[byte]string{
		'2': "abc", '3': "def", '4': "ghi", '5': "jkl",
		'6': "mno", '7': "pqrs", '8': "tuv", '9': "wxyz",
	}

	// combos holds all combinations for the digits processed so far; the
	// single empty prefix seeds the expansion.
	combos := []string{""}
	for i := 0; i < len(digits); i++ {
		letters := keypad[digits[i]] // "" for keys without letters
		next := make([]string, 0, len(combos)*len(letters))
		for _, prefix := range combos {
			for j := 0; j < len(letters); j++ {
				var b strings.Builder
				b.Grow(len(prefix) + 1)
				b.WriteString(prefix)
				b.WriteByte(letters[j])
				next = append(next, b.String())
			}
		}
		combos = next
	}

	return combos
}
// @lc code=end
|
package main
import (
"fmt"
"time"
)
// ServerStruct bundles the two channels the broker goroutine (Server) reads
// from: one for subscription requests and one for published messages.
type ServerStruct struct {
	channel_subscriber chan Sub // incoming subscription requests
	channel_publisher chan Message // incoming published messages
}

// Sub is a subscription request: the topic of interest and the private
// channel on which the subscriber receives news bodies.
type Sub struct {
	topic string
	news chan string
}

// Message is a published item: the topic it belongs to and its text body.
type Message struct {
	topic string
	body string
}
// Server is the broker loop. It owns the topic -> subscriber-channel table
// and serialises all access to it by handling one subscription or publish
// event at a time from its two channels. It never returns.
func Server(server ServerStruct) {
	subscribedTopics := make(map[string][]chan string)

	for {
		select {
		case sub := <-server.channel_subscriber:
			manageTopic(sub, subscribedTopics)
			fmt.Printf("added new subscriber to topic %s\n", sub.topic)
		case msg := <-server.channel_publisher:
			sendNews(msg, subscribedTopics)
			// BUG FIX: this previously logged sub.topic, i.e. the topic of
			// whatever subscription happened to arrive last (or "" if none),
			// instead of the topic that was just published.
			fmt.Printf("send topic %s to all subscribers\n", msg.topic)
		}
	}
}
// manageTopic registers sub's news channel under its topic, creating the
// topic entry on first use.
func manageTopic(sub Sub, subscribedTopics map[string][]chan string) {
	// Indexing a missing key yields a nil slice, and append on nil creates a
	// fresh one — so new and existing topics take the same path.
	subscribedTopics[sub.topic] = append(subscribedTopics[sub.topic], sub.news)
}
// sendNews delivers msg.body to every channel subscribed to msg.topic.
// Topics with no subscribers are silently ignored.
//
// NOTE(review): the sends are unbuffered and blocking, so a subscriber that
// stops reading stalls the whole broker — confirm this is acceptable.
func sendNews(msg Message, subscribedTopics map[string][]chan string) {
	subscribers, ok := subscribedTopics[msg.topic]
	if !ok {
		return
	}

	for _, ch := range subscribers {
		ch <- msg.body
	}

	// BUG FIX: the original printed "Message send to N subscribers" with a
	// running counter after every single send, misreporting the total on each
	// line. Log once with the real total instead.
	fmt.Printf("Message send to %d subscribers\n", len(subscribers))
}
// Client_Publish sends each message to the broker's publish channel, pausing
// five seconds between messages to simulate a slow producer.
func Client_Publish(channel chan Message, msg []Message) {
	for _, message := range msg {
		channel <- message
		// Idiom fix: the original wrote the duration as 5 * 1e9 (raw
		// nanoseconds); 5 * time.Second is the same value, spelled clearly.
		time.Sleep(5 * time.Second)
	}
}
// Client_Subscribe registers a subscriber for topic and then prints every
// piece of news delivered on its private channel. It never returns.
func Client_Subscribe(channel chan Sub, topic string, id int) {
	listen := make(chan string)
	channel <- Sub{topic, listen}
	fmt.Printf("ID %d Subscribed for topic %s\n", id, topic)

	for {
		news := <-listen
		fmt.Printf("ID %d got news: %s\n\n", id, news)
	}
}
func main() {
var messageChannel = make(chan Message)
var subscriberChannel = make(chan Sub)
var server = ServerStruct{subscriberChannel, messageChannel}
var messages1 [] Message
messages1 = initMessagesPub1(messages1)
var messages2 [] Message
messages2 = initMessagesPub2(messages2)
go Server(server)
go Client_Subscribe(server.channel_subscriber, "go",1)
go Client_Subscribe(server.channel_subscriber, "gym",2)
go Client_Subscribe(server.channel_subscriber, "car",3)
go Client_Subscribe(server.channel_subscriber, "gym",4)
go Client_Publish(server.channel_publisher, messages1)
go Client_Publish(server.channel_publisher, messages2)
for {
time.Sleep(1*1e9)
}
}
// initMessagesPub1 appends the first publisher's demo messages (topics "go",
// "gym" and "car") to messages and returns the result.
func initMessagesPub1(messages []Message) []Message {
	return append(messages,
		Message{"go", "Learning Go"},
		Message{"go", "Testing Go Code"},
		Message{"gym", "Take proteins"},
		Message{"gym", "Lift weights"},
		Message{"car", "Drive"},
		Message{"car", "Stop"},
	)
}
func initMessagesPub2(messages [] Message) [] Message {
var msg1 = Message{"gym", "Do 8 reps"}
var msg2 = Message{"gym", "Do 3 sets"}
messages = append(messages, msg1, msg2)
return messages
} |
package router
import (
"BackendGo/middlewares"
"BackendGo/server"
"github.com/gin-gonic/gin"
"net/http"
)
// ApplyRoutes registers every HTTP route on the server's gin router.
// /health and /login are public; the /query endpoints and /JWTAuthTest are
// guarded by the JwtAuth middleware. It always returns nil.
func ApplyRoutes(s *server.Server) error {
	// Public liveness probe.
	s.Router.GET("/health", func(c *gin.Context) {
		c.String(http.StatusOK, "OK")
	})
	s.Router.POST("/login", loginUser(s))
	// Authenticated query endpoints.
	s.Router.GET("/query", middlewares.JwtAuth(), queries(s))
	s.Router.GET("/query/:exporter", middlewares.JwtAuth(), queryExporter(s))
	// Returns 200 only when the JWT middleware lets the request through.
	s.Router.GET("/JWTAuthTest", middlewares.JwtAuth(), func(c *gin.Context) {
		c.String(http.StatusOK, "OK")
	})
	return nil
}
|
package main
import (
"fmt"
log "github.com/Sirupsen/logrus"
"net/http"
//"path"
"time"
"bitbucket.org/cicadaDev/utils"
"github.com/dgrijalva/jwt-go"
"github.com/markbates/goth"
"github.com/markbates/goth/providers/gplus"
"github.com/markbates/goth/providers/linkedin"
"github.com/zenazn/goji/web"
)
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handleStatic serves files from the public static directory under /assets/,
// with a 600 second max-age and directory listings disabled.
func handleStatic(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleStatic")

	const staticDir = "/usr/share/ninja/www/static/public"

	fileServer := http.FileServer(http.Dir(staticDir))
	cached := maxAgeHandler(600, fileServer)
	noListing := noDirListing(staticDir, cached)

	http.StripPrefix("/assets/", noListing).ServeHTTP(res, req)
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handleIndex sets a fresh session-id cookie and renders the index page.
// Responds 500 if a session id cannot be created.
func handleIndex(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleIndex")

	sidcookie, err := createSessionID()
	if err != nil {
		log.Errorf("error creating sid %s", err.Error())
		utils.JsonErrorResponse(res, fmt.Errorf(http.StatusText(http.StatusInternalServerError)), http.StatusInternalServerError)
		return
	}
	http.SetCookie(res, sidcookie)

	// FIX: the template error was silently discarded; log it like every other
	// response-write error in this file. (Headers are already sent by now, so
	// logging is all that can be done.)
	if err := indexTemplate.Execute(res, nil); err != nil {
		log.Errorf("error executing index template %s", err)
	}
	//http.ServeFile(res, req, "/usr/share/ninja/www/static/public/index.html")
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handleDocs renders the documentation page.
func handleDocs(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleDocs")
	// FIX: the template error was silently discarded; log it like every other
	// response-write error in this file.
	if err := docsTemplate.Execute(res, nil); err != nil {
		log.Errorf("error executing docs template %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handleLoginSuccess serves the static post-login success page.
func handleLoginSuccess(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleLoginSuccess")
	http.ServeFile(res, req, "/usr/share/ninja/www/static/public/success.html")
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handleAccountPage renders the account management page.
func handleAccountPage(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleAccountPage")
	// FIX: the template error was silently discarded; log it like every other
	// response-write error in this file.
	if err := accountTemplate.Execute(res, nil); err != nil {
		log.Errorf("error executing account template %s", err)
	}
	//http.ServeFile(res, req, "/usr/share/ninja/www/static/auth/accounts.html")
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handleFeedback accepts a JSON feedback message from a logged-in user,
// emails it to the operators asynchronously and replies with a JSON receipt.
// Rejects unknown users (401), malformed JSON (400) and unknown feedback
// types (400).
func handleFeedback(c web.C, res http.ResponseWriter, req *http.Request) {
	db, err := GetDbType(c)
	utils.Check(err)

	userID := c.Env["jwt-userid"].(string)
	msgUser := &userModel{}

	// If the user is not found (shouldn't happen unless token validation
	// failed) reject the request.
	// NOTE(review): msgUser is already a pointer, so &msgUser passes a
	// **userModel here — confirm FindById handles that; handleLogin passes a
	// single pointer for the same call.
	if ok, _ := db.FindById("users", userID, &msgUser); !ok {
		log.WithField("userid", userID).Warnln("user not found")
		utils.JsonErrorResponse(res, fmt.Errorf(http.StatusText(http.StatusUnauthorized)), http.StatusUnauthorized)
		return
	}

	var fbmsg map[string]string
	if err := utils.ReadJson(req, &fbmsg); err != nil {
		log.Errorf("error read json: %s", err.Error())
		utils.JsonErrorResponse(res, fmt.Errorf("message json malformed"), http.StatusBadRequest)
		return
	}

	//TODO: whitelist validator!
	/*if !govalidator.IsUTFLetterNumeric(fbmsg["msg"]) {
		log.Printf("[ERROR] feedback message not valid")
		utils.JsonErrorResponse(res, fmt.Errorf("message not valid"), http.StatusBadRequest)
		return
	}*/

	// Hardcoded whitelist so nothing else slips through. (Replaces a switch
	// that repeated each literal twice; the accepted value is passed on
	// unchanged, exactly as before.)
	msgType := fbmsg["fbtype"]
	switch msgType {
	case "Suggestion", "Report a bug", "Complaint", "Question":
		// accepted
	default:
		log.WithField("type", msgType).Warnln("feedback type not found")
		utils.JsonErrorResponse(res, fmt.Errorf("not valid type"), http.StatusBadRequest)
		return
	}

	userRequest := fmt.Sprintf("%s - %s", req.RemoteAddr, req.UserAgent())
	go emailFeedBack.Send(msgType, msgUser.User.Name, msgUser.Email, msgUser.SubPlan, userRequest, fbmsg["msg"]) //send concurrently

	receipt := map[string]string{"status": "success", "time": time.Now().String()}
	if err = utils.WriteJson(res, receipt, true); err != nil {
		log.Errorf("error writing json to response %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handlePassSample returns a sample pass document of the requested style
// (boardingPass, coupon, eventTicket, storeCard or generic), loaded from the
// corresponding template in the DB and blanked out ready for a new pass.
// Unknown styles and missing templates respond 404.
func handlePassSample(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handlePassSample")

	db, err := GetDbType(c)
	utils.Check(err)

	passType := c.URLParams["passType"]

	// Whitelist of supported pass styles. The template id is always the fixed
	// prefix plus the style name, so the old five-way switch that repeated
	// the prefix per case collapses to one membership check plus a concat.
	switch passType {
	case "boardingPass", "coupon", "eventTicket", "storeCard", "generic":
		// supported
	default:
		log.WithField("type", passType).Warnln("pass type not found")
		utils.JsonErrorResponse(res, fmt.Errorf("pass not found"), http.StatusNotFound)
		return
	}
	templateID := "pass.ninja.pass.template." + passType

	var newPass pass
	if ok, _ := db.FindById("passTemplate", templateID, &newPass); !ok {
		log.WithField("type", templateID).Warnln("pass type not found in DB")
		utils.JsonErrorResponse(res, fmt.Errorf("pass not found"), http.StatusNotFound)
		return
	}

	newPass.Id = ""                  // a new pass needs a new clear id
	newPass.PassType = passType
	newPass.Status = "1"             //first page complete
	newPass.KeyDoc.FormatVersion = 1 //apple says: always set to 1

	if err = utils.WriteJson(res, newPass, true); err != nil {
		log.Errorf("error writing json to response %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handleGetPassLink replies with the download URL of a finished pass.
// Passes whose status is not "ready" are refused with 403.
func handleGetPassLink(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleGetPassLink")

	passData := c.Env["passData"].(pass) //set by the passIDVerify middleware
	if passData.Status != "ready" {
		log.WithField("pass", passData.Name).Warnln("requested pass is not ready for distribution")
		utils.JsonErrorResponse(res, fmt.Errorf("requested pass is incomplete"), http.StatusForbidden)
		return
	}

	passURL := downloadServer + passData.FileName //path.Join(downloadServer, passData.FileName)
	log.WithField("url", passURL).Debugln("pass download url")

	receipt := map[string]string{"name": passData.Name, "url": passURL}
	if err := utils.WriteJson(res, receipt, true); err != nil {
		log.Errorf("error writing json to response %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handleGetMutateList returns the pass name together with its list of
// mutatable fields. Only passes in the "api" state qualify; others get 403.
func handleGetMutateList(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleGetMutateList")

	passData := c.Env["passData"].(pass) //set by the passIDVerify middleware
	if passData.Status != "api" {
		log.WithField("pass", passData.Name).Warnln("requested pass is not ready for distribution")
		utils.JsonErrorResponse(res, fmt.Errorf("requested pass is incomplete"), http.StatusForbidden)
		return
	}
	log.WithField("list", passData.MutateList).Debugln("list of mutate items")

	// A named payload keeps the JSON field names stable.
	type receiptData struct {
		Name       string   `json:"name"`
		Mutatelist []string `json:"mutatelist"`
	}
	receipt := &receiptData{
		Name:       passData.Name,
		Mutatelist: passData.MutateList,
	}
	utils.DebugPrintJson(receipt)

	if err := utils.WriteJson(res, receipt, true); err != nil {
		log.Errorf("error writing json to response %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
// handleMutatePass gets a json list of key/values that correspond to key/values in
// the pass data. Allowing the user to update field data before issuing the pass.
//
//////////////////////////////////////////////////////////////////////////
// handleMutatePass gets a json list of key/values that correspond to
// key/values in the pass data, allowing the user to update field data before
// issuing the pass. The mutated copy is stored under a fresh id in the
// passMutate table and its download URL is returned.
func handleMutatePass(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleMutatePass")
	db, err := GetDbType(c)
	utils.Check(err)

	passData := c.Env["passData"].(pass) //get pass from passIDVerify middleware

	//pass ready to be mutated? Or of the wrong type
	if passData.Status != "api" {
		log.WithField("pass", passData.Name).Warnln("requested pass is not ready or configurable")
		utils.JsonErrorResponse(res, fmt.Errorf("requested pass is incomplete or not mutatable"), http.StatusBadRequest)
		return
	}

	var customVars map[string]value //a map of custom variables to change in the pass
	//read json doc of variables to change
	if err := utils.ReadJson(req, &customVars); err != nil {
		log.Errorf("read json error %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("The submitted pass JSON structure is malformed"), http.StatusBadRequest)
		return
	}

	//swap in variable values from req into variable placeholders in pass
	// BUG FIX: the original discarded updatePassVariables's error and sent the
	// stale (nil) err from GetDbType to the client instead.
	if err := updatePassVariables(&passData, customVars); err != nil {
		utils.JsonErrorResponse(res, err, http.StatusBadRequest)
		return
	}

	// The mutated copy gets its own id and file name derived from it.
	newPassNum := utils.GenerateFnvHashID(passData.Name, time.Now().String())
	newPassID := fmt.Sprintf("%x", newPassNum)
	passData.Id = newPassID
	passData.FileName = passData.FileName + "-" + newPassID

	//passMutate table holds mutated ready passes for download.
	if err = db.Add("passMutate", passData); err != nil {
		log.Errorf("adding to table:passMutate %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("a conflict has occurred creating the pass"), http.StatusConflict)
		return
	}

	passURL := downloadServer + passData.FileName //path.Join(downloadServer, passData.FileName)
	log.WithField("url", passURL).Debugln("pass download url")

	receipt := map[string]string{"name": passData.Name, "url": passURL} //should return a unique serial so it can be accessed again?
	if err = utils.WriteJson(res, receipt, true); err != nil {
		log.Errorf("error writing json to response %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
// handleGetPass returns the pass data json document with a matching ID
//
//
//////////////////////////////////////////////////////////////////////////
// handleGetPass returns the pass data json document with a matching ID. The
// passIDVerify middleware guarantees the pass belongs to the requesting
// user.
func handleGetPass(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleGetPass")

	passData := c.Env["passData"].(pass)
	if err := utils.WriteJson(res, passData, true); err != nil {
		log.Errorf("error writing json to response %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
// handleGetAllPass returns all pass data objects for the user.
//
//
//////////////////////////////////////////////////////////////////////////
// handleGetAllPass returns every pass owned by the requesting user as a JSON
// array (empty when the user has none).
func handleGetAllPass(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleGetAllPass")

	db, err := GetDbType(c)
	utils.Check(err)

	//The Jwt lists the user Id. Use it to select this user's passes.
	userID := c.Env["jwt-userid"].(string)
	log.WithField("userid", userID).Debugln("get all pass of user")

	passList := []pass{}
	filter := map[string]string{"field": "userid", "value": userID}

	// Found=false with a nil error just means no rows: continue with the
	// empty list. Only a real error aborts.
	if ok, err := db.FindAllEq("pass", filter, &passList); !ok && err != nil {
		log.Errorf("db findAllEq %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("an error has occurred retrieving pass data"), http.StatusInternalServerError)
		return
	}

	if err = utils.WriteJson(res, passList, true); err != nil {
		log.Errorf("error writing json to response %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
// handleCreatePass creates a new empty pass in the db and returns its id
//
//
//////////////////////////////////////////////////////////////////////////
// handleCreatePass creates a new pass owned by the requesting user from the
// verified input fragment, stores it and replies 201 with the generated id
// and creation time. Unknown users get 401; a duplicate id gets 409.
func handleCreatePass(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleCreatePass")
	db, err := GetDbType(c)
	utils.Check(err)

	//The Jwt lists the user Id. Use it as one of the seeds for the pass token id
	userID := c.Env["jwt-userid"].(string)
	newPass := c.Env["passInput"].(pass) //get the input fragment data from passReadVerify middleware
	passUser := &userModel{}

	//pass is new, generate a token id
	newPass.Id = utils.GenerateToken(passTokenKey, newPass.Name, userID) //get id as token from base64 hmac
	newPass.Updated = time.Now()
	newPass.UserId = userID
	newPass.FileName = generateFileName(newPass.Name)

	//set pass limit remain
	// NOTE(review): passUser is already a pointer, so &passUser passes a
	// **userModel — confirm FindById handles that; handleLogin passes a
	// single pointer for the same call.
	if ok, _ := db.FindById("users", userID, &passUser); !ok {
		log.WithField("userid", userID).Warnln("user not found") // Warnln: no format args (was Warnf)
		utils.JsonErrorResponse(res, fmt.Errorf(http.StatusText(http.StatusUnauthorized)), http.StatusUnauthorized)
		return
	}
	newPass.PassRemain = passUser.SubPlan //count down from limit

	// FIX: removed a leftover bare log.Println(userID) debug line that logged
	// at default level with no context.
	err = db.Add("pass", newPass)
	if err != nil {
		log.WithField("pass", newPass.Name).Errorf("error adding pass to db %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("a conflict has occurred creating the pass"), http.StatusConflict)
		return
	}

	receipt := map[string]string{"id": newPass.Id, "time": newPass.Updated.String()}
	if err = utils.WriteJsonStatus(res, http.StatusCreated, receipt, false); err != nil {
		log.Errorf("error writing json to response %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
// handleDeletePass deletes a pass with the matching id from the db.
//
//////////////////////////////////////////////////////////////////////////
// handleDeletePass deletes a pass with the matching id from the db and
// replies 204 No Content on success. Ownership was already checked by the
// passIDVerify middleware; a failed delete responds 404.
func handleDeletePass(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleDeletePass")

	db, err := GetDbType(c)
	utils.Check(err)

	passData := c.Env["passData"].(pass)
	if err = db.DelById("pass", passData.Id); err != nil {
		log.WithField("pass", passData.Name).Errorf("error deleting pass from db %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("Pass not found"), http.StatusNotFound)
		return
	}

	// Successfully deleted; a delete intentionally returns no body.
	res.WriteHeader(http.StatusNoContent)
}
//////////////////////////////////////////////////////////////////////////
//
// handleUpdatePass recieves partial pass info and merges it into the pass data
// with a matching id.
//
//////////////////////////////////////////////////////////////////////////
// handleUpdatePass recieves partial pass info and merges it into the pass
// data with a matching id, replying with the id and an updated timestamp.
// An unchanged merge responds 304 Not Modified; a failed merge 409.
func handleUpdatePass(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleUpdatePass")
	db, err := GetDbType(c)
	utils.Check(err)
	passInputFrag := c.Env["passInput"].(pass) //get the input fragment data from passReadVerify middleware
	//userID := c.Env["jwt-userid"].(string)
	//read the frag check for a mutateList, if there append it from the previous mutate list.
	if len(passInputFrag.MutateList) > 0 {
		passData := c.Env["passData"].(pass) //get the whole pass data doc from passIDVerify middleware
		passInputFrag.MutateList = append(passData.MutateList, passInputFrag.MutateList...) //appending arrays on update in rethinkdb is troublesome. Append here instead.
	}
	//TODO: set status to "ready" here rather than in frontend. Also finalize all required data
	//if passInputFrag.Status == "ready" || passInputFrag.Status == "api" {
	//generateFileName makes a unique Id for the pass file name
	//passInputFrag.FileName = generateFileName(passInputFrag.Name) //!!FIXME: only generate 1 time if pass changes. use a hash to see change.
	//}
	// Merge returns ok=false both on a real error and when nothing changed;
	// the two cases are told apart by err below.
	if ok, err := db.Merge("pass", passInputFrag.Id, true, passInputFrag); !ok {
		if err != nil {
			log.WithField("pass", passInputFrag.Name).Errorf("error merging pass in db %s", err)
			utils.JsonErrorResponse(res, fmt.Errorf("a conflict has occurred updating the pass"), http.StatusConflict)
			return
		}
		//unchanged
		log.WithField("pass", passInputFrag.Name).Debugln("pass unchanged")
		res.WriteHeader(http.StatusNotModified)
		return
	}
	//TODO: this sucks, is there another way? Not to update twice?
	//if modified, update the modified time
	//var modPassTime pass
	//modPassTime.Id = passInputFrag.Id
	//modPassTime.Name = passInputFrag.Name
	//modPassTime.Updated = time.Now()
	//_, err = db.Merge("pass", "id", modPassTime.Id, modPassTime)
	//if err != nil {
	//	log.Printf("[ERROR] %s - merging pass: %s to db", err, passInputFrag.Name)
	//	utils.JsonErrorResponse(res, fmt.Errorf("a conflict has occurred updating the pass"), http.StatusConflict)
	//	return
	//}
	receipt := map[string]string{"id": passInputFrag.Id, "time": time.Now().Format(time.RFC3339)} //update the timestamp
	err = utils.WriteJson(res, receipt, true)
	if err != nil {
		log.Errorf("error writing json to response %s", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
// handleLogin uses oauth to link a provider account by register or login.
// The function returns a JWT.
//
//////////////////////////////////////////////////////////////////////////
// handleLogin completes an OAuth login/registration round-trip with the
// provider named in the URL and responds with a JWT.
//
// If the request already carries a valid JWT, the provider account is linked
// to that existing user (step 3a); otherwise the user is looked up by the
// provider's user id and created on first login (step 3b).
func handleLogin(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleLogin")
	db, err := GetDbType(c)
	utils.Check(err)

	//get matching provider from url (gplus,facebook,etc)
	provider, err := goth.GetProvider(c.URLParams["provider"])
	if err != nil {
		log.Errorf("provider oauth error %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("ninja fail, bad request"), http.StatusBadRequest)
		return
	}

	// NOTE(review): sess stays nil for any provider other than gplus or
	// linkedin — confirm only these two are ever registered, otherwise
	// Authorize below is called on a nil session.
	var sess goth.Session
	if provider.Name() == "gplus" {
		sess = &gplus.Session{}
	} else if provider.Name() == "linkedin" {
		sess = &linkedin.Session{}
	}

	//verify oauth state is same as session id. protect against cross-site request forgery
	if ok := verifyState(req); !ok {
		log.WithField("referer", req.Referer()).Warnln("verifying oauth state - failed")
		utils.JsonErrorResponse(res, fmt.Errorf("ninja fail, bad request"), http.StatusBadRequest)
		return
	}

	//1. Exchange authorization code for access token
	_, err = sess.Authorize(provider, req.URL.Query())
	if err != nil {
		log.WithField("provider", provider.Name()).Errorf("session authorize error %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("ninja fail, bad request"), http.StatusBadRequest)
		return
	}

	//2. fetch user info
	user, err := provider.FetchUser(sess)
	if err != nil {
		log.WithField("provider", provider.Name()).Errorf("fetch user info oauth error %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("ninja fail, server error"), http.StatusInternalServerError)
		return
	}

	var newUser userModel
	jwtoken, err := jwt.ParseFromRequest(req, func(token *jwt.Token) (interface{}, error) {
		return jWTokenKey, nil
	})
	if err == nil && jwtoken.Valid { //3a. Link user accounts - the requester is already logged in
		if ok, err := db.FindById("users", jwtoken.Claims["sub"].(string), &newUser); !ok { //if user not found
			if err != nil {
				log.WithField("userid", jwtoken.Claims["sub"].(string)).Errorf("error finding user in db %s", err)
			} else {
				log.WithField("userid", jwtoken.Claims["sub"].(string)).Warnln("user not found in db")
			}
			utils.JsonErrorResponse(res, fmt.Errorf("user not found"), http.StatusBadRequest)
			return
		}
		tokenMap, err := createJWToken("token", jWTokenKey, newUser.ID)
		if err != nil {
			log.WithField("userid", newUser.ID).Errorf("error creating jwt %s", err)
			// BUG FIX: previously fell through and replied 200 with an
			// unusable token payload.
			utils.JsonErrorResponse(res, fmt.Errorf("ninja fail, server error"), http.StatusInternalServerError)
			return
		}
		if err = utils.WriteJson(res, tokenMap, true); err != nil {
			log.Errorf("error writing json to response %s", err)
		}
	} else { //3b. Create a new user account or return an existing one.
		if ok, err := db.FindById("users", user.UserID, &newUser); !ok { //if user not found
			if err != nil {
				log.WithField("userid", user.UserID).Errorf("error finding user in db %s", err)
				utils.JsonErrorResponse(res, fmt.Errorf("Internal error"), http.StatusInternalServerError)
				return
			}
			//add new user
			newUser.ID = user.UserID
			newUser.Email = user.Email
			newUser.OAuthProvider = provider.Name()
			newUser.User = user //all details from oauth login
			newUser.Subscriber = false
			newUser.SubPlan = FreePlan
			newUser.Created = time.Now()
			newUser.LastLogin = time.Now()
			err := db.Add("users", newUser)
			if err != nil {
				log.WithField("userid", newUser.ID).Errorf("error adding user to db %s", err)
				utils.JsonErrorResponse(res, fmt.Errorf("this user account already exists"), http.StatusConflict)
				return
			}
		}
		//http.Redirect(res, req, "/accounts/", http.StatusFound)
		tokenMap, err := createJWToken("token", jWTokenKey, newUser.ID)
		if err != nil {
			log.WithField("userid", newUser.ID).Errorf("error creating jwt %s", err)
			// BUG FIX: same as above — do not reply 200 without a usable token.
			utils.JsonErrorResponse(res, fmt.Errorf("ninja fail, server error"), http.StatusInternalServerError)
			return
		}
		if err = utils.WriteJson(res, tokenMap, true); err != nil {
			log.Errorf("error writing json to response %s", err)
		}
	}
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// handleUnlink revokes the user's OAuth access token with the provider and
// clears the stored provider data while keeping the user record. Replies
// 204 No Content on success.
func handleUnlink(c web.C, res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Debugln("handleUnlink")
	db, err := GetDbType(c)
	utils.Check(err)

	jwtoken, err := jwt.ParseFromRequest(req, func(token *jwt.Token) (interface{}, error) {
		return jWTokenKey, nil
	})
	if err != nil || !jwtoken.Valid {
		// BUG FIX: jwtoken is nil when parsing fails, so the old log call that
		// read jwtoken.Claims["sub"] here could panic with a nil dereference.
		log.Warnln("user account not linked")
		utils.JsonErrorResponse(res, fmt.Errorf("oauth provider unlink failed"), http.StatusUnauthorized)
		return
	}

	var newUser userModel
	if ok, err := db.FindById("users", jwtoken.Claims["sub"].(string), &newUser); !ok { //if user not found
		if err != nil {
			log.WithField("userid", jwtoken.Claims["sub"].(string)).Errorf("error finding user in db %s", err)
		} else {
			log.WithField("userid", jwtoken.Claims["sub"].(string)).Warnln("user not found in db - cannot unlink")
		}
		utils.JsonErrorResponse(res, fmt.Errorf("oauth provider unlink failed"), http.StatusBadRequest)
		return
	}

	//Execute HTTP GET request to revoke current accessToken
	// NOTE(review): this revoke endpoint is Google-specific — confirm that
	// linkedin-linked accounts are not expected to reach this handler.
	accessToken := newUser.User.AccessToken
	url := "https://accounts.google.com/o/oauth2/revoke?token=" + accessToken
	resp, err := http.Get(url)
	if err != nil {
		log.WithField("userid", newUser.ID).Errorf("error failed to revoke access token %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("oauth provider unlink failed"), http.StatusBadRequest)
		return
	}
	defer resp.Body.Close()

	//clear user provider data, but keep the user
	newUser.OAuthProvider = ""
	newUser.User = goth.User{}
	if _, err = db.Merge("users", newUser.ID, false, newUser); err != nil {
		log.WithField("userid", newUser.ID).Errorf("db Merge Error %s", err)
		utils.JsonErrorResponse(res, fmt.Errorf("oauth provider unlink failed"), http.StatusInternalServerError)
		return
	}

	res.WriteHeader(http.StatusNoContent) //successfully unlinked from oauth
}
//////////////////////////////////////////////////////////////////////////
//
// handleNotFound is a 404 handler.
//
//
//////////////////////////////////////////////////////////////////////////
// handleNotFound is the catch-all 404 handler: it logs the miss, marks
// the response as HTML, and renders the static not-found template.
func handleNotFound(res http.ResponseWriter, req *http.Request) {
	log.WithField("url", req.URL.Path[1:]).Warnln("page not found")
	header := res.Header()
	header.Set("Content-Type", "text/html; charset=utf-8")
	res.WriteHeader(http.StatusNotFound)
	notFoundTemplate.Execute(res, nil)
}
|
package chme
import (
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"github.com/go-chi/chi"
)
var endpoint = "/chme"
// TestChangePostToHiddenMethodOfDefault verifies the default middleware:
// POST requests carrying a "_method" form field are routed as that verb,
// while GET and plain POST pass through unchanged.
func TestChangePostToHiddenMethodOfDefault(t *testing.T) {
	r := newTestRouter()
	r.Use(ChangePostToHiddenMethod)
	r.registerRoute()
	ts := httptest.NewServer(r)
	defer ts.Close()
	cases := []struct {
		method   string
		override string // value of the "_method" field; empty means absent
		want     string
	}{
		{http.MethodGet, "", "get"},
		{http.MethodPost, "", "post"},
		{http.MethodPost, "PUT", "put"},
		{http.MethodPost, "PATCH", "patch"},
		{http.MethodPost, "DELETE", "delete"},
	}
	for _, tc := range cases {
		form := url.Values{}
		if tc.override != "" {
			form.Set("_method", tc.override)
		}
		resp, body := testRequest(t, ts, tc.method, endpoint, strings.NewReader(form.Encode()))
		if resp.StatusCode != http.StatusOK {
			t.Errorf("unexpected status code: got %d, but expected %d\n", resp.StatusCode, http.StatusOK)
		}
		if body != tc.want {
			t.Errorf("unexpected body: got %s, but expected %s\n", body, tc.want)
		}
	}
}
// TestChangePostToHiddenMethodOfUserDefined mirrors the default-field test
// but configures the middleware with a custom hidden-method field name.
func TestChangePostToHiddenMethodOfUserDefined(t *testing.T) {
	r := newTestRouter()
	r.Use(NewChme("other").ChangePostToHiddenMethod)
	r.registerRoute()
	ts := httptest.NewServer(r)
	defer ts.Close()
	cases := []struct {
		method   string
		override string // value of the "other" field; empty means absent
		want     string
	}{
		{http.MethodGet, "", "get"},
		{http.MethodPost, "", "post"},
		{http.MethodPost, "PUT", "put"},
		{http.MethodPost, "PATCH", "patch"},
		{http.MethodPost, "DELETE", "delete"},
	}
	for _, tc := range cases {
		form := url.Values{}
		if tc.override != "" {
			form.Set("other", tc.override)
		}
		resp, body := testRequest(t, ts, tc.method, endpoint, strings.NewReader(form.Encode()))
		if resp.StatusCode != http.StatusOK {
			t.Errorf("unexpected status code: got %d, but expected %d\n", resp.StatusCode, http.StatusOK)
		}
		if body != tc.want {
			t.Errorf("unexpected body: got %s, but expected %s\n", body, tc.want)
		}
	}
}
func testRequest(t *testing.T, ts *httptest.Server, method, path string, body io.Reader) (*http.Response, string) {
req, err := http.NewRequest(method, ts.URL+path, body)
if err != nil {
t.Fatalf("failed to test: faield to create new request: %s\n", err)
}
if method == http.MethodPost {
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("failed to test: failed to request: %s\n", err)
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("failed to test: failed to read all of response body: %s\n", err)
}
return resp, string(respBody)
}
// testRouter embeds chi.Router so test-only helper methods (registerRoute)
// can be attached without altering router behavior.
type testRouter struct {
	chi.Router
}
// newTestRouter returns a testRouter backed by a fresh chi mux.
func newTestRouter() *testRouter {
	return &testRouter{
		Router: chi.NewRouter(),
	}
}
// registerRoute installs one handler per HTTP verb at the shared test
// endpoint; each handler simply echoes its own verb name as the body.
func (r *testRouter) registerRoute() {
	echo := func(reply string) http.HandlerFunc {
		return func(w http.ResponseWriter, _ *http.Request) {
			w.Write([]byte(reply))
		}
	}
	r.Get(endpoint, echo("get"))
	r.Post(endpoint, echo("post"))
	r.Put(endpoint, echo("put"))
	r.Patch(endpoint, echo("patch"))
	r.Delete(endpoint, echo("delete"))
}
|
package nhs
import (
"encoding/xml"
"fmt"
"github.com/aquinofb/location_service/http_client"
"github.com/aquinofb/location_service/models"
"os"
"strings"
)
// Result models the Atom response for a single-service lookup (one entry).
type Result struct {
	Entry Entry `xml:"entry"`
}
// Entry is one Atom entry from the NHS syndication feed. Only the entry id,
// deliverer name, service-type code attribute, and geographic coordinates
// are decoded; everything else in the feed is ignored.
type Entry struct {
	Id string `xml:"id"`
	Content struct {
		Service struct {
			Name string `xml:"deliverer"`
			Type struct {
				Code string `xml:"code,attr"`
			} `xml:"type"`
			GeographicCoordinates struct {
				Lat string `xml:"latitude"`
				Lng string `xml:"longitude"`
			} `xml:"geographicCoordinates"`
		} `xml:"service"`
	} `xml:"content"`
}
// ResultPostcodeXML models the postcode-search feed, which returns a list
// of entries rather than a single one.
type ResultPostcodeXML struct {
	Entries []Entry `xml:"entry"`
}
// ServicesFinder fetches one NHS service by type and id and maps it onto a
// models.Location. Fetch and parse failures are logged (previously the
// fetch error was silently discarded) and a Location built from whatever
// was decoded — possibly zero-valued fields — is still returned.
func ServicesFinder(serviceType, serviceId string) models.Location {
	data, err := http_client.Get(buildNHSServicesUri(serviceType, serviceId))
	if err != nil {
		fmt.Printf("error: %v", err)
	}
	result := Result{}
	if err := xml.Unmarshal(data, &result); err != nil {
		fmt.Printf("error: %v", err)
	}
	service := result.Entry.Content.Service
	return models.Location{
		Id:    serviceId,
		Name:  service.Name,
		Lat:   service.GeographicCoordinates.Lat,
		Lng:   service.GeographicCoordinates.Lng,
		Types: []string{strings.ToLower(service.Type.Code)},
	}
}
// ServicesByPostcode lists services of the given type near a postcode.
// Each entry in the search feed triggers a follow-up ServicesFinder call
// to resolve full service details.
func ServicesByPostcode(serviceType, postcode string) ([]models.Location, error) {
	data, err := http_client.Get(buildNHSServicesPostcodeUri(serviceType, postcode))
	if err != nil {
		// BUG FIX: this error was previously ignored, letting a failed fetch
		// fall through to an XML parse of nil data. Propagate it instead.
		return nil, err
	}
	result := ResultPostcodeXML{}
	if err := xml.Unmarshal(data, &result); err != nil {
		return nil, err
	}
	var locations []models.Location
	// TODO: these follow-up requests are independent and could be issued
	// concurrently (bounded goroutines) for a large speedup.
	for _, entry := range result.Entries {
		locations = append(locations, ServicesFinder(serviceType, extractServiceIdFromUrl(entry.Id)))
	}
	return locations, nil
}
// buildNHSServicesUri builds the single-service detail URL.
// The API key is read from the NHS_API_KEY environment variable on every call.
func buildNHSServicesUri(serviceType, serviceId string) string {
	return fmt.Sprintf("%s/services/types/%s/%s.xml?apikey=%s", NHSBaseAPI, serviceType, serviceId, os.Getenv("NHS_API_KEY"))
}
// buildNHSServicesPostcodeUri builds the postcode-search URL for a service
// type; the API key comes from the NHS_API_KEY environment variable.
func buildNHSServicesPostcodeUri(serviceType, postcode string) string {
	return fmt.Sprintf("%s/services/types/%s/postcode/%s.xml?apikey=%s", NHSBaseAPI, serviceType, postcode, os.Getenv("NHS_API_KEY"))
}
// extractServiceIdFromUrl returns the final path segment of an Atom entry
// id URL, which is the numeric service id. A URL with no slash is returned
// unchanged.
func extractServiceIdFromUrl(url string) string {
	return url[strings.LastIndex(url, "/")+1:]
}
|
package core
import (
"../models"
"encoding/json"
"net/http"
)
// Login authenticates requestUser and, on success, returns 200 with a
// JSON-encoded JWT payload; otherwise 401 with an error payload.
func Login(requestUser *models.User) (int, []byte) {
	authBackend := InitJWTAuthenticationBackend()
	if authBackend.Authenticate(requestUser) {
		id := "12345" // TODO: look up the real user id in the DB
		token, err := authBackend.Generate(id)
		if err != nil {
			return http.StatusInternalServerError, []byte("")
		}
		// Marshal of a plain token wrapper cannot realistically fail; the
		// error is deliberately ignored (matching existing behavior).
		response, _ := json.Marshal(models.TokenAuthentication{token})
		return http.StatusOK, response
	}
	response, _ := json.Marshal(models.ErrorMessage{"User not found"})
	return http.StatusUnauthorized, response
}
// RefreshToken issues a fresh JWT for the (currently hard-coded) user id.
// TODO: resolve the real user id from the DB, and replace the panics with
// proper error returns so a marshalling failure cannot crash the server.
func RefreshToken(requestUser *models.User) []byte {
	id := "12345" // TODO: get id from DB
	authBackend := InitJWTAuthenticationBackend()
	token, err := authBackend.Generate(id)
	if err != nil {
		panic(err)
	}
	response, err := json.Marshal(models.TokenAuthentication{token})
	if err != nil {
		panic(err)
	}
	return response
}
//TODO Logout func should be coded
//func Logout(r *http.Request) error {
// authBackend := InitJWTAuthenticationBackend()
// tokenRequest, err := request.ParseFromRequest(r, request.OAuth2Extractor, func(token *jwt.Token) (interface{}, error){
// return authBackend.PublicKey, nil
// })
// if err != nil {
// return err
// }
// tokenString := r.Header.Get("Authorization")
//
//} |
/*
Copyright (c) 2017 Simon Schmidt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package file
import "os"
import "sync"
import "sync/atomic"
// File is a reference-counted, lazily opened wrapper around os.File.
// Open increments the count and opens the OS file on the 0→1 transition;
// Close decrements it and closes the file on the 1→0 transition, so the
// embedded *os.File is shared by all holders in between.
type File struct {
	refc int32      // reference count, manipulated atomically
	lock sync.Mutex // serializes open/close of the embedded *os.File
	*os.File
	name string      // path passed to os.OpenFile on (re)open
	flag int         // open flags passed to os.OpenFile
	perm os.FileMode // permissions passed to os.OpenFile
}
// OpenFile returns a lazily opened, reference-counted file handle; the
// underlying os.File is not touched until the first call to Open.
func OpenFile(name string, flag int, perm os.FileMode) *File {
	return &File{
		name: name,
		flag: flag,
		perm: perm,
	}
}
// Close drops one reference. Only the final Close (count reaching zero)
// actually closes the underlying os.File; earlier calls are no-ops.
// Always returns nil — the underlying Close error is discarded.
func (f *File) Close() error {
	// Fast path: other references remain, nothing to tear down yet.
	if atomic.AddInt32(&f.refc,-1)!=0 { return nil }
	f.lock.Lock(); defer f.lock.Unlock()
	// NOTE(review): between the counter hitting zero and acquiring the lock,
	// a concurrent Open may have re-opened the file; the nil check below only
	// partially guards that window — confirm the intended semantics.
	if f.File==nil { return nil }
	f.File.Close()
	f.File = nil
	return nil
}
// Open adds one reference. Only the first reference (count 0→1) opens the
// OS file; later calls reuse the already-open handle and return nil.
// If the open fails, the reference is released again and the error returned.
func (f *File) Open() error {
	// Fast path: some other holder already opened (or is opening) the file.
	if atomic.AddInt32(&f.refc,1)!=1 { return nil }
	f.lock.Lock(); defer f.lock.Unlock()
	g,e := os.OpenFile(f.name,f.flag,f.perm)
	if e!=nil {
		// Roll back the reference taken above so a later Open retries.
		atomic.AddInt32(&f.refc,-1)
		return e
	}
	f.File = g
	return nil
}
|
package main
import (
"fmt"
"time"
)
/*
In this example we show how to close a channel.
*/
// main consumes values from the channel until the producer closes it;
// the range loop exits automatically on close.
func main() {
	c := make(chan int)
	go doSomething(c)
	for v := range c {
		fmt.Printf("value read from channel is %d\n", v)
		time.Sleep(1 * time.Second)
	}
	fmt.Println("Main go routine end")
}
// doSomething writes a single value to c and then closes it. Closing from
// the sender side is what lets the receiver's range loop terminate.
func doSomething(c chan int) {
	for i := 0; i < 2; i++ {
		switch i {
		case 0:
			fmt.Println("#go routine I'm doing something...", i)
			time.Sleep(1 * time.Second)
			c <- i
			fmt.Println("#go routine value written on channel is ", i)
		case 1:
			fmt.Println("#go routine error close the channel...", i)
			time.Sleep(1 * time.Second)
			close(c)
		}
	}
}
|
package ds
/**
*
Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
You may assume that the array is non-empty and the majority element always exist in the array.
Example 1:
Input: [3,2,3]
Output: 3
Example 2:
Input: [2,2,1,1,1,2,2]
Output: 2
*
*
*/
// majorityElement takes nums []int and returns the majority value.
// Approach 1: sort the slice and take the middle element (O(n log n)):
//func majorityElement(nums []int) int {
// length := len(nums)
// number := length / 2
// sort.Ints(nums)
// fmt.Println(nums)
// return nums[number]
//}
// Approach 2: Boyer-Moore majority vote (O(n) time, O(1) space).
// majorityElement returns the element appearing more than ⌊n/2⌋ times,
// using the Boyer-Moore majority vote algorithm: O(n) time, O(1) space.
// The input is assumed non-empty with a guaranteed majority element.
func majorityElement(nums []int) int {
	count := 1
	val := nums[0]
	for _, n := range nums[1:] {
		switch {
		case count == 0:
			// Previous candidate fully cancelled out; adopt a new one.
			val = n
			count = 1
		case n == val:
			count++
		default:
			// The original ended with a redundant `else if nums[i] != val`;
			// at this point inequality is already implied.
			count--
		}
	}
	return val
}
|
package tccp
// VPC is the Go text/template fragment (define "vpc") rendering the
// CloudFormation resources for the guest cluster network: the VPC itself,
// a peering connection back to the host account, and an S3 gateway
// endpoint whose policy allows bucket- and object-level s3 actions.
const VPC = `
{{define "vpc"}}
{{- $v := .Guest.VPC }}
  VPC:
    Type: AWS::EC2::VPC
    Properties:
      CidrBlock: {{ $v.CidrBlock }}
      EnableDnsSupport: 'true'
      EnableDnsHostnames: 'true'
      Tags:
      - Key: Name
        Value: {{ $v.ClusterID }}
      - Key: Installation
        Value: {{ $v.InstallationName }}
  VPCPeeringConnection:
    Type: 'AWS::EC2::VPCPeeringConnection'
    Properties:
      VpcId: !Ref VPC
      PeerVpcId: {{ $v.PeerVPCID }}
      PeerOwnerId: '{{ $v.HostAccountID }}'
      PeerRoleArn: {{ $v.PeerRoleArn }}
      Tags:
      - Key: Name
        Value: {{ $v.ClusterID }}
  VPCS3Endpoint:
    Type: 'AWS::EC2::VPCEndpoint'
    Properties:
      VpcId: !Ref VPC
      RouteTableIds:
        {{- range $v.RouteTableNames }}
        - !Ref {{ .ResourceName }}
        {{- end}}
      ServiceName: 'com.amazonaws.{{ $v.Region }}.s3'
      PolicyDocument:
        Version: "2012-10-17"
        Statement:
        - Sid: "{{ $v.ClusterID }}-vpc-s3-endpoint-policy-bucket"
          Principal : "*"
          Effect: "Allow"
          Action: "s3:*"
          Resource: "arn:{{ $v.RegionARN }}:s3:::*"
        - Sid: "{{ $v.ClusterID }}-vpc-s3-endpoint-policy-object"
          Principal : "*"
          Effect: "Allow"
          Action: "s3:*"
          Resource: "arn:{{ $v.RegionARN }}:s3:::*/*"
{{end}}
`
|
package main
import (
"bufio"
"flag"
"fmt"
"log"
"os"
"regexp"
)
// equalSlice reports whether two string slices have the same length and
// the same elements in the same order. Two empty/nil slices are equal.
func equalSlice(compare1 []string, compare2 []string) bool {
	if len(compare1) != len(compare2) {
		return false
	}
	// Idiom fix: replaced the manual counter loop and else-after-return
	// with a range loop over indices.
	for i := range compare1 {
		if compare1[i] != compare2[i] {
			return false
		}
	}
	return true
}
// getBetween returns, for each input line containing a <tag>…</tag> pair,
// the text of the FIRST such pair on that line. The (?s) flag lets the
// capture span newlines embedded within a single element.
func getBetween(input []string, tag string) (list []string) {
	left := "<" + tag + ">"
	right := "</" + tag + ">"
	rx := regexp.MustCompile(`(?s)` + regexp.QuoteMeta(left) + `(.*?)` + regexp.QuoteMeta(right))
	for _, line := range input {
		if m := rx.FindStringSubmatch(line); m != nil {
			list = append(list, m[1])
		}
	}
	return list
}
// addStem prefixes every entry of list with stem, returning a new slice of
// the same length; the input slice is left untouched.
func addStem(list []string, stem string) []string {
	links := make([]string, len(list))
	for i, item := range list {
		links[i] = stem + item
	}
	return links
}
// A function to read XML
func readXml(filepath string) []string {
file, err := os.Open(filepath)
if err != nil {
fmt.Println(err)
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines
}
// A function to write the output to a file
func writeFile(filepath string, list []string) {
f, err := os.Create(filepath)
if err != nil {
log.Fatal(err)
}
defer f.Close()
i := 0
for i < len(list) {
_, err2 := f.WriteString(list[i] + "\n")
if err2 != nil {
log.Fatal(err2)
}
i += 1
}
fmt.Println("File written successfully to " + filepath)
}
// main wires the pipeline: read the XML file, pull out the text between
// the requested tag pairs, prefix each with the URL stem, then either
// write the result to output.txt (-o) or print it.
func main() {
	outputPointer := flag.Bool("o", false, "If the output goes to a file, else will print")
	inputPointer := flag.String("i", "./testFiles/plant_catalog.xml", "The input filepath for the XML file")
	tagPointer := flag.String("t", "COMMON", "The tag to search between")
	stemPointer := flag.String("s", "", "The stem of the url")
	flag.Parse()
	links := addStem(getBetween(readXml(*inputPointer), *tagPointer), *stemPointer)
	if *outputPointer {
		writeFile("output.txt", links)
		return
	}
	for _, link := range links {
		fmt.Println(link)
	}
}
|
package models
import (
"github.com/lempiy/echo_api/types"
"github.com/lempiy/echo_api/utils"
)
// user is an empty receiver type grouping the person-table queries.
type user struct{}
// User is the shared entry point for user queries. Note it is nil —
// the methods carry no state and never dereference the receiver.
var User *user
// Create inserts a new person row; the password is stored encrypted and
// created_date is set to now() by the database. Returns the driver error,
// if any.
func (u *user) Create(user *types.User) error {
	encryptPass := utils.EncryptPassword(user.Password)
	sqlQuery := `INSERT INTO person(username, password, login, age, telephone, created_date)
	VALUES($1,$2,$3,$4,$5,now());`
	err := Database.SingleQuery(sqlQuery, user.Username, encryptPass, user.Login,
		user.Age, user.Telephone)
	return err
}
// Read loads the person row with the given id.
// NOTE(review): when no row matches, this returns a zero-valued User with a
// nil error, so callers cannot distinguish "not found" from an empty user;
// rows.Err() is also left unchecked after iteration — confirm intended.
func (u *user) Read(id int) (*types.User, error) {
	var user types.User
	// SELECT * relies on the table's column order matching the Scan order.
	sqlQuery := `SELECT * FROM person WHERE id=$1;`
	rows := Database.Query(sqlQuery, id)
	defer rows.Close()
	if rows.Next() {
		err := rows.Scan(&user.ID,&user.Username,&user.Password,&user.Login,&user.Age,&user.Telephone,&user.CreatedDate)
		if err != nil {
			return nil, err
		}
	}
	return &user, nil
}
// ReadByLogin loads the person row with the given login, selecting columns
// explicitly in Scan order (unlike Read's SELECT *).
// NOTE(review): same "not found" ambiguity as Read — a missing row yields a
// zero-valued User with nil error, and rows.Err() goes unchecked.
func (u *user) ReadByLogin(login string) (*types.User, error) {
	var user types.User
	sqlQuery := `SELECT p.id, p.username, p.password, p.login,
	p.age, p.telephone, p.created_date FROM person p WHERE login=$1;`
	rows := Database.Query(sqlQuery, login)
	defer rows.Close()
	if rows.Next() {
		err := rows.Scan(&user.ID,&user.Username,&user.Password,&user.Login,&user.Age,&user.Telephone,&user.CreatedDate)
		if err != nil {
			return nil, err
		}
	}
	return &user, nil
}
|
package main
import (
"fmt"
"github.com/liuzl/ling"
)
// nlp is a shared pipeline configured with the normalization annotator.
var nlp = ling.MustNLP(ling.Norm)
// main annotates a mixed Chinese/Thai sample string and prints each
// resulting token with its detected type.
func main() {
	text := "北京ทันทุกเหตุการ有限公司"
	d := ling.NewDocument(text)
	if err := nlp.Annotate(d); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(text)
	for i, token := range d.Tokens {
		fmt.Println(i, token, token.Type)
	}
}
|
package main
import (
"fmt"
"math"
)
func Sqrt(x float64) float64 {
var low float64 = 0
var hight float64 = x
var mid float64 = (hight+low)/2
var precise float64 = 0.01
for math.Abs(mid*mid-x)>precise{
fmt.Println(low,hight,mid,precise)
fmt.Println(math.Abs(mid*mid-x))
if mid*mid > x{
hight = mid
}else{
low = mid
}
mid = (hight+low)/2
}
return mid
}
// main prints the bisection approximation of sqrt(2).
func main() {
	fmt.Println(Sqrt(2))
}
|
package custPkg
// Min returns the smallest value in arry.
// Panics (index out of range) on an empty slice, matching prior behavior.
func Min(arry []float64) float64 {
	// Idiom fix: seed directly from the first element instead of assigning
	// a dead 0.0 first, and range over the remainder.
	min := arry[0]
	for _, v := range arry[1:] {
		if v < min {
			min = v
		}
	}
	return min
}
// Max returns the largest value in arry.
// Panics (index out of range) on an empty slice, matching prior behavior.
func Max(arry []float64) float64 {
	// Idiom fix: seed directly from the first element instead of assigning
	// a dead 0.0 first, and range over the remainder.
	max := arry[0]
	for _, v := range arry[1:] {
		if v > max {
			max = v
		}
	}
	return max
}
|
// Copyright 2021 The image-cloner Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"net/http"
"os"
"github.com/gauravgahlot/image-cloner/internal/docker"
)
// Server defines the basic operations for image-cloner server.
type Server interface {
	Serve() error
}
// server is the concrete Server backed by net/http plus a docker client.
type server struct {
	httpServer http.Server // TLS-terminating HTTP server (addr/TLS from Config)
	client docker.Client // docker client used by the request handlers
	registryUser string // registry user resolved from the docker config
	registry string // target registry, read from the REGISTRY env var
}
// Setup initializes and returns a server; error otherwise.
// It creates the docker client, captures registry settings, and registers
// the /readyz and /clone-image handlers.
// NOTE(review): handlers are registered on http.DefaultServeMux while the
// embedded http.Server has no explicit Handler, so it falls back to the
// default mux — functional, but it relies on process-global state.
func Setup(cfg Config) (Server, error) {
	client, err := docker.CreateClient()
	if err != nil {
		return nil, err
	}
	s := server{
		client: client,
		registryUser: docker.RegistryUser(),
		registry: os.Getenv("REGISTRY"),
		httpServer: http.Server{
			Addr: cfg.Addr,
			TLSConfig: configTLS(cfg),
		},
	}
	http.HandleFunc("/readyz", s.readyz)
	http.HandleFunc("/clone-image", s.cloneImage)
	return &s, nil
}
// Serve starts the HTTPS listener and blocks until the server stops.
// The cert/key file arguments are empty because the certificates come from
// the TLSConfig supplied at Setup time.
func (s *server) Serve() error {
	return s.httpServer.ListenAndServeTLS("", "")
}
|
package main
import "fmt"
// main demonstrates the %v (default value) and %T (dynamic type) fmt verbs.
func main() {
	i := 10
	fmt.Printf("%v and %T\n", i, i)
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"strings"
)
const (
	// rootPackage is the repository's root import path; any import outside
	// this tree is treated as a built-in/external package and never restricted.
	rootPackage = "github.com/openshift/origin"
)
// Package is a subset of cmd/go.Package
type Package struct {
	ImportPath string `json:",omitempty"` // import path of package in dir
	Imports []string `json:",omitempty"` // import paths used by this package
	TestImports []string `json:",omitempty"` // imports from TestGoFiles
	XTestImports []string `json:",omitempty"` // imports from XTestGoFiles
}
// ImportRestriction is one entry of the JSON configuration file. The
// *Packages fields are matched exactly; the *PackageRoots fields are
// matched as path prefixes (see isAllowed / isIncludedInRestrictedPackages).
type ImportRestriction struct {
	// CheckedPackageRoots are the roots of the package tree
	// that are restricted by this configuration
	CheckedPackageRoots []string `json:"checkedPackageRoots"`
	// CheckedPackages are the specific packages
	// that are restricted by this configuration
	CheckedPackages []string `json:"checkedPackages"`
	// IgnoredSubTrees are roots of sub-trees of the
	// BaseImportPath for which we do not want to enforce
	// any import restrictions whatsoever
	IgnoredSubTrees []string `json:"ignoredSubTrees,omitempty"`
	// AllowedImportPackages are roots of package trees that
	// are allowed to be imported for this restriction
	AllowedImportPackages []string `json:"allowedImportPackages"`
	// AllowedImportPackageRoots are roots of package trees that
	// are allowed to be imported for this restriction
	AllowedImportPackageRoots []string `json:"allowedImportPackageRoots"`
	// ForbiddenImportPackageRoots are roots of package trees that
	// are NOT allowed to be imported for this restriction
	ForbiddenImportPackageRoots []string `json:"forbiddenImportPackageRoots"`
}
// ForbiddenImportsFor determines all of the forbidden imports for a
// package given the import restrictions; packages outside this
// restriction's scope yield an empty list.
func (i *ImportRestriction) ForbiddenImportsFor(pkg Package) []string {
	if i.isRestrictedPath(pkg.ImportPath) {
		return i.forbiddenImportsFor(pkg)
	}
	return []string{}
}
// isRestrictedPath determines if the import path has
// any restrictions placed on it by this configuration.
// A path will be restricted if:
// - it falls under the base import path
// - it does not fall under any of the ignored sub-trees
func (i *ImportRestriction) isRestrictedPath(packageToCheck string) bool {
	// if its not under our root, then its a built-in. Everything else is under
	// github.com/openshift/origin or github.com/openshift/origin/vendor
	if !strings.HasPrefix(packageToCheck, rootPackage) {
		return false
	}
	// some subtrees are specifically excluded. Not sure if we still need this given
	// explicit inclusion
	for _, ignored := range i.IgnoredSubTrees {
		if strings.HasPrefix(packageToCheck, ignored) {
			return false
		}
	}
	return true
}
// forbiddenImportsFor determines all of the forbidden imports for a
// package given the import restrictions and returns a deduplicated list.
// Fixes: iterating the three import groups separately (instead of
// appending TestImports/XTestImports onto pkg.Imports) avoids mutating the
// caller's pkg.Imports backing array, and results now come back in
// deterministic encounter order rather than random map order.
func (i *ImportRestriction) forbiddenImportsFor(pkg Package) []string {
	seen := map[string]struct{}{}
	var forbiddenImports []string
	for _, group := range [][]string{pkg.Imports, pkg.TestImports, pkg.XTestImports} {
		for _, packageToCheck := range group {
			if i.isAllowed(packageToCheck) {
				continue
			}
			rel := relativePackage(packageToCheck)
			if _, dup := seen[rel]; dup {
				continue
			}
			seen[rel] = struct{}{}
			forbiddenImports = append(forbiddenImports, rel)
		}
	}
	return forbiddenImports
}
// isAllowed reports whether packageToCheck may be imported under this
// restriction. (Doc fix: the old comment described "isForbidden".)
// Precedence, in order:
// 1. anything outside rootPackage (built-in/external) is allowed;
// 2. packages inside the checked (restricted) group are allowed;
// 3. ForbiddenImportPackageRoots deny by prefix — and win over the
//    allow lists below;
// 4. AllowedImportPackages allow by exact match;
// 5. AllowedImportPackageRoots allow by prefix;
// otherwise the import is forbidden. "vendor"-prefixed configuration
// entries are rewritten to full repo-rooted paths before matching.
func (i *ImportRestriction) isAllowed(packageToCheck string) bool {
	// if its not under our root, then its a built-in. Everything else is under
	// github.com/openshift/origin or github.com/openshift/origin/vendor
	if !strings.HasPrefix(packageToCheck, rootPackage) {
		return true
	}
	if i.isIncludedInRestrictedPackages(packageToCheck) {
		return true
	}
	for _, forbiddenPackageRoot := range i.ForbiddenImportPackageRoots {
		if strings.HasPrefix(forbiddenPackageRoot, "vendor") {
			forbiddenPackageRoot = rootPackage + "/" + forbiddenPackageRoot
		}
		if strings.HasPrefix(packageToCheck, forbiddenPackageRoot) {
			return false
		}
	}
	for _, allowedPackage := range i.AllowedImportPackages {
		if strings.HasPrefix(allowedPackage, "vendor") {
			allowedPackage = rootPackage + "/" + allowedPackage
		}
		if packageToCheck == allowedPackage {
			return true
		}
	}
	for _, allowedPackageRoot := range i.AllowedImportPackageRoots {
		if strings.HasPrefix(allowedPackageRoot, "vendor") {
			allowedPackageRoot = rootPackage + "/" + allowedPackageRoot
		}
		if strings.HasPrefix(packageToCheck, allowedPackageRoot) {
			return true
		}
	}
	return false
}
// isIncludedInRestrictedPackages checks to see if a package is included in the list of packages we're
// restricting. Any package being restricted is assumed to be allowed to import another package being
// restricted since they are grouped.
// Roots match by prefix; CheckedPackages match exactly; ignored sub-trees
// are excluded first.
func (i *ImportRestriction) isIncludedInRestrictedPackages(packageToCheck string) bool {
	// some subtrees are specifically excluded. Not sure if we still need this given
	// explicit inclusion
	for _, ignored := range i.IgnoredSubTrees {
		if strings.HasPrefix(packageToCheck, ignored) {
			return false
		}
	}
	for _, currBase := range i.CheckedPackageRoots {
		if strings.HasPrefix(packageToCheck, currBase) {
			return true
		}
	}
	for _, currPackageName := range i.CheckedPackages {
		if currPackageName == packageToCheck {
			return true
		}
	}
	return false
}
// relativePackage reports vendored import paths relative to the repo root
// (dropping "github.com/openshift/origin/"); all other paths pass through.
func relativePackage(absolutePackage string) string {
	if strings.HasPrefix(absolutePackage, rootPackage+"/vendor") {
		return strings.TrimPrefix(absolutePackage, rootPackage+"/")
	}
	return absolutePackage
}
// main loads the restriction config named on the command line, resolves
// every checked package tree/package via `go list`, validates each
// package's imports, reports unused allow-list entries, and exits 1 if
// any restriction was violated.
func main() {
	if len(os.Args) != 2 {
		log.Fatalf("%s requires the configuration file as it's only argument", os.Args[0])
	}
	configFile := os.Args[1]
	importRestrictions, err := loadImportRestrictions(configFile)
	if err != nil {
		log.Fatalf("Failed to load import restrictions: %v", err)
	}
	failedRestrictionCheck := false
	for _, restriction := range importRestrictions {
		// Gather the package set for this restriction: whole trees first,
		// then individually-listed packages, deduplicated by import path.
		packages := []Package{}
		for _, currBase := range restriction.CheckedPackageRoots {
			log.Printf("Inspecting imports under %s...\n", currBase)
			currPackages, err := resolvePackage(currBase + "/...")
			if err != nil {
				log.Fatalf("Failed to resolve package tree %v: %v", currBase, err)
			}
			packages = mergePackages(packages, currPackages)
		}
		for _, currPackageName := range restriction.CheckedPackages {
			log.Printf("Inspecting imports at %s...\n", currPackageName)
			currPackages, err := resolvePackage(currPackageName)
			if err != nil {
				log.Fatalf("Failed to resolve package %v: %v", currPackageName, err)
			}
			packages = mergePackages(packages, currPackages)
		}
		if len(packages) == 0 {
			log.Fatalf("No packages found")
		}
		log.Printf("-- validating imports for %d packages in the tree", len(packages))
		for _, pkg := range packages {
			if forbidden := restriction.ForbiddenImportsFor(pkg); len(forbidden) != 0 {
				logForbiddenPackages(relativePackage(pkg.ImportPath), forbidden)
				failedRestrictionCheck = true
			}
		}
		// make sure that all the allowed imports are used
		if unused := unusedPackageImports(restriction.AllowedImportPackages, packages); len(unused) > 0 {
			log.Printf("-- found unused package imports\n")
			for _, unusedPackage := range unused {
				log.Printf("\t%s\n", unusedPackage)
			}
			failedRestrictionCheck = true
		}
		if unused := unusedPackageImportRoots(restriction.AllowedImportPackageRoots, packages); len(unused) > 0 {
			log.Printf("-- found unused package import roots\n")
			for _, unusedPackage := range unused {
				log.Printf("\t%s\n", unusedPackage)
			}
			failedRestrictionCheck = true
		}
		log.Printf("\n")
	}
	if failedRestrictionCheck {
		os.Exit(1)
	}
}
// unusedPackageImports returns the entries of allowedPackageImports that no
// package in packages actually imports, reported relative to the repo root.
// Fixes: the original inner `break` only exited the innermost loop, so every
// remaining package was still scanned after a match (now a labeled break);
// and appending test imports onto pkg.Imports could clobber its backing
// array (now the three groups are walked separately).
func unusedPackageImports(allowedPackageImports []string, packages []Package) []string {
	ret := []string{}
	for _, allowedImport := range allowedPackageImports {
		if strings.HasPrefix(allowedImport, "vendor") {
			allowedImport = rootPackage + "/" + allowedImport
		}
		found := false
	scan:
		for _, pkg := range packages {
			for _, group := range [][]string{pkg.Imports, pkg.TestImports, pkg.XTestImports} {
				for _, packageToCheck := range group {
					if packageToCheck == allowedImport {
						found = true
						break scan
					}
				}
			}
		}
		if !found {
			ret = append(ret, relativePackage(allowedImport))
		}
	}
	return ret
}
// unusedPackageImportRoots returns the entries of allowedPackageImportRoots
// under which no package in packages imports anything, reported relative to
// the repo root. Same fixes as unusedPackageImports: labeled break to stop
// scanning once a match is found, and separate iteration of the three
// import groups to avoid mutating pkg.Imports' backing array.
func unusedPackageImportRoots(allowedPackageImportRoots []string, packages []Package) []string {
	ret := []string{}
	for _, allowedImportRoot := range allowedPackageImportRoots {
		if strings.HasPrefix(allowedImportRoot, "vendor") {
			allowedImportRoot = rootPackage + "/" + allowedImportRoot
		}
		found := false
	scan:
		for _, pkg := range packages {
			for _, group := range [][]string{pkg.Imports, pkg.TestImports, pkg.XTestImports} {
				for _, packageToCheck := range group {
					if strings.HasPrefix(packageToCheck, allowedImportRoot) {
						found = true
						break scan
					}
				}
			}
		}
		if !found {
			ret = append(ret, relativePackage(allowedImportRoot))
		}
	}
	return ret
}
// mergePackages appends the packages from currPackages that are not already
// present (by ImportPath) in existingPackages, logging skipped duplicates.
// Perf fix: the original re-scanned the growing slice for every candidate
// (O(n·m)); a set of seen import paths makes each merge O(1).
func mergePackages(existingPackages, currPackages []Package) []Package {
	seen := make(map[string]bool, len(existingPackages))
	for _, p := range existingPackages {
		seen[p.ImportPath] = true
	}
	for _, currPackage := range currPackages {
		if seen[currPackage.ImportPath] {
			log.Printf("-- Skipping: %v", currPackage.ImportPath)
			continue
		}
		seen[currPackage.ImportPath] = true
		existingPackages = append(existingPackages, currPackage)
	}
	return existingPackages
}
// loadImportRestrictions reads configFile and decodes it as a JSON array
// of ImportRestriction entries.
func loadImportRestrictions(configFile string) ([]ImportRestriction, error) {
	raw, err := ioutil.ReadFile(configFile)
	if err != nil {
		return nil, fmt.Errorf("failed to load configuration from %s: %v", configFile, err)
	}
	var restrictions []ImportRestriction
	if err := json.Unmarshal(raw, &restrictions); err != nil {
		return nil, fmt.Errorf("failed to unmarshal from %s: %v", configFile, err)
	}
	return restrictions, nil
}
// resolvePackage shells out to `go list -json` for targetPackage (which
// may be a /... tree pattern) and decodes the streamed package metadata.
func resolvePackage(targetPackage string) ([]Package, error) {
	const cmd = "go"
	args := []string{"list", "-json", targetPackage}
	stdout, err := exec.Command(cmd, args...).Output()
	if err != nil {
		return nil, fmt.Errorf("Failed to run `%s %s`: %v\n", cmd, strings.Join(args, " "), err)
	}
	pkgs, err := decodePackages(bytes.NewReader(stdout))
	if err != nil {
		return nil, fmt.Errorf("Failed to decode packages: %v", err)
	}
	return pkgs, nil
}
// decodePackages parses the output of `go list -json`, which is a stream
// of concatenated JSON objects rather than one valid JSON array, so each
// object is decoded in turn until the stream is exhausted.
func decodePackages(r io.Reader) ([]Package, error) {
	decoder := json.NewDecoder(r)
	var packages []Package
	for decoder.More() {
		pkg := Package{}
		if err := decoder.Decode(&pkg); err != nil {
			return nil, fmt.Errorf("invalid package: %v", err)
		}
		packages = append(packages, pkg)
	}
	return packages, nil
}
// logForbiddenPackages prints a header for the offending package followed
// by one indented line per forbidden import.
func logForbiddenPackages(base string, forbidden []string) {
	log.Printf("-- found forbidden imports for %s:\n", base)
	for _, imp := range forbidden {
		log.Printf("\t%s\n", imp)
	}
}
|
package web
import (
"encoding/json"
"fmt"
"net/http"
"github.com/siggy/bbox/beatboxer/render"
log "github.com/sirupsen/logrus"
)
// Web renders beatboxer state to browser clients over websockets through a
// central hub, and relays phone events coming back from them.
type Web struct {
	hub *Hub // fan-out hub shared by all websocket connections
}
// InitWeb starts the websocket hub and an HTTP server on :8080 that
// serves the static UI pages and the /ws websocket endpoint, returning a
// Web handle wrapping the hub.
// NOTE(review): the server goroutine has no shutdown path, and log.Fatal
// inside it terminates the whole process if the listen fails.
func InitWeb() *Web {
	log.Debugf("InitWeb")
	hub := newHub()
	// The hub's event loop runs for the lifetime of the process.
	go hub.run()
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "beatboxer/render/web/color.html")
	})
	http.HandleFunc("/beatboxer", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "beatboxer/render/web/beatboxer.html")
	})
	http.HandleFunc("/jquery-1.11.1.js", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "beatboxer/render/web/jquery-1.11.1.js")
	})
	http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
		serveWs(hub, w, r)
	})
	go func() {
		err := http.ListenAndServe(":8080", nil)
		if err != nil {
			log.Fatal("ListenAndServe: ", err)
		}
	}()
	return &Web{
		hub: hub,
	}
}
// Render marshals the frame and hands it to the hub asynchronously so a
// slow websocket consumer never blocks the render loop; marshal failures
// are printed and the frame dropped.
func (w *Web) Render(state render.State) {
	payload, err := json.Marshal(state)
	if err != nil {
		fmt.Println(err)
		return
	}
	go func() { w.hub.render <- payload }()
}
// Phone exposes the hub's phone-event stream as a receive-only channel.
func (w *Web) Phone() <-chan phoneEvent {
	return w.hub.phoneEvents
}
// func (w *Web) Init(
// freq int,
// gpioPin1 int, ledCount1 int, brightness1 int,
// gpioPin2 int, ledCount2 int, brightness2 int,
// ) error {
// return nil
// }
// func (w *Web) Fini() {
// }
// func (w *Web) Render() error {
// return nil
// }
// func (w *Web) Wait() error {
// return nil
// }
// func (w *Web) SetLed(channel int, index int, value uint32) {
// w.hub.send(fmt.Sprintf("%d: %3d %d", channel, index, value))
// }
// func (w *Web) Clear() {
// }
// func (w *Web) SetBitmap(channel int, a []uint32) {
// }
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
)
// coords addresses one cell of the light grid.
type coords struct {
	row int
	col int
}
// arr is the 1000x1000 light grid (0 = off, 1 = on); grid is a slice view
// over its rows for convenient iteration.
var arr = [1000][1000]int{}
var grid = arr[:]
// main reads puzzle instructions line-by-line from ../input, applies each
// to the grid, and prints how many lights end up on.
// NOTE(review): scanner.Err() is not checked after the loop, so a read
// error would silently truncate the input.
func main() {
	file, err := os.Open("../input")
	if err != nil {
		log.Fatalln("Cannot read file", err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		s := scanner.Text()
		light(s)
	}
	fmt.Println(howManyAreLit())
}
// light applies a single instruction to the grid. The grammar is:
//
//	"toggle A,B through C,D"
//	"turn on|off A,B through C,D"
//
// Unrecognized lines are silently ignored.
func light(s string) {
	fields := strings.Split(s, " ")
	switch fields[0] {
	case "toggle":
		toggle(getCoords(fields[1]), getCoords(fields[3]))
	case "turn":
		switch fields[1] {
		case "on":
			turnOn(getCoords(fields[2]), getCoords(fields[4]))
		case "off":
			turnOff(getCoords(fields[2]), getCoords(fields[4]))
		}
	}
}
// getCoords parses a "row,col" pair into a coords value.
// Fix: the Atoi errors were silently discarded, so malformed input turned
// into coordinate 0 and corrupted the grid; now the run aborts instead.
// (A pair without a comma still panics on the index, as before.)
func getCoords(pair string) coords {
	a := strings.Split(pair, ",")
	row, err := strconv.Atoi(a[0])
	if err != nil {
		log.Fatalln("bad coordinate pair", pair, err)
	}
	col, err := strconv.Atoi(a[1])
	if err != nil {
		log.Fatalln("bad coordinate pair", pair, err)
	}
	return coords{row, col}
}
// toggle flips every light in the inclusive rectangle from..to
// (1-x maps 0→1 and 1→0).
func toggle(from, to coords) {
	for r := from.row; r <= to.row; r++ {
		for c := from.col; c <= to.col; c++ {
			grid[r][c] = 1 - grid[r][c]
		}
	}
}
// turnOn switches every light in the inclusive rectangle from..to on.
func turnOn(from, to coords) {
	for r := from.row; r <= to.row; r++ {
		for c := from.col; c <= to.col; c++ {
			grid[r][c] = 1
		}
	}
}
// turnOff switches every light in the inclusive rectangle from..to off.
func turnOff(from, to coords) {
	for r := from.row; r <= to.row; r++ {
		for c := from.col; c <= to.col; c++ {
			grid[r][c] = 0
		}
	}
}
// howManyAreLit counts the lights currently on across the whole grid.
func howManyAreLit() int {
	total := 0
	for _, row := range grid {
		for _, cell := range row {
			if cell == 1 {
				total++
			}
		}
	}
	return total
}
|
// log包实现了基于logrus的日志管理器
package log
import (
"fmt"
"github.com/ebar-go/ego/component/trace"
"github.com/ebar-go/ego/utils"
"github.com/ebar-go/ego/utils/date"
"github.com/ebar-go/ego/utils/file"
"github.com/sirupsen/logrus"
"os"
"path/filepath"
)
// Logger is the leveled, context-carrying logging interface exposed by this
// package. Each call attaches the per-call Context plus any extends set via
// SetExtends.
type Logger interface {
	Info(message string, context Context)
	Debug(message string, context Context)
	Warn(message string, context Context)
	Error(message string, context Context)
	Fatal(message string, context Context)
	SetExtends(extends Context)
}
// logger is the logrus-backed Logger implementation.
type logger struct {
	instance *logrus.Logger // underlying logrus instance
	extends  Context        // fields merged into every log record
}

// Context carries structured key/value fields attached to a log record.
type Context map[string]interface{}
// New returns the default Logger, emitting JSON records to the console.
func New() Logger {
	return &logger{instance: defaultInstance()}
}
// NewFileLogger returns a Logger that appends JSON records to filePath,
// creating the parent directory when the file does not yet exist. On any
// setup failure it reports the problem to stdout and falls back to the
// default console output.
func NewFileLogger(filePath string) Logger {
	l := &logger{instance: defaultInstance()}
	if !file.Exist(filePath) {
		if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
			fmt.Printf("Failed to init logger:%s,%s\n", filePath, err.Error())
			return l
		}
	}
	f, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, os.ModePerm)
	if err != nil {
		fmt.Printf("Failed to init logger:%s,%s\n", filePath, err.Error())
		return l
	}
	// The file handle is deliberately kept open for the logger's lifetime.
	l.instance.Out = f
	fmt.Printf("Init Logger Success:%s\n", filePath)
	return l
}
// defaultInstance builds the baseline logrus instance: JSON output with
// renamed standard fields and debug-level verbosity.
func defaultInstance() *logrus.Logger {
	instance := logrus.New()
	// Emit JSON, mapping logrus's default field names to this project's schema.
	instance.SetFormatter(&logrus.JSONFormatter{
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "datetime",
			logrus.FieldKeyLevel: "level_name",
			logrus.FieldKeyMsg:   "message",
			logrus.FieldKeyFunc:  "caller",
		},
		TimestampFormat: date.TimeFormat,
	})
	// Debug level: every record is emitted; callers filter by level name.
	instance.Level = logrus.DebugLevel
	return instance
}
// SetExtends replaces the fields merged into every subsequent log record.
func (l *logger) SetExtends(extends Context) {
	l.extends = extends
}
// withFields merges the logger's extends with the per-call context, injects
// a trace_id when the caller did not provide one, and returns a logrus
// entry ready to emit.
func (l *logger) withFields(context Context) *logrus.Entry {
	// Guard against a nil Context: the trace_id assignment below would
	// otherwise panic (assignment to entry in nil map) — every Logger
	// method funnels through here, so a single Debug(msg, nil) would crash.
	if context == nil {
		context = Context{}
	}
	if _, ok := context["trace_id"]; !ok {
		context["trace_id"] = trace.GetTraceId()
	}
	return l.instance.WithFields(logrus.Fields{
		"context": utils.MergeMaps(l.extends, context),
	})
}
// Debug logs message at debug level with the given context.
func (l *logger) Debug(message string, context Context) {
	l.withFields(context).Debug(message)
}

// Info logs message at info level with the given context.
func (l *logger) Info(message string, context Context) {
	l.withFields(context).Info(message)
}

// Warn logs message at warning level with the given context.
func (l *logger) Warn(message string, context Context) {
	l.withFields(context).Warn(message)
}

// Error logs message at error level with the given context.
func (l *logger) Error(message string, context Context) {
	l.withFields(context).Error(message)
}

// Fatal logs message at fatal level with the given context, then exits the
// process (logrus calls os.Exit(1) after a Fatal entry).
func (l *logger) Fatal(message string, context Context) {
	l.withFields(context).Fatal(message)
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eventarc
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/beta"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
)
// Channel adapts the typed eventarc/beta Channel resource to the generic
// unstructured.Resource interface; an instance is registered with the
// unstructured registry in this file's init.
type Channel struct{}
// ChannelToUnstructured converts a typed Channel into its unstructured
// (map-based) representation. Only non-nil fields are copied, so absent
// optional fields stay absent in the output map.
func ChannelToUnstructured(r *dclService.Channel) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "eventarc",
			Version: "beta",
			Type:    "Channel",
		},
		Object: make(map[string]interface{}),
	}
	if r.ActivationToken != nil {
		u.Object["activationToken"] = *r.ActivationToken
	}
	if r.CreateTime != nil {
		u.Object["createTime"] = *r.CreateTime
	}
	if r.CryptoKeyName != nil {
		u.Object["cryptoKeyName"] = *r.CryptoKeyName
	}
	if r.Location != nil {
		u.Object["location"] = *r.Location
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.Project != nil {
		u.Object["project"] = *r.Project
	}
	if r.PubsubTopic != nil {
		u.Object["pubsubTopic"] = *r.PubsubTopic
	}
	// State is an enum type; store its string form.
	if r.State != nil {
		u.Object["state"] = string(*r.State)
	}
	if r.ThirdPartyProvider != nil {
		u.Object["thirdPartyProvider"] = *r.ThirdPartyProvider
	}
	if r.Uid != nil {
		u.Object["uid"] = *r.Uid
	}
	if r.UpdateTime != nil {
		u.Object["updateTime"] = *r.UpdateTime
	}
	return u
}
// UnstructuredToChannel converts an unstructured resource back into a typed
// Channel. Each present key must hold a string; a value of any other type
// yields an error naming the offending field. Absent keys leave the
// corresponding field nil.
func UnstructuredToChannel(u *unstructured.Resource) (*dclService.Channel, error) {
	r := &dclService.Channel{}
	if _, ok := u.Object["activationToken"]; ok {
		if s, ok := u.Object["activationToken"].(string); ok {
			r.ActivationToken = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.ActivationToken: expected string")
		}
	}
	if _, ok := u.Object["createTime"]; ok {
		if s, ok := u.Object["createTime"].(string); ok {
			r.CreateTime = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.CreateTime: expected string")
		}
	}
	if _, ok := u.Object["cryptoKeyName"]; ok {
		if s, ok := u.Object["cryptoKeyName"].(string); ok {
			r.CryptoKeyName = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.CryptoKeyName: expected string")
		}
	}
	if _, ok := u.Object["location"]; ok {
		if s, ok := u.Object["location"].(string); ok {
			r.Location = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Location: expected string")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["project"]; ok {
		if s, ok := u.Object["project"].(string); ok {
			r.Project = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Project: expected string")
		}
	}
	if _, ok := u.Object["pubsubTopic"]; ok {
		if s, ok := u.Object["pubsubTopic"].(string); ok {
			r.PubsubTopic = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.PubsubTopic: expected string")
		}
	}
	// State round-trips through its enum constructor.
	if _, ok := u.Object["state"]; ok {
		if s, ok := u.Object["state"].(string); ok {
			r.State = dclService.ChannelStateEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.State: expected string")
		}
	}
	if _, ok := u.Object["thirdPartyProvider"]; ok {
		if s, ok := u.Object["thirdPartyProvider"].(string); ok {
			r.ThirdPartyProvider = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.ThirdPartyProvider: expected string")
		}
	}
	if _, ok := u.Object["uid"]; ok {
		if s, ok := u.Object["uid"].(string); ok {
			r.Uid = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Uid: expected string")
		}
	}
	if _, ok := u.Object["updateTime"]; ok {
		if s, ok := u.Object["updateTime"].(string); ok {
			r.UpdateTime = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.UpdateTime: expected string")
		}
	}
	return r, nil
}
// GetChannel fetches the Channel identified by u and returns its live state
// in unstructured form.
func GetChannel(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToChannel(u)
	if err != nil {
		return nil, err
	}
	fetched, err := dclService.NewClient(config).GetChannel(ctx, r)
	if err != nil {
		return nil, err
	}
	return ChannelToUnstructured(fetched), nil
}
// ListChannel returns every Channel under the given project/location,
// walking all pages of the paginated API.
func ListChannel(ctx context.Context, config *dcl.Config, project string, location string) ([]*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	page, err := c.ListChannel(ctx, project, location)
	if err != nil {
		return nil, err
	}
	var resources []*unstructured.Resource
	for {
		for _, r := range page.Items {
			resources = append(resources, ChannelToUnstructured(r))
		}
		if !page.HasNext() {
			return resources, nil
		}
		if err := page.Next(ctx, c); err != nil {
			return nil, err
		}
	}
}
// ApplyChannel creates or updates the Channel described by u and returns
// the resulting state in unstructured form.
func ApplyChannel(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToChannel(u)
	if err != nil {
		return nil, err
	}
	// Translate an unstructured state hint, when present, into a typed one.
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToChannel(ush)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	applied, err := c.ApplyChannel(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	return ChannelToUnstructured(applied), nil
}
// ChannelHasDiff reports whether applying u would change the live Channel.
// It dry-runs the apply with all lifecycle mutations blocked: an
// "infeasible" error means a change would have been required.
func ChannelHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToChannel(u)
	if err != nil {
		return false, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToChannel(ush)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification))
	if _, err := c.ApplyChannel(ctx, r, opts...); err != nil {
		if _, infeasible := err.(dcl.ApplyInfeasibleError); infeasible {
			return true, nil
		}
		return false, err
	}
	return false, nil
}
// DeleteChannel removes the Channel identified by u.
func DeleteChannel(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	r, err := UnstructuredToChannel(u)
	if err != nil {
		return err
	}
	return dclService.NewClient(config).DeleteChannel(ctx, r)
}
// ChannelID returns the canonical identity string of the Channel described
// by u.
func ChannelID(u *unstructured.Resource) (string, error) {
	typed, err := UnstructuredToChannel(u)
	if err != nil {
		return "", err
	}
	return typed.ID()
}
// STV identifies this adapter as eventarc/beta/Channel.
// NOTE(review): the literal is positional; confirm against
// unstructured.ServiceTypeVersion's field order that "Channel" and "beta"
// land in the intended fields (ChannelToUnstructured uses named fields with
// Version: "beta", Type: "Channel").
func (r *Channel) STV() unstructured.ServiceTypeVersion {
	return unstructured.ServiceTypeVersion{
		"eventarc",
		"Channel",
		"beta",
	}
}
// IAM policy operations are not supported for Channel resources; each stub
// below returns unstructured.ErrNoSuchMethod so the generic layer surfaces
// a uniform "unsupported" error.

func (r *Channel) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Channel) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Channel) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return unstructured.ErrNoSuchMethod
}

func (r *Channel) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Channel) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Channel) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// The methods below satisfy the unstructured resource interface by
// delegating to the package-level Channel functions.

func (r *Channel) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetChannel(ctx, config, resource)
}

func (r *Channel) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyChannel(ctx, config, resource, opts...)
}

func (r *Channel) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return ChannelHasDiff(ctx, config, resource, opts...)
}

func (r *Channel) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteChannel(ctx, config, resource)
}

func (r *Channel) ID(resource *unstructured.Resource) (string, error) {
	return ChannelID(resource)
}
// init registers the Channel adapter with the unstructured resource registry.
func init() {
	unstructured.Register(&Channel{})
}
|
package schemas
import (
"time"
)
// UserSchema is the JSON payload for user resources; the binding tags drive
// gin-style request validation (min/max are string lengths for string
// fields, e.g. Phone must be exactly 10 digits).
// NOTE(review): Region and Language combine `omitempty` (output) with
// binding `required` (input) — confirm both directions are intended.
type UserSchema struct {
	ID        uint64    `json:"id"`
	Username  string    `json:"username" binding:"required,min=3,max=100"`
	Email     string    `json:"email" binding:"required,email"`
	Password  string    `json:"password" binding:"required,min=6,max=100"`
	Phone     string    `json:"phone" binding:"required,number,min=10,max=10"`
	Avatar    string    `json:"avatar"`
	Image     string    `json:"image,omitempty"`
	Role      string    `json:"role"`
	Region    string    `json:"region,omitempty" binding:"required"`
	Language  string    `json:"language,omitempty" binding:"required"`
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
}
// ResendEmail is the request payload for re-sending a verification email.
type ResendEmail struct {
	Email string `json:"email" binding:"required,email"`
}
|
package main
import (
"os"
"strconv"
)
// main reads listener and Redis settings from the CFIGGIS_* environment
// variables, falling back to defaults, then starts the server.
func main() {
	localPort := envInt("CFIGGIS_PORT", 765)
	redisPort := envInt("CFIGGIS_REDIS_PORT", 6379)
	redisHost := os.Getenv("CFIGGIS_REDIS_HOST")
	if redisHost == "" {
		redisHost = "localhost"
	}
	server := NewServer(localPort, redisHost, redisPort)
	server.Listen()
}

// envInt returns the integer value of the named environment variable, or
// def when it is unset or not a valid integer. (Previously a malformed
// value was silently parsed with the error discarded, turning the port
// into 0 instead of keeping the default.)
func envInt(name string, def int) int {
	if s := os.Getenv(name); s != "" {
		if v, err := strconv.Atoi(s); err == nil {
			return v
		}
	}
	return def
}
|
package main
import (
"bufio"
"chat/sandbox/redis/04/db"
"chat/sandbox/redis/04/element"
"chat/sandbox/redis/04/receiver"
"chat/sandbox/redis/04/redis"
"chat/sandbox/redis/04/room"
"chat/sandbox/redis/04/sender"
"context"
"fmt"
"os"
"os/user"
"strconv"
"strings"
"time"
)
// main joins the chat room named by os.Args[1] and relays stdin lines into
// it. Each input line is "<message-body> <recipient> [recipient...]".
func main() {
	if len(os.Args) < 2 {
		fmt.Println("ERROR: not enough args")
		os.Exit(1)
	}
	// Validate the room id up front; previously a malformed id was silently
	// parsed (error discarded) and the client joined room 0.
	id, err := strconv.ParseInt(os.Args[1], 10, 64)
	if err != nil {
		fmt.Println("ERROR: room id must be an integer:", os.Args[1])
		os.Exit(1)
	}
	// Resolve the sender once, outside the loop; user.Current can fail, and
	// the old code dereferenced the possibly-nil *user.User on that path.
	me, err := user.Current()
	if err != nil {
		fmt.Println("ERROR: cannot resolve current user:", err)
		os.Exit(1)
	}
	rAddr := ":6379"
	rp := redis.NewPool(rAddr, 10*time.Minute)
	rr := receiver.NewRedisReceiver(rAddr)
	rs := sender.NewRedisSender(rp)
	rdb := db.NewDatabase(nil)
	sv := room.NewSupervisor(rr, rs, rdb)
	room := sv.GetRoom(id)
	ctx := context.Background()
	go room.Run(ctx)
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) > 1 {
			msg := element.Message{User: me.Username, Body: fields[0]}
			room.Send(ctx, msg, fields[1:]...)
		} else {
			fmt.Println("ERROR: not enough args")
		}
	}
}
// --------------------
// Redis
// --------------------
|
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package para
import (
"testing"
"time"
"github.com/33cn/chain33/common"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/common/log"
"github.com/stretchr/testify/mock"
_ "github.com/33cn/chain33/system"
"github.com/33cn/chain33/types"
typesmocks "github.com/33cn/chain33/types/mocks"
pt "github.com/GM-Publicchain/gm/plugin/dapp/paracross/types"
"github.com/stretchr/testify/assert"
)
// init silences chain33 logging so test output stays readable.
func init() {
	//types.Init("user.p.para.", nil)
	log.SetLogLevel("error")
}
// getPrivKey reconstructs a fixed secp256k1 private key from a hard-coded
// hex seed so the tests can sign transactions deterministically.
func getPrivKey(t *testing.T) crypto.PrivKey {
	pk, err := common.FromHex("6da92a632ab7deb67d38c0f6560bcfed28167998f6496db64c258d5e8393a81b")
	assert.Nil(t, err)
	secp, err := crypto.New(types.GetSignName("", types.SECP256K1))
	assert.Nil(t, err)
	priKey, err := secp.PrivKeyFromBytes(pk)
	assert.Nil(t, err)
	return priKey
}
// TestCalcCommitMsgTxs checks that calcCommitMsgTxs builds one signed
// commit transaction covering N node-status notifications (count == N),
// and that singleCalcTx builds a transaction for a single status.
func TestCalcCommitMsgTxs(t *testing.T) {
	priKey := getPrivKey(t)
	client := commitMsgClient{
		privateKey: priKey,
	}
	nt1 := &pt.ParacrossNodeStatus{
		Height: 1,
		Title:  "user.p.para",
	}
	nt2 := &pt.ParacrossNodeStatus{
		Height: 2,
		Title:  "user.p.para",
	}
	// One pending status -> one commit covering count 1.
	notify := []*pt.ParacrossNodeStatus{nt1}
	tx, count, err := client.calcCommitMsgTxs(notify)
	assert.Nil(t, err)
	assert.Equal(t, int64(1), count)
	assert.NotNil(t, tx)
	// Two pending statuses -> a single commit covering count 2.
	notify = append(notify, nt2)
	tx, count, err = client.calcCommitMsgTxs(notify)
	assert.Nil(t, err)
	assert.Equal(t, int64(2), count)
	assert.NotNil(t, tx)
	tx, err = client.singleCalcTx(nt2)
	assert.Nil(t, err)
	assert.NotNil(t, tx)
}
// TestGetConsensusStatus mocks the grpc QueryChain call to return an
// encoded ParacrossStatus and verifies getConsensusStatus decodes the
// height from it.
func TestGetConsensusStatus(t *testing.T) {
	para := new(client)
	grpcClient := &typesmocks.Chain33Client{}
	//grpcClient.On("GetFork", mock.Anything, &types.ReqKey{Key: []byte("ForkBlockHash")}).Return(&types.Int64{Data: 1}, errors.New("err")).Once()
	para.grpcClient = grpcClient
	commitCli := new(commitMsgClient)
	commitCli.paraClient = para
	block := &types.Block{
		Height:     1,
		MainHeight: 10,
	}
	status := &pt.ParacrossStatus{
		Height: 1,
	}
	// QueryChain answers once with the encoded status above.
	reply := &types.Reply{
		IsOk: true,
		Msg:  types.Encode(status),
	}
	grpcClient.On("QueryChain", mock.Anything, mock.Anything).Return(reply, nil).Once()
	ret, err := commitCli.getConsensusStatus(block)
	assert.Nil(t, err)
	assert.Equal(t, int64(1), ret.Height)
}
// TestSendCommitMsg drives the sendCommitMsg goroutine with a transaction
// while the mocked SendTransaction fails twice, exercising the retry path,
// then shuts the goroutine down via the quit channel.
func TestSendCommitMsg(t *testing.T) {
	para := new(client)
	grpcClient := &typesmocks.Chain33Client{}
	//grpcClient.On("GetFork", mock.Anything, &types.ReqKey{Key: []byte("ForkBlockHash")}).Return(&types.Int64{Data: 1}, errors.New("err")).Once()
	para.grpcClient = grpcClient
	commitCli := new(commitMsgClient)
	commitCli.paraClient = para
	commitCli.quit = make(chan struct{})
	commitCli.paraClient.wg.Add(1)
	sendMsgCh := make(chan *types.Transaction, 1)
	go commitCli.sendCommitMsg(sendMsgCh)
	//reply := &types.Reply{
	//	IsOk: true,
	//	Msg:  types.Encode(status),
	//}
	grpcClient.On("SendTransaction", mock.Anything, mock.Anything).Return(nil, types.ErrNotFound).Twice()
	tx := &types.Transaction{}
	sendMsgCh <- tx
	// NOTE(review): wall-clock wait for both mocked failures to be consumed;
	// a synchronization hook on the mock would make this deterministic.
	time.Sleep(3 * time.Second)
	//para.BaseClient.Close()
	close(commitCli.quit)
}
|
package main
import (
"github.com/alexliesenfeld/health"
"github.com/etherlabsio/healthcheck/v2/checkers"
"log"
"net/http"
)
// main serves a /health endpoint backed by a disk-space check on /var/log
// (reported unhealthy above the 90% usage threshold), then blocks serving
// HTTP on :3000.
func main() {
	diskCheck := health.Check{
		Name:  "disk",
		Check: checkers.DiskSpace("/var/log", 90).Check,
	}
	checker := health.NewChecker(health.WithCheck(diskCheck))
	http.Handle("/health", health.NewHandler(checker))
	log.Fatalln(http.ListenAndServe(":3000", nil))
}
|
package main
import (
"github.com/beego/beego/v2/core/logs"
)
// main emits a single informational log line via beego's logs module.
func main() {
	logs.Info("hello beego")
}
|
package nsdownload
import(
"strings"
"fmt"
"config"
"download"
"time"
"util"
)
// NationStatDownloader fetches data from the national statistics service.
// Each field holds the configured endpoint for one API: the index root, the
// three levels of child-category listings, the time-period list, and the
// observation data itself.
type NationStatDownloader struct {
	root   config.ServiceAPI
	child1 config.ServiceAPI
	child2 config.ServiceAPI
	child3 config.ServiceAPI
	period config.ServiceAPI
	data   config.ServiceAPI
}
// Init loads the "nationstat" service's API endpoint definitions from
// configuration into the downloader's fields.
func (d *NationStatDownloader) Init() {
	const id = "nationstat"
	cm := config.NewServiceConfigManager()
	api := func(name string) config.ServiceAPI { return cm.GetApi(id, name) }
	d.root = api("indexroot")
	d.child1 = api("children1")
	d.child2 = api("children2")
	d.child3 = api("children3")
	d.period = api("timeperiod")
	d.data = api("data")
}
// GetServiceData performs one API call, substituting v into either the URL
// (GET) or the form body (POST) according to the endpoint definition. An
// unknown method yields an empty string.
func (d *NationStatDownloader) GetServiceData(api config.ServiceAPI, v string) string {
	switch api.Method {
	case "GET":
		url := fmt.Sprintf(api.Uri, v)
		util.NewLog().Info(url)
		return download.HttpGet(url)
	case "POST":
		query := fmt.Sprintf(api.Data, v)
		util.NewLog().Info(api.Uri, query)
		return download.HttpPostForm(api.Uri, query)
	}
	return ""
}
// GetRoot downloads the root index of the statistics category tree. The
// getRand value is a cache-busting parameter substituted into the URI.
func (d *NationStatDownloader) GetRoot() string {
	uri := fmt.Sprintf(d.root.Uri, d.getRand())
	return download.HttpGet(uri)
}
// GetChild downloads the child categories of code at the given tree level
// (1..3). Levels outside that range yield an empty string.
func (d *NationStatDownloader) GetChild(code string, level int) string {
	switch level {
	case 1:
		return d.GetServiceData(d.child1, code)
	case 2:
		return d.GetServiceData(d.child2, code)
	case 3:
		return d.GetServiceData(d.child3, code)
	}
	return ""
}
// GetPeriod downloads the list of available time periods; getRand is a
// cache-busting parameter substituted into the URI.
func (d *NationStatDownloader) GetPeriod() string {
	uri := fmt.Sprintf(d.period.Uri, d.getRand())
	return download.HttpGet(uri)
}
// GetData fetches observations for the given indicator codes over a time
// window. start and end are "yyyyMM" strings; passing end == "-1" selects
// the API's open-ended form "-1,<start>" (from start up to the present),
// otherwise the window is "<start>,<end>".
func (d *NationStatDownloader) GetData(codes []string, start string, end string) string {
	code := strings.Join(codes, ",")
	var period string
	if end == "-1" {
		period = fmt.Sprintf("%v,%v", end, start)
	} else {
		period = fmt.Sprintf("%v,%v", start, end)
	}
	url := fmt.Sprintf(d.data.Uri, d.getRand(), code, period)
	util.NewLog().Info(url)
	return download.HttpGet(url)
}
// getRand produces the numeric cache-busting parameter the stats API
// expects. Despite the name it is not random: it is the current Unix time
// in milliseconds, which changes fast enough to defeat response caching.
func (d *NationStatDownloader) getRand() int {
	nanos := time.Now().UnixNano()
	return int(nanos / 1000000)
}
// NewNationStatDownloader returns a downloader with all endpoint
// definitions already loaded from configuration.
func NewNationStatDownloader() *NationStatDownloader {
	d := new(NationStatDownloader)
	d.Init()
	return d
}
|
package server
import (
"encoding/json"
"errors"
"net/http"
"github.com/imrenagi/go-payment"
)
// Meta carries pagination metadata returned alongside list responses.
type Meta struct {
	TotalItems  int    `json:"total_items"`
	TotalPages  int    `json:"total_pages"`
	CurrentPage int    `json:"cur_page"`
	Cursor      string `json:"last_cursor"`
}
// Error is the JSON error envelope returned to the client.
type Error struct {
	StatusCode int    `json:"error_code"`
	Message    string `json:"error_message"`
}
// Empty is used as the response body when a handler has nothing to return.
type Empty struct{}
// WriteSuccessResponse writes data as a JSON response with the given status
// code, after applying any extra headers from headMap (which may be nil).
func WriteSuccessResponse(w http.ResponseWriter, statusCode int, data interface{}, headMap map[string]string) {
	// Marshal before touching the response: previously the marshal error was
	// discarded after WriteHeader, so a failure produced an empty body under
	// the success status with no way to signal the problem.
	jsonData, err := json.Marshal(data)
	if err != nil {
		http.Error(w, "failed to encode response", http.StatusInternalServerError)
		return
	}
	w.Header().Add("Content-Type", "application/json")
	// Ranging a nil or empty map is a no-op; no guard needed.
	for key, val := range headMap {
		w.Header().Add(key, val)
	}
	w.WriteHeader(statusCode)
	w.Write(jsonData)
}
// WriteFailResponse writes the given error payload as a JSON response with
// the given status code.
func WriteFailResponse(w http.ResponseWriter, statusCode int, error interface{}) {
	// Marshal before WriteHeader so an encoding failure can still be
	// reported; the previous version discarded the error and wrote an
	// empty body under the caller's status code.
	jsonData, err := json.Marshal(error)
	if err != nil {
		http.Error(w, "failed to encode error response", http.StatusInternalServerError)
		return
	}
	w.Header().Add("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	w.Write(jsonData)
}
// WriteFailResponseFromError maps a domain error onto an HTTP status code
// and writes it to the client as a JSON Error envelope. Unrecognized errors
// map to 500.
func WriteFailResponseFromError(w http.ResponseWriter, err error) {
	statusCode := http.StatusInternalServerError
	// First matching sentinel wins, preserving the original chain's order.
	mappings := []struct {
		target error
		code   int
	}{
		{payment.ErrNotFound, http.StatusNotFound},
		{payment.ErrInternal, http.StatusInternalServerError},
		{payment.ErrDatabase, http.StatusInternalServerError},
		{payment.ErrBadRequest, http.StatusBadRequest},
		{payment.ErrCantProceed, http.StatusUnprocessableEntity},
		{payment.ErrUnauthorized, http.StatusUnauthorized},
		{payment.ErrForbidden, http.StatusForbidden},
	}
	for _, m := range mappings {
		if errors.Is(err, m.target) {
			statusCode = m.code
			break
		}
	}
	errorMsg := Error{
		Message:    err.Error(),
		StatusCode: statusCode,
	}
	w.Header().Add("Content-Type", "application/json")
	w.WriteHeader(errorMsg.StatusCode)
	jsonData, _ := json.Marshal(errorMsg)
	w.Write(jsonData)
}
|
package main
import (
"fmt"
)
// main demonstrates basic map operations: literal construction, insertion,
// deletion, and nested iteration. Note that Go randomizes map iteration
// order, so the printed ordering varies between runs.
func main() {
	favorites := map[string][]string{
		`bond_james`:      {`Shaken, not stirred`, `Martinis`, `Women`},
		`moneypenny_miss`: {`James Bond`, `Literature`, `Computer Science`},
		`no_dr`:           {`Being evil`, `Ice cream`, `Sunsets`},
	}
	fmt.Println(favorites)
	favorites[`Sun_dan`] = []string{`Being human`, `Burger`, `Movies`}
	fmt.Println(favorites)
	delete(favorites, `moneypenny_miss`)
	for key, values := range favorites {
		fmt.Println(key)
		for idx, val := range values {
			fmt.Printf("\t index position: %v \t value: %v\n", idx, val)
		}
	}
}
|
// Package main -
package main
import (
"fmt"
"log"
"os"
"time"
"github.com/shanehowearth/concurrency_in_go/bridge"
"github.com/shanehowearth/concurrency_in_go/pipeline"
"github.com/shanehowearth/concurrency_in_go/steward"
)
// main runs two steward/ward demonstrations: first a steward restarting an
// irresponsible ward that never heartbeats, then a steward supervising a
// value-producing ward that dies on a negative input and is restarted so
// the downstream pipeline keeps receiving values.
func main() {
	log.SetOutput(os.Stdout)
	log.SetFlags(log.Ltime | log.LUTC)
	// A ward that ignores its pulse interval and returns no heartbeat
	// channel — the steward will keep restarting it.
	doWork := func(done <-chan interface{}, _ time.Duration) <-chan interface{} {
		log.Println("ward: Hello, I'm irresponsible!")
		go func() {
			log.Println("ward: I am halting.")
		}()
		return nil
	}
	// doWorkFn builds a restartable ward over intList. Each (re)start feeds
	// a fresh channel into intChanStream; bridge flattens those into the
	// single intStream consumers read from, so restarts are transparent.
	doWorkFn := func(
		done <-chan interface{},
		intList ...int,
	) (steward.StartGoroutineFn, <-chan interface{}) {
		intChanStream := make(chan (<-chan interface{}))
		intStream := bridge.Bridge(done, intChanStream)
		doWork = func(
			done <-chan interface{},
			pulseInterval time.Duration,
		) <-chan interface{} {
			intStream := make(chan interface{})
			heartbeat := make(chan interface{})
			go func() {
				defer close(intStream)
				// Publish this incarnation's output channel (or bail out
				// if we are already cancelled).
				select {
				case intChanStream <- intStream:
				case <-done:
					return
				}
				pulse := time.Tick(pulseInterval)
				for {
				valueLoop:
					for _, intVal := range intList {
						// A negative value kills the ward; the steward
						// restarts it from the beginning of intList.
						if intVal < 0 {
							log.Printf("negative value: %v\n", intVal)
							return
						}
						for {
							select {
							case <-pulse:
								// Non-blocking heartbeat so a slow steward
								// never stalls value production.
								select {
								case heartbeat <- struct{}{}:
								default:
								}
							case intStream <- intVal:
								continue valueLoop
							case <-done:
								return
							}
						}
					}
				}
			}()
			return heartbeat
		}
		return doWork, intStream
	}
	// Demo 1: supervise the irresponsible ward for ~9s, restarting on every
	// missed 4s heartbeat, then halt everything via done.
	doWorkWithSteward := steward.NewSteward(4*time.Second, doWork)
	done := make(chan interface{})
	time.AfterFunc(9*time.Second, func() {
		log.Println("main: halting steward and ward.")
		close(done)
	})
	for range doWorkWithSteward(done, 4*time.Second) {
	}
	log.Println("Done")
	log.SetFlags(log.Ltime | log.LUTC)
	log.SetOutput(os.Stdout)
	done = make(chan interface{})
	defer close(done)
	// Demo 2: the -1 in the list repeatedly kills the ward; the 1ms steward
	// restarts it, so Take still collects 6 values downstream.
	doWork, intStream := doWorkFn(done, 1, 2, -1, 3, 4, 5)
	doWorkWithSteward = steward.NewSteward(1*time.Millisecond, doWork)
	doWorkWithSteward(done, 1*time.Hour)
	for intVal := range pipeline.Take(done, intStream, 6) {
		fmt.Printf("Received: %v\n", intVal)
	}
}
|
package handler
import (
"context"
"strings"
)
// DeployUsecases defines the trigger handler usecases: an asynchronous r10k
// environment deploy with success/error callbacks.
//go:generate mockery -inpkg -testonly -name DeployUsecases
type DeployUsecases interface {
	DeployR10KEnvAsync(ctx context.Context, env string, onSuccess func(string), onError func(string, error))
}
// getEnvironmentFromGITRef extracts the branch name from a Git push ref,
// e.g. "refs/heads/production" -> "production". It returns "" when ref is
// not a single well-formed branch ref.
func getEnvironmentFromGITRef(ref string) string {
	// Branch refs live under "refs/heads/". The previous prefix
	// "refs/head/" (missing the s) never anchored correctly: a real push
	// ref "refs/heads/x" was mangled into "s/x".
	const refWantedPrefix = "refs/heads/"
	if !strings.HasPrefix(ref, refWantedPrefix) {
		return ""
	}
	env := strings.TrimPrefix(ref, refWantedPrefix)
	// Mirror the original's "exactly one occurrence" guard.
	if strings.Contains(env, refWantedPrefix) {
		return ""
	}
	return env
}
|
package main
import (
"testing"
"github.com/aws/aws-lambda-go/events"
"github.com/stretchr/testify/assert"
)
// TestUnmarshalStreamImage verifies that a DynamoDB stream image is decoded
// into an entry with every fixture attribute mapped.
func TestUnmarshalStreamImage(t *testing.T) {
	t.Run("unmarshalStreamImage", func(t *testing.T) {
		event := event()
		image := event.Records[0].Change.NewImage
		var entry entry
		unmarshalStreamImage(image, &entry)
		assert.Equal(t, "testRole", entry.Role)
		// The second Role assertion was a copy/paste duplicate; the Email
		// attribute set by the fixture was never checked.
		// NOTE(review): confirm the entry struct's Email field name.
		assert.Equal(t, "testEmail", entry.Email)
		assert.Equal(t, 100, entry.ValidResources)
		assert.Equal(t, 20, entry.CfResources)
	})
}
// event builds a single-record DynamoDB stream event fixture whose NewImage
// carries one attribute of each kind the unmarshaller must handle (two
// strings, two numbers).
func event() events.DynamoDBEvent {
	return events.DynamoDBEvent{
		Records: []events.DynamoDBEventRecord{
			{
				Change: events.DynamoDBStreamRecord{
					NewImage: map[string]events.DynamoDBAttributeValue{
						"Role":           events.NewStringAttribute("testRole"),
						"Email":          events.NewStringAttribute("testEmail"),
						"ValidResources": events.NewNumberAttribute("100"),
						"CfResources":    events.NewNumberAttribute("20"),
					},
				},
			},
		},
	}
}
|
package resto
import (
"context"
"encoding/json"
"errors"
"math/rand"
"net/http"
"net/http/httptest"
"reflect"
"sort"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/saucelabs/saucectl/internal/job"
"github.com/saucelabs/saucectl/internal/vmd"
)
// TestClient_GetJobDetails exercises ReadJob against a stub REST server:
// complete and errored jobs decode into job.Job, while 404/401/5xx map to
// ErrJobNotFound, a formatted request error, and ErrServerError.
func TestClient_GetJobDetails(t *testing.T) {
	// Stub server keyed on job id in the URL path.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/rest/v1.1/test/jobs/1":
			completeStatusResp := []byte(`{"browser_short_version": "85", "video_url": "https://localhost/jobs/1/video.mp4", "creation_time": 1605637528, "custom-data": null, "browser_version": "85.0.4183.83", "owner": "test", "automation_backend": "webdriver", "id": "1", "collects_automator_log": false, "record_screenshots": true, "record_video": true, "build": null, "passed": null, "public": "team", "assigned_tunnel_id": null, "status": "complete", "log_url": "https://localhost/jobs/1/selenium-server.log", "start_time": 1605637528, "proxied": false, "modification_time": 1605637554, "tags": [], "name": null, "commands_not_successful": 4, "consolidated_status": "complete", "selenium_version": null, "manual": false, "end_time": 1605637554, "error": null, "os": "Windows 10", "breakpointed": null, "browser": "googlechrome"}`)
			w.Write(completeStatusResp)
		case "/rest/v1.1/test/jobs/2":
			errorStatusResp := []byte(`{"browser_short_version": "85", "video_url": "https://localhost/jobs/2/video.mp4", "creation_time": 1605637528, "custom-data": null, "browser_version": "85.0.4183.83", "owner": "test", "automation_backend": "webdriver", "id": "2", "collects_automator_log": false, "record_screenshots": true, "record_video": true, "build": null, "passed": null, "public": "team", "assigned_tunnel_id": null, "status": "error", "log_url": "https://localhost/jobs/2/selenium-server.log", "start_time": 1605637528, "proxied": false, "modification_time": 1605637554, "tags": [], "name": null, "commands_not_successful": 4, "consolidated_status": "error", "selenium_version": null, "manual": false, "end_time": 1605637554, "error": "User Abandoned Test -- User terminated", "os": "Windows 10", "breakpointed": null, "browser": "googlechrome"}`)
			w.Write(errorStatusResp)
		case "/rest/v1.1/test/jobs/3":
			w.WriteHeader(http.StatusNotFound)
		case "/rest/v1.1/test/jobs/4":
			w.WriteHeader(http.StatusUnauthorized)
		default:
			w.WriteHeader(http.StatusInternalServerError)
		}
	}))
	defer ts.Close()
	timeout := 3 * time.Second
	testCases := []struct {
		name         string
		client       Client
		jobID        string
		expectedResp job.Job
		expectedErr  error
	}{
		{
			name:   "get job details with ID 1 and status 'complete'",
			client: New(ts.URL, "test", "123", timeout),
			jobID:  "1",
			expectedResp: job.Job{
				ID:                  "1",
				Passed:              false,
				Status:              "complete",
				Error:               "",
				BrowserShortVersion: "85",
			},
			expectedErr: nil,
		},
		{
			name:   "get job details with ID 2 and status 'error'",
			client: New(ts.URL, "test", "123", timeout),
			jobID:  "2",
			expectedResp: job.Job{
				ID:                  "2",
				Passed:              false,
				Status:              "error",
				Error:               "User Abandoned Test -- User terminated",
				BrowserShortVersion: "85",
			},
			expectedErr: nil,
		},
		{
			name:         "job not found error from external API",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "3",
			expectedResp: job.Job{},
			expectedErr:  ErrJobNotFound,
		},
		{
			name:         "http status is not 200, but 401 from external API",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "4",
			expectedResp: job.Job{},
			expectedErr:  errors.New("job status request failed; unexpected response code:'401', msg:''"),
		},
		{
			name:         "internal server error from external API",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "333",
			expectedResp: job.Job{},
			expectedErr:  ErrServerError,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := tc.client.ReadJob(context.Background(), tc.jobID)
			// NOTE(review): assert.Equal's convention is (expected, actual);
			// the arguments here are swapped, which only affects the wording
			// of failure messages.
			assert.Equal(t, err, tc.expectedErr)
			assert.Equal(t, got, tc.expectedResp)
		})
	}
}
// TestClient_GetJobStatus exercises PollJob against a stub server whose
// responses advance via the randJobStatus helper: jobs 1 and 2 eventually
// reach terminal states, while 404/401/5xx map to the corresponding errors.
func TestClient_GetJobStatus(t *testing.T) {
	// Seed the global PRNG used by randJobStatus so state transitions vary.
	rand.Seed(time.Now().UnixNano())
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/rest/v1.1/test/jobs/1":
			details := &job.Job{
				ID:     "1",
				Passed: false,
				Status: "new",
				Error:  "",
			}
			// Randomly promote towards "complete" across polls.
			randJobStatus(details, true)
			resp, _ := json.Marshal(details)
			w.Write(resp)
		case "/rest/v1.1/test/jobs/2":
			details := &job.Job{
				ID:     "2",
				Passed: false,
				Status: "in progress",
				Error:  "User Abandoned Test -- User terminated",
			}
			// Randomly promote towards "error" across polls.
			randJobStatus(details, false)
			resp, _ := json.Marshal(details)
			w.Write(resp)
		case "/rest/v1.1/test/jobs/3":
			w.WriteHeader(http.StatusNotFound)
		case "/rest/v1.1/test/jobs/4":
			w.WriteHeader(http.StatusUnauthorized)
		default:
			w.WriteHeader(http.StatusInternalServerError)
		}
	}))
	defer ts.Close()
	timeout := 3 * time.Second
	testCases := []struct {
		name         string
		client       Client
		jobID        string
		expectedResp job.Job
		expectedErr  error
	}{
		{
			name:   "get job details with ID 1 and status 'complete'",
			client: New(ts.URL, "test", "123", timeout),
			jobID:  "1",
			expectedResp: job.Job{
				ID:     "1",
				Passed: false,
				Status: "complete",
				Error:  "",
			},
			expectedErr: nil,
		},
		{
			name:   "get job details with ID 2 and status 'error'",
			client: New(ts.URL, "test", "123", timeout),
			jobID:  "2",
			expectedResp: job.Job{
				ID:     "2",
				Passed: false,
				Status: "error",
				Error:  "User Abandoned Test -- User terminated",
			},
			expectedErr: nil,
		},
		{
			name:         "user not found error from external API",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "3",
			expectedResp: job.Job{},
			expectedErr:  ErrJobNotFound,
		},
		{
			name:         "http status is not 200, but 401 from external API",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "4",
			expectedResp: job.Job{},
			expectedErr:  errors.New("job status request failed; unexpected response code:'401', msg:''"),
		},
		{
			name:         "unexpected status code from external API",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "333",
			expectedResp: job.Job{},
			expectedErr:  ErrServerError,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Poll every 10ms until a terminal status or an error.
			got, err := tc.client.PollJob(context.Background(), tc.jobID, 10*time.Millisecond)
			assert.Equal(t, err, tc.expectedErr)
			assert.Equal(t, got, tc.expectedResp)
		})
	}
}
// TestClient_GetJobAssetFileNames verifies listing of job asset file names:
// a successful listing (null/empty entries filtered out), a missing job,
// an unauthorized response, and a server error.
func TestClient_GetJobAssetFileNames(t *testing.T) {
	// Stub of the remote assets endpoint; the job ID in the path selects the outcome.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/rest/v1/test/jobs/1/assets":
			// Note: "selenium-log"/"sauce-log" are null and "screenshots" is an
			// empty list — the client is expected to drop them from the result.
			completeStatusResp := []byte(`{"console.log": "console.log", "examples__actions.spec.js.mp4": "examples__actions.spec.js.mp4", "examples__actions.spec.js.json": "examples__actions.spec.js.json", "video.mp4": "video.mp4", "selenium-log": null, "sauce-log": null, "examples__actions.spec.js.xml": "examples__actions.spec.js.xml", "video": "video.mp4", "screenshots": []}`)
			w.Write(completeStatusResp)
		case "/rest/v1/test/jobs/2/assets":
			w.WriteHeader(http.StatusNotFound)
		case "/rest/v1/test/jobs/3/assets":
			w.WriteHeader(http.StatusUnauthorized)
		default:
			w.WriteHeader(http.StatusInternalServerError)
		}
	}))
	defer ts.Close()
	timeout := 3 * time.Second
	testCases := []struct {
		name         string
		client       Client
		jobID        string
		expectedResp []string
		expectedErr  error
	}{
		{
			name:         "get job asset with ID 1",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "1",
			expectedResp: []string{"console.log", "examples__actions.spec.js.mp4", "examples__actions.spec.js.json", "video.mp4", "examples__actions.spec.js.xml"},
			expectedErr:  nil,
		},
		{
			name:         "get job asset with ID 2",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "2",
			expectedResp: nil,
			expectedErr:  ErrJobNotFound,
		},
		{
			name:         "get job asset with ID 3",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "3",
			expectedResp: nil,
			expectedErr:  errors.New("job assets list request failed; unexpected response code:'401', msg:''"),
		},
		{
			name:         "get job asset with ID 4",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "4",
			expectedResp: nil,
			expectedErr:  ErrServerError,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := tc.client.GetJobAssetFileNames(context.Background(), tc.jobID)
			// The listing order is unspecified, so both sides are sorted
			// before comparison (this mutates tc.expectedResp in place).
			sort.Strings(tc.expectedResp)
			sort.Strings(got)
			assert.Equal(t, tc.expectedErr, err)
			assert.Equal(t, tc.expectedResp, got)
		})
	}
}
// TestClient_GetJobAssetFileContent verifies fetching a single job asset:
// success, server error, missing job, and unauthorized access (including the
// error body being surfaced in the message).
func TestClient_GetJobAssetFileContent(t *testing.T) {
	// Stub of the remote asset endpoint; the job ID in the path selects the outcome.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/rest/v1/test/jobs/1/assets/console.log":
			fileContent := []byte(`Sauce Cypress Runner 0.2.3`)
			w.Write(fileContent)
		case "/rest/v1/test/jobs/2/assets/console.log":
			w.WriteHeader(http.StatusNotFound)
		case "/rest/v1/test/jobs/3/assets/console.log":
			w.WriteHeader(http.StatusUnauthorized)
			fileContent := []byte(`unauthorized`)
			w.Write(fileContent)
		default:
			w.WriteHeader(http.StatusInternalServerError)
		}
	}))
	defer ts.Close()
	timeout := 3 * time.Second
	testCases := []struct {
		name         string
		client       Client
		jobID        string
		expectedResp []byte
		expectedErr  error
	}{
		{
			name:         "get job asset with ID 1",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "1",
			expectedResp: []byte(`Sauce Cypress Runner 0.2.3`),
			expectedErr:  nil,
		},
		{
			name:         "get job asset with ID 333 and Internal Server Error ",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "333",
			expectedResp: nil,
			expectedErr:  ErrServerError,
		},
		{
			name:         "get job asset with ID 2",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "2",
			expectedResp: nil,
			expectedErr:  ErrJobNotFound,
		},
		{
			name:         "get job asset with ID 3",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "3",
			expectedResp: nil,
			expectedErr:  errors.New("job status request failed; unexpected response code:'401', msg:'unauthorized'"),
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := tc.client.GetJobAssetFileContent(context.Background(), tc.jobID, "console.log")
			// Fix: testify's assert.Equal signature is (t, expected, actual).
			// The arguments were swapped here, which mislabels the values in
			// failure output (TestClient_GetJobAssetFileNames uses the correct order).
			assert.Equal(t, tc.expectedErr, err)
			assert.Equal(t, tc.expectedResp, got)
		})
	}
}
// randJobStatus simulates polling progress: roughly 60% of the calls flip the
// job's status to its terminal value ("complete" when isComplete, otherwise
// "error"); the rest leave the job untouched.
func randJobStatus(j *job.Job, isComplete bool) {
	terminal := "error"
	if isComplete {
		terminal = "complete"
	}
	// rand.Intn(10)+1 yields 1..10; values 5..10 (60%) trigger the transition.
	if roll := rand.Intn(10) + 1; roll >= 5 {
		j.Status = terminal
	}
}
// TestClient_TestStop verifies stopping a job: a job stopped with an error
// status, a missing job, an unauthorized response, and a server error.
func TestClient_TestStop(t *testing.T) {
	// Stub of the remote stop endpoint; the job ID in the path selects the outcome.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/rest/v1/test/jobs/1/stop":
			completeStatusResp := []byte(`{"browser_short_version": "85", "video_url": "https://localhost/jobs/1/video.mp4", "creation_time": 1605637528, "custom-data": null, "browser_version": "85.0.4183.83", "owner": "test", "automation_backend": "webdriver", "id": "1", "collects_automator_log": false, "record_screenshots": true, "record_video": true, "build": null, "passed": null, "public": "team", "assigned_tunnel_id": null, "status": "complete", "log_url": "https://localhost/jobs/1/selenium-server.log", "start_time": 1605637528, "proxied": false, "modification_time": 1605637554, "tags": [], "name": null, "commands_not_successful": 4, "consolidated_status": "complete", "selenium_version": null, "manual": false, "end_time": 1605637554, "error": null, "os": "Windows 10", "breakpointed": null, "browser": "googlechrome"}`)
			w.Write(completeStatusResp)
		case "/rest/v1/test/jobs/2/stop":
			errorStatusResp := []byte(`{"browser_short_version": "85", "video_url": "https://localhost/jobs/2/video.mp4", "creation_time": 1605637528, "custom-data": null, "browser_version": "85.0.4183.83", "owner": "test", "automation_backend": "webdriver", "id": "2", "collects_automator_log": false, "record_screenshots": true, "record_video": true, "build": null, "passed": null, "public": "team", "assigned_tunnel_id": null, "status": "error", "log_url": "https://localhost/jobs/2/selenium-server.log", "start_time": 1605637528, "proxied": false, "modification_time": 1605637554, "tags": [], "name": null, "commands_not_successful": 4, "consolidated_status": "error", "selenium_version": null, "manual": false, "end_time": 1605637554, "error": "User Abandoned Test -- User terminated", "os": "Windows 10", "breakpointed": null, "browser": "googlechrome"}`)
			w.Write(errorStatusResp)
		case "/rest/v1/test/jobs/3/stop":
			w.WriteHeader(http.StatusNotFound)
		case "/rest/v1/test/jobs/4/stop":
			w.WriteHeader(http.StatusUnauthorized)
		default:
			w.WriteHeader(http.StatusInternalServerError)
		}
	}))
	defer ts.Close()
	timeout := 3 * time.Second
	testCases := []struct {
		name         string
		client       Client
		jobID        string
		expectedResp job.Job
		expectedErr  error
	}{
		{
			name:   "get job details with ID 2 and status 'error'",
			client: New(ts.URL, "test", "123", timeout),
			jobID:  "2",
			expectedResp: job.Job{
				ID:                  "2",
				Passed:              false,
				Status:              "error",
				Error:               "User Abandoned Test -- User terminated",
				BrowserShortVersion: "85",
			},
			expectedErr: nil,
		},
		{
			name:         "job not found error from external API",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "3",
			expectedResp: job.Job{},
			expectedErr:  ErrJobNotFound,
		},
		{
			name:         "http status is not 200, but 401 from external API",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "4",
			expectedResp: job.Job{},
			expectedErr:  errors.New("job status request failed; unexpected response code:'401', msg:''"),
		},
		{
			name:         "internal server error from external API",
			client:       New(ts.URL, "test", "123", timeout),
			jobID:        "333",
			expectedResp: job.Job{},
			expectedErr:  ErrServerError,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := tc.client.StopJob(context.Background(), tc.jobID)
			// Fix: testify's assert.Equal signature is (t, expected, actual).
			// The arguments were swapped here, which mislabels the values in
			// failure output (TestClient_GetJobAssetFileNames uses the correct order).
			assert.Equal(t, tc.expectedErr, err)
			assert.Equal(t, tc.expectedResp, got)
		})
	}
}
// TestClient_GetVirtualDevices verifies that the platform list returned by
// "/rest/v1.1/info/platforms/all" is filtered into iOS simulators and Android
// emulators depending on the requested kind.
func TestClient_GetVirtualDevices(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/rest/v1.1/info/platforms/all":
			w.WriteHeader(http.StatusOK)
			w.Write([]byte(`[{"long_name": "Samsung Galaxy S7 FHD GoogleAPI Emulator", "short_version": "7.0"},{"long_name": "Samsung Galaxy S9 HD GoogleAPI Emulator", "short_version": "8.0"},{"long_name": "iPhone 6s Simulator", "short_version": "11.0"},{"long_name": "iPhone 8 Plus Simulator", "short_version": "14.3"}]`))
		default:
			w.WriteHeader(http.StatusInternalServerError)
		}
	}))
	// Fix: the test server was never shut down; every other test in this file
	// defers ts.Close() right after creating the server.
	defer ts.Close()
	c := &Client{
		HTTPClient: ts.Client(),
		URL:        ts.URL,
		Username:   "dummy-user",
		AccessKey:  "dummy-key",
	}
	type args struct {
		ctx  context.Context
		kind string
	}
	tests := []struct {
		name    string
		args    args
		want    []vmd.VirtualDevice
		wantErr bool
	}{
		{
			name: "iOS Virtual Devices",
			args: args{
				ctx:  context.Background(),
				kind: vmd.IOSSimulator,
			},
			want: []vmd.VirtualDevice{
				{Name: "iPhone 6s Simulator", OSVersion: []string{"11.0"}},
				{Name: "iPhone 8 Plus Simulator", OSVersion: []string{"14.3"}},
			},
		},
		{
			name: "Android Virtual Devices",
			args: args{
				ctx:  context.Background(),
				kind: vmd.AndroidEmulator,
			},
			want: []vmd.VirtualDevice{
				{Name: "Samsung Galaxy S7 FHD GoogleAPI Emulator", OSVersion: []string{"7.0"}},
				{Name: "Samsung Galaxy S9 HD GoogleAPI Emulator", OSVersion: []string{"8.0"}},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := c.GetVirtualDevices(tt.args.ctx, tt.args.kind)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetVirtualDevices() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("GetVirtualDevices() got = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package eventlogger
import (
"fmt"
"io"
"time"
"github.com/dollarshaveclub/acyl/pkg/models"
"github.com/dollarshaveclub/acyl/pkg/persistence"
"github.com/google/uuid"
"github.com/pkg/errors"
)
// Logger is an object that writes log lines to the database in an EventLog as well as to Sink
type Logger struct {
	// DL persists event-log rows; when nil, all database writes are skipped.
	DL persistence.EventLoggerDataLayer
	// ID is the event log's identifier; DeliveryID is the GitHub webhook delivery ID.
	ID, DeliveryID uuid.UUID
	// Sink receives a copy of every formatted log line; may be nil.
	Sink io.Writer
	// ExcludeID determines whether to omit the ID from log strings
	ExcludeID bool
}
// Init initializes the EventLog object in the database. This must be called exactly once prior to any log lines.
// This method will persist the EventLog in the database with a null environment name.
func (l *Logger) Init(webhook []byte, repo string, pr uint) error {
	if l.DL == nil {
		return errors.New("datalayer is nil")
	}
	record := &models.EventLog{
		ID:               l.ID,
		WebhookPayload:   webhook,
		GitHubDeliveryID: l.DeliveryID,
		Repo:             repo,
		PullRequest:      pr,
	}
	return errors.Wrap(l.DL.CreateEventLog(record), "error creating event log")
}
// SetEnvName sets the environment name for this logger. Init must be called first.
// When no data layer is configured the call is a no-op.
func (l *Logger) SetEnvName(name string) error {
	if l.DL == nil {
		return nil
	}
	return l.DL.SetEventLogEnvName(l.ID, name)
}
// Printf writes the formatted log line to the underlying Logger.
// Each line is prefixed with a UTC timestamp and (unless ExcludeID is set)
// the event ID; it is appended to the database log when a data layer and a
// non-nil ID are present, and always mirrored to Sink when one is set.
func (l *Logger) Printf(msg string, params ...interface{}) {
	prefix := time.Now().UTC().Format("2006/01/02 15:04:05")
	if !l.ExcludeID {
		prefix += " event: " + l.ID.String()
	}
	line := fmt.Sprintf(prefix+": "+msg+"\n", params...)
	if l.DL != nil && l.ID != uuid.Nil {
		// A failed database append is reported on the sink but does not
		// prevent the line itself from being written below.
		if err := l.DL.AppendToEventLog(l.ID, line); err != nil && l.Sink != nil {
			l.Sink.Write([]byte(errors.Wrap(err, "error appending line to event log").Error()))
		}
	}
	if l.Sink != nil {
		l.Sink.Write([]byte(line))
	}
}
|
package operatorlister
import (
"fmt"
"sync"
v1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
rbacv1 "k8s.io/client-go/listers/rbac/v1"
)
// UnionRoleBindingLister aggregates per-namespace RoleBinding listers behind a
// single lister interface; access to the map is guarded by roleBindingLock.
type UnionRoleBindingLister struct {
	// roleBindingListers maps a namespace (or metav1.NamespaceAll) to its lister.
	roleBindingListers map[string]rbacv1.RoleBindingLister
	roleBindingLock    sync.RWMutex
}
// List lists all RoleBindings in the indexer.
// Results from every registered lister are merged and de-duplicated by UID.
func (rbl *UnionRoleBindingLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) {
	rbl.roleBindingLock.RLock()
	defer rbl.roleBindingLock.RUnlock()
	unique := make(map[types.UID]*v1.RoleBinding)
	for _, lister := range rbl.roleBindingListers {
		items, listErr := lister.List(selector)
		if listErr != nil {
			return nil, listErr
		}
		for _, rb := range items {
			unique[rb.GetUID()] = rb
		}
	}
	for _, rb := range unique {
		ret = append(ret, rb)
	}
	return ret, nil
}
// RoleBindings returns an object that can list and get RoleBindings.
// A lister registered for the exact namespace wins; otherwise a lister
// registered for all namespaces is used; otherwise a null lister is returned
// so callers never receive nil.
func (rbl *UnionRoleBindingLister) RoleBindings(namespace string) rbacv1.RoleBindingNamespaceLister {
	rbl.roleBindingLock.RLock()
	defer rbl.roleBindingLock.RUnlock()
	for _, key := range []string{namespace, metav1.NamespaceAll} {
		if lister, ok := rbl.roleBindingListers[key]; ok {
			return lister.RoleBindings(namespace)
		}
	}
	return &NullRoleBindingNamespaceLister{}
}
// RegisterRoleBindingLister associates a lister with a namespace (use
// metav1.NamespaceAll to cover every namespace), lazily creating the map.
func (rbl *UnionRoleBindingLister) RegisterRoleBindingLister(namespace string, lister rbacv1.RoleBindingLister) {
	rbl.roleBindingLock.Lock()
	defer rbl.roleBindingLock.Unlock()
	m := rbl.roleBindingListers
	if m == nil {
		m = make(map[string]rbacv1.RoleBindingLister)
		rbl.roleBindingListers = m
	}
	m[namespace] = lister
}
// RegisterRoleBindingLister delegates registration to the underlying union lister.
func (l *rbacV1Lister) RegisterRoleBindingLister(namespace string, lister rbacv1.RoleBindingLister) {
	l.roleBindingLister.RegisterRoleBindingLister(namespace, lister)
}
// RoleBindingLister exposes the aggregated RoleBinding lister.
func (l *rbacV1Lister) RoleBindingLister() rbacv1.RoleBindingLister {
	return l.roleBindingLister
}
// NullRoleBindingNamespaceLister is an implementation of a null RoleBindingNamespaceLister. It is
// used to prevent nil pointers when no RoleBindingNamespaceLister has been registered for a given
// namespace.
type NullRoleBindingNamespaceLister struct {
	// The embedded interface satisfies the contract; List and Get below
	// override it with explicit error returns.
	rbacv1.RoleBindingNamespaceLister
}
// List returns nil and an error explaining that this is a NullRoleBindingNamespaceLister.
// It always fails regardless of the selector.
func (n *NullRoleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) {
	return nil, fmt.Errorf("cannot list RoleBindings with a NullRoleBindingNamespaceLister")
}
// Get returns nil and an error explaining that this is a NullRoleBindingNamespaceLister.
// It always fails regardless of the requested name.
func (n *NullRoleBindingNamespaceLister) Get(name string) (*v1.RoleBinding, error) {
	return nil, fmt.Errorf("cannot get RoleBinding with a NullRoleBindingNamespaceLister")
}
|
package main
import (
"fmt"
"io"
"net/http"
"os"
)
// logWriter is a stateless io.Writer that logs everything written to it (see Write below).
type logWriter struct{}
// main fetches http://google.com and streams the response body through a
// logWriter, demonstrating the io.Reader/io.Writer interfaces.
func main() {
	resp, err := http.Get("http://google.com")
	if err != nil {
		fmt.Println("Error:", err)
		os.Exit(1)
	}
	// Fix: the response body must always be closed so the underlying
	// connection can be released/reused.
	defer resp.Body.Close()
	lw := logWriter{}
	// io.Copy streams from any Reader (resp.Body) into any Writer (lw),
	// replacing the manual make([]byte, n) + resp.Body.Read(bs) approach.
	// Fix: the copy error was previously discarded.
	if _, err := io.Copy(lw, resp.Body); err != nil {
		fmt.Println("Error:", err)
		os.Exit(1)
	}
}
func (logWriter) Write(bs []byte) (int, error) {
fmt.Println(string(bs))
fmt.Println("Just wrote this many bytes:", len(bs))
// To implement the Writer interface Write() must return int number of bytes read and an error
return len(bs), nil
} |
package it
import (
"bytes"
"net/url"
"os"
"testing"
"time"
"github.com/boltdb/bolt"
"github.com/fxnn/deadbox/config"
"github.com/fxnn/deadbox/crypto"
"github.com/fxnn/deadbox/daemon"
"github.com/fxnn/deadbox/drop"
"github.com/fxnn/deadbox/model"
"github.com/fxnn/deadbox/rest"
"github.com/fxnn/deadbox/worker"
)
const (
workerDbFileName = "worker.boltdb"
workerName = "itWorker"
dropDbFileName = "drop.boltdb"
dropName = "itDrop"
port = "54123"
interactionSleepTime = 500 * time.Millisecond
)
// assertWorkerTimeoutInFuture fails the test when the worker's timeout
// already lies in the past.
func assertWorkerTimeoutInFuture(actualWorker model.Worker, t *testing.T) {
	t.Helper()
	now := time.Now()
	if actualWorker.Timeout.Before(now) {
		t.Fatalf("expected worker timeout to be in the future, but was %s", actualWorker.Timeout)
	}
}
// assertWorkerName fails the test unless the worker carries the given name.
func assertWorkerName(actualWorker model.Worker, workerName string, t *testing.T) {
	t.Helper()
	got := string(actualWorker.Name)
	if got != workerName {
		t.Fatalf("expected worker to be %s, but was %v", workerName, actualWorker)
	}
}
// assertNumberOfWorkers fails the test unless exactly expectedNumber workers were returned.
func assertNumberOfWorkers(actualWorkers []model.Worker, expectedNumber int, t *testing.T) {
	t.Helper()
	if got := len(actualWorkers); got != expectedNumber {
		t.Fatalf("expected %d workers, but got %v", expectedNumber, actualWorkers)
	}
}
// assertNumberOfRequests fails the test unless exactly expectedNumber requests were returned.
func assertNumberOfRequests(actualRequests []model.WorkerRequest, expectedNumber int, t *testing.T) {
	t.Helper()
	if got := len(actualRequests); got != expectedNumber {
		t.Fatalf("expected %d requests, but got %v", expectedNumber, actualRequests)
	}
}
// assertRequestId fails the test unless the request carries the expected id.
func assertRequestId(actualRequest model.WorkerRequest, expectedId string, t *testing.T) {
	t.Helper()
	if got := string(actualRequest.Id); got != expectedId {
		t.Fatalf("expected request to have id %s, but got %s", expectedId, actualRequest.Id)
	}
}
// assertRequestEncryptionType fails the test unless the request uses the expected encryption type.
func assertRequestEncryptionType(actualRequest model.WorkerRequest, expectedType string, t *testing.T) {
	t.Helper()
	if got := actualRequest.EncryptionType; got != expectedType {
		t.Fatalf("expected request to have encryptionType %s, but got %s", expectedType, actualRequest.EncryptionType)
	}
}
// assertRequestContentContains fails the test unless the request body contains the given substring.
func assertRequestContentContains(actualRequest model.WorkerRequest, expectedContentSubstring string, t *testing.T) {
	t.Helper()
	needle := []byte(expectedContentSubstring)
	if !bytes.Contains(actualRequest.Content, needle) {
		t.Fatalf("expected request to have content containing '%s', but got %s", expectedContentSubstring, string(actualRequest.Content))
	}
}
// assertResponseContentType fails the test unless the response declares the
// expected content type; the body is included in the failure message to aid debugging.
func assertResponseContentType(actualResponse model.WorkerResponse, expectedContentType string, t *testing.T) {
	t.Helper()
	got := string(actualResponse.ContentType)
	if got != expectedContentType {
		t.Fatalf("expected response to have content type '%s', but got '%s' and content '%s'", expectedContentType, actualResponse.ContentType, actualResponse.Content)
	}
}
// assertResponseContent fails the test unless the response body matches exactly.
func assertResponseContent(actualResponse model.WorkerResponse, expectedContent string, t *testing.T) {
	t.Helper()
	if got := string(actualResponse.Content); got != expectedContent {
		t.Fatalf("expected response to have content '%s', but got '%s'", expectedContent, actualResponse.Content)
	}
}
// runDropDaemon starts a Drop daemon on the shared test port backed by a
// fresh BoltDB file, and returns the daemon together with a REST client
// pointed at it. The daemon's stop hook closes and deletes the database file.
func runDropDaemon(t *testing.T) (daemon.Daemon, model.Drop) {
	t.Helper()
	cfg := &config.Drop{
		Name:          dropName,
		ListenAddress: ":" + port,
		MaxRequestTimeoutInSeconds: config.DefaultMaxRequestTimeoutInSeconds,
		MaxWorkerTimeoutInSeconds:  config.DefaultMaxWorkerTimeoutInSeconds,
	}
	db, err := bolt.Open(dropDbFileName, 0664, bolt.DefaultOptions)
	if err != nil {
		t.Fatalf("could not open Drop's BoltDB: %s", err)
	}
	dropDaemon := drop.New(cfg, db, rest.NoTLS())
	// Cleanup runs on Stop(): close the DB first, then remove its file.
	dropDaemon.OnStop(func() error {
		if err := db.Close(); err != nil {
			return err
		}
		if err := os.Remove(dropDbFileName); err != nil {
			return err
		}
		return nil
	})
	dropDaemon.Start()
	// The client talks to the daemon over plain HTTP on the test port;
	// nil presumably means "no client-side key material" — see rest.NewClient.
	dropClient := rest.NewClient(parseUrlOrPanic("http://localhost:"+port), nil)
	return dropDaemon, dropClient
}
// runWorkerDaemon starts a Worker daemon registered against the local test
// Drop, backed by a fresh BoltDB file and a newly generated 2048-bit RSA key.
// It returns the daemon and the worker's public key bytes so tests can
// encrypt requests for it. The stop hook closes and deletes the database file.
func runWorkerDaemon(t *testing.T) (worker.Daemonized, []byte) {
	t.Helper()
	cfg := &config.Worker{
		Name:    workerName,
		DropUrl: parseUrlOrPanic("http://localhost:" + port),
		RegistrationTimeoutInSeconds:        config.DefaultRegistrationTimeoutInSeconds,
		UpdateRegistrationIntervalInSeconds: config.DefaultUpdateRegistrationIntervalInSeconds,
	}
	db, err := bolt.Open(workerDbFileName, 0664, bolt.DefaultOptions)
	if err != nil {
		t.Fatalf("could not open Worker's BoltDB: %s", err)
	}
	// Key generation happens per test run; the fingerprint identifies the
	// worker to the drop (10/4 are presumably length/challenge parameters —
	// see crypto.FingerprintPublicKey).
	privateKey, err := crypto.GeneratePrivateKey(2048)
	if err != nil {
		t.Fatalf("couldn't generate private key: %s", err)
	}
	publicKeyBytes, err := crypto.GeneratePublicKeyBytes(privateKey)
	if err != nil {
		t.Fatalf("couldn't generate public key bytes: %s", err)
	}
	fingerprint, err := crypto.FingerprintPublicKey(&privateKey.PublicKey, 10, 4)
	if err != nil {
		t.Fatalf("couldn't fingerprint public key: %s", err)
	}
	workerDaemon := worker.New(cfg, fingerprint, db, privateKey, config.DefaultPublicKeyFingerprintLength, config.DefaultPublicKeyFingerprintChallengeLevel)
	// Cleanup runs on Stop(): close the DB first, then remove its file.
	workerDaemon.OnStop(func() error {
		if err := db.Close(); err != nil {
			return err
		}
		if err := os.Remove(workerDbFileName); err != nil {
			return err
		}
		return nil
	})
	workerDaemon.Start()
	return workerDaemon, publicKeyBytes
}
// stopDaemon stops the daemon and records (but does not abort on) any error.
func stopDaemon(d daemon.Daemon, t *testing.T) {
	t.Helper()
	if err := d.Stop(); err != nil {
		t.Error(err)
	}
}
// parseUrlOrPanic parses s into a *url.URL, panicking on malformed input.
// Intended for test fixtures where the URL is a known-good constant.
func parseUrlOrPanic(s string) *url.URL {
	u, err := url.Parse(s)
	if err != nil {
		panic(err)
	}
	return u
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
// Returns results matching a query.
package search
import (
gobytes "bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/elastic/elastic-transport-go/v8/elastictransport"
"github.com/elastic/go-elasticsearch/v8/typedapi/types"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode"
)
const (
	// indexMask flags that the optional index path parameter was provided
	// (set in Index, consulted via paramSet in HttpRequest).
	indexMask = iota + 1
)

// ErrBuildPath is returned in case of missing parameters within the build of the request.
var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
// Search is a fluent builder for a single search API call.
type Search struct {
	transport elastictransport.Interface // performs the HTTP round-trip (see Perform)

	headers http.Header // extra headers set via Header()
	values  url.Values  // accumulated query-string parameters
	path    url.URL     // request URL assembled in HttpRequest()

	buf *gobytes.Buffer // serialised request body

	req      *Request                       // structured body payload
	deferred []func(request *Request) error // mutations applied to req just before serialisation
	raw      io.Reader                      // raw body; takes precedence over req (see Raw)

	paramSet int // bitmask of provided path parameters (indexMask)

	index string // optional index path parameter
}
// NewSearch type alias for index.
// It is the factory signature used to obtain fresh Search builders.
type NewSearch func() *Search
// NewSearchFunc returns a new instance of Search with the provided transport.
// Used in the index of the library this allows to retrieve every apis in once place.
func NewSearchFunc(tp elastictransport.Interface) NewSearch {
	return func() *Search {
		// Each invocation yields an independent builder bound to tp.
		n := New(tp)
		return n
	}
}
// Returns results matching a query.
//
// New constructs a Search builder with empty query values, empty headers,
// an empty body buffer, and a default structured Request.
//
// https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html
func New(tp elastictransport.Interface) *Search {
	r := &Search{
		transport: tp,
		values:    make(url.Values),
		headers:   make(http.Header),
		buf:       gobytes.NewBuffer(nil),

		req: NewRequest(),
	}

	return r
}
// Raw takes a json payload as input which is then passed to the http.Request
// If specified Raw takes precedence on Request method.
// The reader is consumed once, when HttpRequest builds the body.
func (r *Search) Raw(raw io.Reader) *Search {
	r.raw = raw
	return r
}
// Request allows to set the request property with the appropriate payload.
// It replaces the default Request created by New; ignored when Raw is set.
func (r *Search) Request(req *Request) *Search {
	r.req = req
	return r
}
// HttpRequest returns the http.Request object built from the
// given parameters: deferred request mutations are applied, the body is
// serialised (Raw wins over Request), the path is derived from the set
// parameters, and default content-negotiation headers are filled in.
func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) {
	var path strings.Builder
	var method string
	var req *http.Request

	var err error

	// Apply deferred mutations to the structured request before serialising.
	if len(r.deferred) > 0 {
		for _, f := range r.deferred {
			deferredErr := f(r.req)
			if deferredErr != nil {
				return nil, deferredErr
			}
		}
	}

	// A raw reader takes precedence over the structured request.
	if r.raw != nil {
		r.buf.ReadFrom(r.raw)
	} else if r.req != nil {
		data, err := json.Marshal(r.req)

		if err != nil {
			return nil, fmt.Errorf("could not serialise request for Search: %w", err)
		}

		r.buf.Write(data)
	}

	r.path.Scheme = "http"

	switch {
	case r.paramSet == 0:
		path.WriteString("/")
		path.WriteString("_search")

		method = http.MethodPost
	case r.paramSet == indexMask:
		path.WriteString("/")
		path.WriteString(r.index)
		path.WriteString("/")
		path.WriteString("_search")

		method = http.MethodPost
	}

	r.path.Path = path.String()
	r.path.RawQuery = r.values.Encode()

	if r.path.Path == "" {
		return nil, ErrBuildPath
	}

	if ctx != nil {
		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf)
	} else {
		req, err = http.NewRequest(method, r.path.String(), r.buf)
	}
	// Fix: the error must be checked before req is used. The previous code
	// assigned req.Header first, which panics on a nil req when request
	// construction fails.
	if err != nil {
		return nil, fmt.Errorf("could not build http.Request: %w", err)
	}

	req.Header = r.headers.Clone()

	if req.Header.Get("Content-Type") == "" {
		if r.buf.Len() > 0 {
			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8")
		}
	}

	if req.Header.Get("Accept") == "" {
		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8")
	}

	return req, nil
}
// Perform runs the http.Request through the provided transport and returns an http.Response.
// The caller is responsible for closing the response body.
func (r Search) Perform(ctx context.Context) (*http.Response, error) {
	req, err := r.HttpRequest(ctx)
	if err != nil {
		return nil, err
	}

	res, err := r.transport.Perform(req)
	if err != nil {
		return nil, fmt.Errorf("an error happened during the Search query execution: %w", err)
	}

	return res, nil
}
// Do runs the request through the transport, handle the response and returns a search.Response
// On non-success codes the body is decoded into an ElasticsearchError and
// returned as the error value.
func (r Search) Do(ctx context.Context) (*Response, error) {
	response := NewResponse()

	// Enable typed keys before performing the call.
	r.TypedKeys(true)

	res, err := r.Perform(ctx)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	// NOTE(review): only status codes strictly below 299 are treated as
	// success, so 299 itself falls through to error decoding — presumably a
	// generator quirk; confirm upstream before changing.
	if res.StatusCode < 299 {
		err = json.NewDecoder(res.Body).Decode(response)
		if err != nil {
			return nil, err
		}

		return response, nil
	}

	errorResponse := types.NewElasticsearchError()
	err = json.NewDecoder(res.Body).Decode(errorResponse)
	if err != nil {
		return nil, err
	}

	// Backfill the HTTP status when the error payload omits it.
	if errorResponse.Status == 0 {
		errorResponse.Status = res.StatusCode
	}

	return nil, errorResponse
}
// Header set a key, value pair in the Search headers map.
// Headers are copied onto the outgoing request in HttpRequest.
func (r *Search) Header(key, value string) *Search {
	r.headers.Set(key, value)

	return r
}
// Index Comma-separated list of data streams, indices, and aliases to search.
// Supports wildcards (`*`).
// To search all data streams and indices, omit this parameter or use `*` or
// `_all`.
// Setting it switches the request path from `/_search` to `/<index>/_search`.
// API Name: index
func (r *Search) Index(index string) *Search {
	r.paramSet |= indexMask
	r.index = index

	return r
}
// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index
// alias, or `_all` value targets only missing or closed indices.
// This behavior applies even if the request targets other open indices.
// For example, a request targeting `foo*,bar*` returns an error if an index
// starts with `foo` but no index starts with `bar`.
// Sent as the `allow_no_indices` query-string parameter.
// API name: allow_no_indices
func (r *Search) AllowNoIndices(allownoindices bool) *Search {
	r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices))

	return r
}
// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts or shard
// failures. If false, returns an error with no partial results.
// Sent as the `allow_partial_search_results` query-string parameter.
// API name: allow_partial_search_results
func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search {
	r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults))

	return r
}
// Analyzer Analyzer to use for the query string.
// This parameter can only be used when the q query string parameter is
// specified.
// Sent as the `analyzer` query-string parameter.
// API name: analyzer
func (r *Search) Analyzer(analyzer string) *Search {
	r.values.Set("analyzer", analyzer)

	return r
}
// AnalyzeWildcard If true, wildcard and prefix queries are analyzed.
// This parameter can only be used when the q query string parameter is
// specified.
// Sent as the `analyze_wildcard` query-string parameter.
// API name: analyze_wildcard
func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search {
	r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard))

	return r
}
// BatchedReduceSize The number of shard results that should be reduced at once on the
// coordinating node.
// This value should be used as a protection mechanism to reduce the memory
// overhead per search request if the potential number of shards in the request
// can be large.
// The value is passed through verbatim as the `batched_reduce_size` query parameter.
// API name: batched_reduce_size
func (r *Search) BatchedReduceSize(batchedreducesize string) *Search {
	r.values.Set("batched_reduce_size", batchedreducesize)

	return r
}
// CcsMinimizeRoundtrips If true, network round-trips between the coordinating node and the remote
// clusters are minimized when executing cross-cluster search (CCS) requests.
// Sent as the `ccs_minimize_roundtrips` query-string parameter.
// API name: ccs_minimize_roundtrips
func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search {
	r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips))

	return r
}
// DefaultOperator The default operator for query string query: AND or OR.
// This parameter can only be used when the `q` query string parameter is
// specified.
// The enum's string form is sent as the `default_operator` query parameter.
// API name: default_operator
func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search {
	r.values.Set("default_operator", defaultoperator.String())

	return r
}
// Df Field to use as default where no field prefix is given in the query string.
// This parameter can only be used when the q query string parameter is
// specified.
// Sent as the `df` query-string parameter.
// API name: df
func (r *Search) Df(df string) *Search {
	r.values.Set("df", df)

	return r
}
// ExpandWildcards Type of index that wildcard patterns can match.
// If the request can target data streams, this argument determines whether
// wildcard expressions match hidden data streams.
// Supports comma-separated values, such as `open,hidden`.
// The variadic values are joined with commas into the `expand_wildcards` query parameter.
// API name: expand_wildcards
func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search {
	tmp := []string{}
	for _, item := range expandwildcards {
		tmp = append(tmp, item.String())
	}
	r.values.Set("expand_wildcards", strings.Join(tmp, ","))

	return r
}
// IgnoreThrottled If `true`, concrete, expanded or aliased indices will be ignored when frozen.
// Sent as the `ignore_throttled` query-string parameter.
// API name: ignore_throttled
func (r *Search) IgnoreThrottled(ignorethrottled bool) *Search {
	r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled))

	return r
}
// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed
// index.
// Sent as the `ignore_unavailable` query-string parameter.
// API name: ignore_unavailable
func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search {
	r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable))

	return r
}
// Lenient If `true`, format-based query failures (such as providing text to a numeric
// field) in the query string will be ignored.
// This parameter can only be used when the `q` query string parameter is
// specified.
// Sent as the `lenient` query-string parameter.
// API name: lenient
func (r *Search) Lenient(lenient bool) *Search {
	r.values.Set("lenient", strconv.FormatBool(lenient))

	return r
}
// MaxConcurrentShardRequests Defines the number of concurrent shard requests per node this search executes
// concurrently.
// This value should be used to limit the impact of the search on the cluster in
// order to limit the number of concurrent shard requests.
// The value is passed through verbatim as the `max_concurrent_shard_requests` query parameter.
// API name: max_concurrent_shard_requests
func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests string) *Search {
	r.values.Set("max_concurrent_shard_requests", maxconcurrentshardrequests)

	return r
}
// MinCompatibleShardNode The minimum version of the node that can handle the request
// Any handling node with a lower version will fail the request.
// Sent as the `min_compatible_shard_node` query-string parameter.
// API name: min_compatible_shard_node
func (r *Search) MinCompatibleShardNode(versionstring string) *Search {
	r.values.Set("min_compatible_shard_node", versionstring)

	return r
}
// Preference Nodes and shards used for the search.
// By default, Elasticsearch selects from eligible nodes and shards using
// adaptive replica selection, accounting for allocation awareness. Valid values
// are:
// `_only_local` to run the search only on shards on the local node;
// `_local` to, if possible, run the search on shards on the local node, or if
// not, select shards using the default method;
// `_only_nodes:<node-id>,<node-id>` to run the search on only the specified
// nodes IDs, where, if suitable shards exist on more than one selected node,
// use shards on those nodes using the default method, or if none of the
// specified nodes are available, select shards from any available node using
// the default method;
// `_prefer_nodes:<node-id>,<node-id>` to if possible, run the search on the
// specified nodes IDs, or if not, select shards using the default method;
// `_shards:<shard>,<shard>` to run the search only on the specified shards;
// `<custom-string>` (any string that does not start with `_`) to route searches
// with the same `<custom-string>` to the same shards in the same order.
// Sent as the `preference` query-string parameter.
// API name: preference
func (r *Search) Preference(preference string) *Search {
	r.values.Set("preference", preference)

	return r
}
// PreFilterShardSize Defines a threshold that enforces a pre-filter roundtrip to prefilter search
// shards based on query rewriting if the number of shards the search request
// expands to exceeds the threshold.
// This filter roundtrip can limit the number of shards significantly if for
// instance a shard can not match any documents based on its rewrite method (if
// date filters are mandatory to match but the shard bounds and the query are
// disjoint).
// When unspecified, the pre-filter phase is executed if any of these conditions
// is met:
// the request targets more than 128 shards;
// the request targets one or more read-only index;
// the primary sort of the query targets an indexed field.
// API name: pre_filter_shard_size
func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search {
	r.values.Set("pre_filter_shard_size", prefiltershardsize)
	return r
}

// RequestCache If `true`, the caching of search results is enabled for requests where `size`
// is `0`.
// Defaults to index level settings.
// API name: request_cache
func (r *Search) RequestCache(requestcache bool) *Search {
	r.values.Set("request_cache", strconv.FormatBool(requestcache))
	return r
}

// Routing Custom value used to route operations to a specific shard.
// API name: routing
func (r *Search) Routing(routing string) *Search {
	r.values.Set("routing", routing)
	return r
}

// Scroll Period to retain the search context for scrolling. See Scroll search results.
// By default, this value cannot exceed `1d` (24 hours).
// You can change this limit using the `search.max_keep_alive` cluster-level
// setting.
// API name: scroll
func (r *Search) Scroll(duration string) *Search {
	r.values.Set("scroll", duration)
	return r
}

// SearchType How distributed term frequencies are calculated for relevance scoring.
// API name: search_type
func (r *Search) SearchType(searchtype searchtype.SearchType) *Search {
	// The parameter intentionally shadows the searchtype package; the
	// call below is a method on the parameter value.
	r.values.Set("search_type", searchtype.String())
	return r
}
// SuggestField Specifies which field to use for suggestions.
// API name: suggest_field
func (r *Search) SuggestField(field string) *Search {
	r.values.Set("suggest_field", field)
	return r
}

// SuggestMode Specifies the suggest mode.
// This parameter can only be used when the `suggest_field` and `suggest_text`
// query string parameters are specified.
// API name: suggest_mode
func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search {
	r.values.Set("suggest_mode", suggestmode.String())
	return r
}

// SuggestSize Number of suggestions to return.
// This parameter can only be used when the `suggest_field` and `suggest_text`
// query string parameters are specified.
// API name: suggest_size
func (r *Search) SuggestSize(suggestsize string) *Search {
	r.values.Set("suggest_size", suggestsize)
	return r
}

// SuggestText The source text for which the suggestions should be returned.
// This parameter can only be used when the `suggest_field` and `suggest_text`
// query string parameters are specified.
// API name: suggest_text
func (r *Search) SuggestText(suggesttext string) *Search {
	r.values.Set("suggest_text", suggesttext)
	return r
}

// TypedKeys If `true`, aggregation and suggester names are be prefixed by their
// respective types in the response.
// API name: typed_keys
func (r *Search) TypedKeys(typedkeys bool) *Search {
	r.values.Set("typed_keys", strconv.FormatBool(typedkeys))
	return r
}

// RestTotalHitsAsInt Indicates whether `hits.total` should be rendered as an integer or an object
// in the rest search response.
// API name: rest_total_hits_as_int
func (r *Search) RestTotalHitsAsInt(resttotalhitsasint bool) *Search {
	r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint))
	return r
}

// SourceExcludes_ A comma-separated list of source fields to exclude from the response.
// You can also use this parameter to exclude fields from the subset specified
// in `_source_includes` query parameter.
// If the `_source` parameter is `false`, this parameter is ignored.
// API name: _source_excludes
func (r *Search) SourceExcludes_(fields ...string) *Search {
	r.values.Set("_source_excludes", strings.Join(fields, ","))
	return r
}

// SourceIncludes_ A comma-separated list of source fields to include in the response.
// If this parameter is specified, only these source fields are returned.
// You can exclude fields from this subset using the `_source_excludes` query
// parameter.
// If the `_source` parameter is `false`, this parameter is ignored.
// API name: _source_includes
func (r *Search) SourceIncludes_(fields ...string) *Search {
	r.values.Set("_source_includes", strings.Join(fields, ","))
	return r
}

// Q Query in the Lucene query string syntax using query parameter search.
// Query parameter searches do not support the full Elasticsearch Query DSL but
// are handy for testing.
// API name: q
func (r *Search) Q(q string) *Search {
	r.values.Set("q", q)
	return r
}
// NOTE: from here on the setters populate the JSON request body
// (r.req) rather than the URL query string.
//
// Aggregations Defines the aggregations that are run as part of the search request.
// API name: aggregations
func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search {
	r.req.Aggregations = aggregations
	return r
}

// Collapse Collapses search results the values of the specified field.
// API name: collapse
func (r *Search) Collapse(collapse *types.FieldCollapse) *Search {
	r.req.Collapse = collapse
	return r
}

// DocvalueFields Array of wildcard (`*`) patterns.
// The request returns doc values for field names matching these patterns in the
// `hits.fields` property of the response.
// API name: docvalue_fields
func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Search {
	r.req.DocvalueFields = docvaluefields
	return r
}

// Explain If true, returns detailed information about score computation as part of a
// hit.
// API name: explain
func (r *Search) Explain(explain bool) *Search {
	r.req.Explain = &explain
	return r
}

// Ext Configuration of search extensions defined by Elasticsearch plugins.
// API name: ext
func (r *Search) Ext(ext map[string]json.RawMessage) *Search {
	r.req.Ext = ext
	return r
}

// Fields Array of wildcard (`*`) patterns.
// The request returns values for field names matching these patterns in the
// `hits.fields` property of the response.
// API name: fields
func (r *Search) Fields(fields ...types.FieldAndFormat) *Search {
	r.req.Fields = fields
	return r
}

// From Starting document offset.
// Needs to be non-negative.
// By default, you cannot page through more than 10,000 hits using the `from`
// and `size` parameters.
// To page through more hits, use the `search_after` parameter.
// API name: from
func (r *Search) From(from int) *Search {
	r.req.From = &from
	return r
}

// Highlight Specifies the highlighter to use for retrieving highlighted snippets from one
// or more fields in your search results.
// API name: highlight
func (r *Search) Highlight(highlight *types.Highlight) *Search {
	r.req.Highlight = highlight
	return r
}

// IndicesBoost Boosts the _score of documents from specified indices.
// API name: indices_boost
func (r *Search) IndicesBoost(indicesboosts ...map[string]types.Float64) *Search {
	r.req.IndicesBoost = indicesboosts
	return r
}

// Knn Defines the approximate kNN search to run.
// API name: knn
func (r *Search) Knn(knns ...types.KnnQuery) *Search {
	r.req.Knn = knns
	return r
}

// MinScore Minimum `_score` for matching documents.
// Documents with a lower `_score` are not included in the search results.
// API name: min_score
func (r *Search) MinScore(minscore types.Float64) *Search {
	r.req.MinScore = &minscore
	return r
}

// Pit Limits the search to a point in time (PIT).
// If you provide a PIT, you cannot specify an `<index>` in the request path.
// API name: pit
func (r *Search) Pit(pit *types.PointInTimeReference) *Search {
	r.req.Pit = pit
	return r
}
// PostFilter Use the `post_filter` parameter to filter search results.
// The search hits are filtered after the aggregations are calculated.
// A post filter has no impact on the aggregation results.
// API name: post_filter
func (r *Search) PostFilter(postfilter *types.Query) *Search {
	r.req.PostFilter = postfilter
	return r
}

// Profile Set to `true` to return detailed timing information about the execution of
// individual components in a search request.
// NOTE: This is a debugging tool and adds significant overhead to search
// execution.
// API name: profile
func (r *Search) Profile(profile bool) *Search {
	r.req.Profile = &profile
	return r
}

// Query Defines the search definition using the Query DSL.
// API name: query
func (r *Search) Query(query *types.Query) *Search {
	r.req.Query = query
	return r
}

// Rank Defines the Reciprocal Rank Fusion (RRF) to use.
// API name: rank
func (r *Search) Rank(rank *types.RankContainer) *Search {
	r.req.Rank = rank
	return r
}

// Rescore Can be used to improve precision by reordering just the top (for example 100
// - 500) documents returned by the `query` and `post_filter` phases.
// API name: rescore
func (r *Search) Rescore(rescores ...types.Rescore) *Search {
	r.req.Rescore = rescores
	return r
}

// RuntimeMappings Defines one or more runtime fields in the search request.
// These fields take precedence over mapped fields with the same name.
// API name: runtime_mappings
func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search {
	r.req.RuntimeMappings = runtimefields
	return r
}

// ScriptFields Retrieve a script evaluation (based on different fields) for each hit.
// API name: script_fields
func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search {
	r.req.ScriptFields = scriptfields
	return r
}

// SearchAfter Used to retrieve the next page of hits using a set of sort values from the
// previous page.
// API name: search_after
func (r *Search) SearchAfter(sortresults ...types.FieldValue) *Search {
	r.req.SearchAfter = sortresults
	return r
}

// SeqNoPrimaryTerm If `true`, returns sequence number and primary term of the last modification
// of each hit.
// API name: seq_no_primary_term
func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search {
	r.req.SeqNoPrimaryTerm = &seqnoprimaryterm
	return r
}

// Size The number of hits to return.
// By default, you cannot page through more than 10,000 hits using the `from`
// and `size` parameters.
// To page through more hits, use the `search_after` parameter.
// API name: size
func (r *Search) Size(size int) *Search {
	r.req.Size = &size
	return r
}

// Slice Can be used to split a scrolled search into multiple slices that can be
// consumed independently.
// API name: slice
func (r *Search) Slice(slice *types.SlicedScroll) *Search {
	r.req.Slice = slice
	return r
}

// Sort A comma-separated list of <field>:<direction> pairs.
// NOTE(review): the doc line above describes the query-string form, but
// this setter takes structured SortCombinations for the request body —
// confirm against the upstream API spec.
// API name: sort
func (r *Search) Sort(sorts ...types.SortCombinations) *Search {
	r.req.Sort = sorts
	return r
}
// Source_ Indicates which source fields are returned for matching documents.
// These fields are returned in the hits._source property of the search
// response.
// API name: _source
func (r *Search) Source_(sourceconfig types.SourceConfig) *Search {
	r.req.Source_ = sourceconfig
	return r
}

// Stats Stats groups to associate with the search.
// Each group maintains a statistics aggregation for its associated searches.
// You can retrieve these stats using the indices stats API.
// API name: stats
func (r *Search) Stats(stats ...string) *Search {
	r.req.Stats = stats
	return r
}

// StoredFields List of stored fields to return as part of a hit.
// If no fields are specified, no stored fields are included in the response.
// If this field is specified, the `_source` parameter defaults to `false`.
// You can pass `_source: true` to return both source fields and stored fields
// in the search response.
// API name: stored_fields
func (r *Search) StoredFields(fields ...string) *Search {
	r.req.StoredFields = fields
	return r
}

// Suggest Defines a suggester that provides similar looking terms based on a provided
// text.
// API name: suggest
func (r *Search) Suggest(suggest *types.Suggester) *Search {
	r.req.Suggest = suggest
	return r
}

// TerminateAfter Maximum number of documents to collect for each shard.
// If a query reaches this limit, Elasticsearch terminates the query early.
// Elasticsearch collects documents before sorting.
// Use with caution.
// Elasticsearch applies this parameter to each shard handling the request.
// When possible, let Elasticsearch perform early termination automatically.
// Avoid specifying this parameter for requests that target data streams with
// backing indices across multiple data tiers.
// If set to `0` (default), the query does not terminate early.
// API name: terminate_after
func (r *Search) TerminateAfter(terminateafter int64) *Search {
	r.req.TerminateAfter = &terminateafter
	return r
}

// Timeout Specifies the period of time to wait for a response from each shard.
// If no response is received before the timeout expires, the request fails and
// returns an error.
// Defaults to no timeout.
// API name: timeout
func (r *Search) Timeout(timeout string) *Search {
	r.req.Timeout = &timeout
	return r
}

// TrackScores If true, calculate and return document scores, even if the scores are not
// used for sorting.
// API name: track_scores
func (r *Search) TrackScores(trackscores bool) *Search {
	r.req.TrackScores = &trackscores
	return r
}

// TrackTotalHits Number of hits matching the query to count accurately.
// If `true`, the exact number of hits is returned at the cost of some
// performance.
// If `false`, the response does not include the total number of hits matching
// the query.
// API name: track_total_hits
func (r *Search) TrackTotalHits(trackhits types.TrackHits) *Search {
	r.req.TrackTotalHits = trackhits
	return r
}

// Version If true, returns document version as part of a hit.
// API name: version
func (r *Search) Version(version bool) *Search {
	r.req.Version = &version
	return r
}
|
package fixtures
import "github.com/pkg/errors"
// foo is a lint-test fixture: the trailing "// MATCH ..." directives mark
// lines where the errors.Wrap-on-nil check is expected to report.
// Do not "fix" the wrapped nil errors here — they are the behavior under test.
func foo(a bool, b int) (err error) {
	n, err := bar()
	if n == 0 {
		// err may still be nil on this path — that is the pattern being linted.
		return errors.Wrap(err, "aaaa") // MATCH /errors.Wrap nil/
	}
	if n == 0 && err != nil {
		return errors.Wrap(err, "ccc")
	}
	if err != nil {
		return errors.Wrap(err, "xxxx")
	}
	return nil
}
// bar is a stub that always succeeds with a zero count; the fixtures in
// this file exercise how its (value, error) pair is wrapped by callers.
func bar() (int, error) {
	var count int
	var err error
	return count, err
}
// foo2 is a lint-test fixture mirroring foo but with a (value, error)
// return; MATCH directives mark the expected lint reports. Keep the code
// exactly as-is — it is input for the linter, not production logic.
func foo2(a bool, b int) (n int, err error) {
	n, err = bar()
	if n == 0 {
		return 0, errors.Wrap(err, "aaaa") // MATCH /errors.Wrap nil/
	}
	if n == 0 && err != nil {
		return 0, errors.Wrap(err, "ccc")
	}
	if err != nil {
		return 0, errors.Wrap(err, "xxxx")
	}
	return 0, nil
}
// NOTE(review): this fixture redeclares foo in the same package and
// references undeclared identifiers (tceProject, psm), so the file cannot
// compile as ordinary Go — presumably the linter consumes it as a source
// fixture only. Confirm before reusing this package elsewhere.
func foo(a bool, b int) (n int, err error) {
	n, err = bar()
	if n == 0 {
		if err != nil {
			return 0, errors.Wrap(err, "aaaa")
		}
	}
	if n == 0 && err != nil {
		return 0, errors.Wrap(err, "ccc")
	}
	if n == 0 || err != nil {
		return 0, errors.Wrap(err, "zzz") // MATCH /errors.Wrap nil/
	}
	if err != nil {
		return 0, errors.Wrap(err, "xxxx")
	}
	if err != nil || tceProject == nil {
		return 0, errors.Wrapf(err, "get serviceId from tce with err:%v, psm:%s", err, psm) // MATCH /errors.Wrap nil/
	} else {
		return tceProject.ID, nil
	}
	return 0, nil
}
|
// Copyright © 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package signer is a package that provides application-level signing operations.
package signer
import (
"context"
"github.com/attestantio/go-eth2-client/spec/altair"
"github.com/attestantio/go-eth2-client/spec/phase0"
e2wtypes "github.com/wealdtech/go-eth2-wallet-types/v2"
)
// Service is the generic signer service.
// It is intentionally empty: concrete signers advertise capabilities by
// additionally implementing the per-operation interfaces below.
type Service interface{}

// AggregateAndProofSigner provides methods to sign aggregate and proofs.
type AggregateAndProofSigner interface {
	// SignAggregateAndProof signs an aggregate attestation for given slot and root.
	SignAggregateAndProof(ctx context.Context,
		account e2wtypes.Account,
		slot phase0.Slot,
		root phase0.Root,
	) (
		phase0.BLSSignature,
		error,
	)
}
// BeaconAttestationSigner provides methods to sign beacon attestations.
type BeaconAttestationSigner interface {
	// SignBeaconAttestation signs a beacon attestation.
	SignBeaconAttestation(ctx context.Context,
		account e2wtypes.Account,
		slot phase0.Slot,
		committeeIndex phase0.CommitteeIndex,
		blockRoot phase0.Root,
		sourceEpoch phase0.Epoch,
		sourceRoot phase0.Root,
		targetEpoch phase0.Epoch,
		targetRoot phase0.Root,
	) (
		phase0.BLSSignature,
		error,
	)
}

// BeaconAttestationsSigner provides methods to sign multiple beacon attestations.
type BeaconAttestationsSigner interface {
	// SignBeaconAttestations signs multiple beacon attestations in one call,
	// one per (account, committee index) pair for the same slot and roots.
	SignBeaconAttestations(ctx context.Context,
		accounts []e2wtypes.Account,
		slot phase0.Slot,
		committeeIndices []phase0.CommitteeIndex,
		blockRoot phase0.Root,
		sourceEpoch phase0.Epoch,
		sourceRoot phase0.Root,
		targetEpoch phase0.Epoch,
		targetRoot phase0.Root,
	) (
		[]phase0.BLSSignature,
		error,
	)
}
// BeaconBlockSigner provides methods to sign beacon blocks.
type BeaconBlockSigner interface {
	// SignBeaconBlockProposal signs a beacon block proposal.
	SignBeaconBlockProposal(ctx context.Context,
		account e2wtypes.Account,
		slot phase0.Slot,
		proposerIndex phase0.ValidatorIndex,
		parentRoot phase0.Root,
		stateRoot phase0.Root,
		bodyRoot phase0.Root,
	) (
		phase0.BLSSignature,
		error,
	)
}

// RANDAORevealSigner provides methods to sign RANDAO reveals.
type RANDAORevealSigner interface {
	// SignRANDAOReveal returns a RANDAO signature.
	// This signs an epoch with the "RANDAO" domain.
	// NOTE(review): the comment says "epoch" but the parameter is a slot —
	// presumably implementations derive the epoch from the slot; confirm.
	SignRANDAOReveal(ctx context.Context,
		account e2wtypes.Account,
		slot phase0.Slot,
	) (
		phase0.BLSSignature,
		error,
	)
}

// SlotSelectionSigner provides methods to sign slot selections.
type SlotSelectionSigner interface {
	// SignSlotSelection returns a slot selection signature.
	// This signs a slot with the "selection proof" domain.
	SignSlotSelection(ctx context.Context,
		account e2wtypes.Account,
		slot phase0.Slot,
	) (
		phase0.BLSSignature,
		error,
	)
}
// SyncCommitteeRootSigner provides methods to sign a sync committee root.
type SyncCommitteeRootSigner interface {
	// SignSyncCommitteeRoot returns a root signature.
	// This signs a beacon block root with the "sync committee" domain.
	SignSyncCommitteeRoot(ctx context.Context,
		account e2wtypes.Account,
		epoch phase0.Epoch,
		root phase0.Root,
	) (
		phase0.BLSSignature,
		error,
	)
}

// SyncCommitteeSelectionSigner provides methods to sign sync committee selections.
type SyncCommitteeSelectionSigner interface {
	// SignSyncCommitteeSelection returns a sync committee selection signature.
	// This signs a slot and subcommittee with the "sync committee selection proof" domain.
	SignSyncCommitteeSelection(ctx context.Context,
		account e2wtypes.Account,
		slot phase0.Slot,
		subcommitteeIndex uint64,
	) (
		phase0.BLSSignature,
		error,
	)
}

// ContributionAndProofSigner provides methods to sign contribution and proofs.
type ContributionAndProofSigner interface {
	// SignContributionAndProof signs the given sync committee contribution and proof.
	SignContributionAndProof(ctx context.Context,
		account e2wtypes.Account,
		contributionAndProof *altair.ContributionAndProof,
	) (
		phase0.BLSSignature,
		error,
	)
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log_test
import (
"io"
"os"
"testing"
zaplog "github.com/pingcap/log"
"github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/util/logutil"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
// TestConfigAdjust checks that Config.Adjust defaults the log level to
// "info", and that InitLogger rejects a directory ("." here) as the log
// file name with the exact documented error message.
func TestConfigAdjust(t *testing.T) {
	cfg := &log.Config{}
	cfg.Adjust()
	require.Equal(t, "info", cfg.Level)
	cfg.File = "."
	err := log.InitLogger(cfg, "info")
	require.EqualError(t, err, "can't use directory as log file name")
}
// TestTestLogger verifies that the in-memory test logger emits the
// expected JSON-encoded entry (level, message and typed zap fields).
func TestTestLogger(t *testing.T) {
	logger, buffer := log.MakeTestLogger()
	logger.Warn("the message", zap.Int("number", 123456), zap.Ints("array", []int{7, 8, 9}))
	require.Equal(t, `{"$lvl":"WARN","$msg":"the message","number":123456,"array":[7,8,9]}`, buffer.Stripped())
}
// TestInitStdoutLogger checks that a Config with File "-" routes log
// output to stdout, that package filtering is on by default, and that
// EnableDiagnoseLogs disables filtering and turns on gRPC debugging.
func TestInitStdoutLogger(t *testing.T) {
	r, w, err := os.Pipe()
	require.NoError(t, err)
	oldStdout := os.Stdout
	os.Stdout = w
	msg := "logger is initialized to stdout"
	// Drain the pipe in the background. The reader reports its result over
	// channels instead of calling require.* directly: testify's require
	// (FailNow) must only be invoked from the goroutine running the test,
	// so asserting inside this goroutine would not reliably stop the test.
	outputC := make(chan string, 1)
	readErrC := make(chan error, 1)
	go func() {
		buf := make([]byte, 4096)
		n := 0
		var readErr error
		for {
			nn, err := r.Read(buf[n:])
			if nn == 0 || err == io.EOF {
				break
			}
			if err != nil {
				readErr = err
				break
			}
			n += nn
		}
		outputC <- string(buf[:n])
		readErrC <- readErr
	}()
	logCfg := &log.Config{File: "-"}
	err = log.InitLogger(logCfg, "info")
	require.NoError(t, err)
	log.L().Info(msg)
	// Restore stdout and close the writer so the reader goroutine sees EOF.
	os.Stdout = oldStdout
	require.NoError(t, w.Close())
	output := <-outputC
	require.NoError(t, <-readErrC)
	require.NoError(t, r.Close())
	require.Contains(t, output, msg)
	// filter packages on default
	require.Equal(t, "", os.Getenv(logutil.GRPCDebugEnvName))
	require.IsType(t, &log.FilterCore{}, log.L().Logger.Core())
	// output all packages when EnableDiagnoseLogs=true
	logCfg.EnableDiagnoseLogs = true
	require.NoError(t, log.InitLogger(logCfg, "info"))
	require.IsType(t, &zaplog.TextIOCore{}, log.L().Logger.Core())
	require.Equal(t, "true", os.Getenv(logutil.GRPCDebugEnvName))
	// reset GRPCDebugEnvName
	require.NoError(t, os.Unsetenv(logutil.GRPCDebugEnvName))
}
|
package config
import (
"encoding/json"
"github.com/pkg/errors"
"io/ioutil"
)
// Configuration file definitions: the JSON layout of the application's
// config file, loaded once by InitConfig.

// LogConfig holds logging settings.
type LogConfig struct {
	LogPath  string `json:"log_path"`
	LogLevel string `json:"log_level"`
}

// Config is the top-level configuration document.
type Config struct {
	LogConfig   LogConfig   `json:"log_config"`
	DBConfig    DBConfig    `json:"db_config"`
	RedisConfig RedisConfig `json:"redis_config"`
}

// DBConfig holds database connection settings.
type DBConfig struct {
	DbHost     string `json:"db_host"`
	DbPort     string `json:"db_port"`
	DbUser     string `json:"db_user"`
	DbPassword string `json:"db_password"`
	DbName     string `json:"db_name"`
}

// RedisConfig holds Redis connection settings.
type RedisConfig struct {
	Addr     string `json:"addr"`
	Password string `json:"password"`
	DB       int    `json:"db"`
}

// conf is the package-level parsed configuration, filled by InitConfig
// and read via GetConfig.
var conf Config
// InitConfig reads and parses the JSON configuration file at configPath
// into the package-level config. It must be called before GetConfig.
// It returns a wrapped error when the file cannot be read or parsed.
func InitConfig(configPath string) error {
	configFile, err := ioutil.ReadFile(configPath)
	if err != nil {
		return errors.Wrap(err, "读取配置文件失败")
	}
	if err = json.Unmarshal(configFile, &conf); err != nil {
		// BUG FIX: previously the wrapped unmarshal error was assigned but
		// the function still returned nil, silently accepting a broken
		// config; the message also wrongly said "删除" (delete) instead of
		// "解析" (parse).
		return errors.Wrap(err, "解析配置文件失败")
	}
	return nil
}
// GetConfig returns a copy of the parsed configuration.
// It is only meaningful after a successful InitConfig call; before that
// it returns the zero-value Config.
func GetConfig() Config{
	return conf
}
|
package sarama
import (
"testing"
)
func TestActorWorker(t *testing.T){
kafkaTopic := "air_temperature_10"
StartProducer(kafkaTopic, 308593)
StartAverageCalcConsumer(kafkaTopic)
} |
package config
import (
"os"
)
// Configuration holds the application settings, each populated from an
// environment variable with the LINKEDLOCKED_ prefix.
type Configuration struct {
	DatabaseURI     string // LINKEDLOCKED_DATABASE_URI
	ListenAddress   string // LINKEDLOCKED_LISTEN_ADDRESS
	Secret          string // LINKEDLOCKED_SECRET
	Debug           bool   // true when LINKEDLOCKED_DEBUG == "true"
	EnvironmentName string // LINKEDLOCKED_ENV_NAME
}
// C is the package-wide configuration instance, populated at load time.
var C = Configuration{}
// getEnv returns the value of the environment variable named
// "LINKEDLOCKED_" + variable, or the empty string when it is unset.
func getEnv(variable string) string {
	const prefix = "LINKEDLOCKED_"
	return os.Getenv(prefix + variable)
}
// init populates C from the LINKEDLOCKED_-prefixed environment
// variables at package load time.
func init() {
	C.DatabaseURI = getEnv("DATABASE_URI")
	C.Secret = getEnv("SECRET")
	C.ListenAddress = getEnv("LISTEN_ADDRESS")
	C.EnvironmentName = getEnv("ENV_NAME")
	// Debug is on only for the exact string "true".
	C.Debug = getEnv("DEBUG") == "true"
}
|
package stateful
import (
"context"
"fmt"
aliceapi "github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/alice/api"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/errors"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/model"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/todolist"
)
// deleteListAction carries the state of a multi-turn "delete list"
// dialogue as it is refined across user turns.
type deleteListAction struct {
	listID    model.TODOListID // resolved list identifier; empty until matched by name
	listName  string           // user-facing list name (alias)
	confirmed bool             // true once the user has answered the confirmation prompt
}
// deleteListFromScratch starts a delete-list dialogue from a plain
// utterance: when the DeleteList intent is recognized it seeds the
// action with the (optional) list name slot and delegates to
// doDeleteList. A nil, nil return means "not handled here".
func (h *Handler) deleteListFromScratch(ctx context.Context, req *aliceapi.Request) (*aliceapi.Response, errors.Err) {
	if req.Request.Type != aliceapi.RequestTypeSimple {
		return nil, nil
	}
	intent := req.Request.NLU.Intents.DeleteList
	if intent == nil {
		return nil, nil
	}
	action := deleteListAction{}
	if name, ok := intent.Slots.ListName.AsString(); ok {
		action.listName = name
	}
	return h.doDeleteList(ctx, &action)
}
// deleteListReqList handles the "which list?" step: a button press
// supplies both the list id and name; any other request type treats the
// raw utterance as the list name. A nil, nil return means "not handled".
func (h *Handler) deleteListReqList(ctx context.Context, req *aliceapi.Request) (*aliceapi.Response, errors.Err) {
	action := &deleteListAction{}
	if req.Request.Type != aliceapi.RequestTypeButton {
		action.listName = req.Request.OriginalUtterance
		return h.doDeleteList(ctx, action)
	}
	if req.Request.Payload.ChooseListName == "" {
		return nil, nil
	}
	action.listID = req.Request.Payload.ChooseListID
	action.listName = req.Request.Payload.ChooseListName
	return h.doDeleteList(ctx, action)
}
// deleteListReqConfirm handles the confirmation step: once the user
// answers the "are you sure" prompt with a recognized Confirm intent,
// the delete is re-run with confirmed=true using the list identity
// stashed in session state.
func (h *Handler) deleteListReqConfirm(ctx context.Context, req *aliceapi.Request) (*aliceapi.Response, errors.Err) {
	if req.Request.Type != aliceapi.RequestTypeSimple {
		return nil, nil
	}
	if req.Request.NLU.Intents.Confirm == nil {
		// fallback to "didn't recognize what do you want" since explicit rejection is handled on a higher level
		return nil, nil
	}
	return h.doDeleteList(ctx, &deleteListAction{
		listName:  req.State.Session.ListName,
		listID:    req.State.Session.ListID,
		confirmed: true,
	})
}
// doDeleteList advances the delete-list dialogue given whatever is known
// so far: it asks for a list when none is named, resolves a name to an
// id, asks for confirmation, and finally performs the removal.
func (h *Handler) doDeleteList(ctx context.Context, action *deleteListAction) (*aliceapi.Response, errors.Err) {
	if action.listName == "" {
		// list not selected, going to ask user
		listButtons, err := h.suggestListButtons(ctx)
		if err != nil {
			return nil, err
		}
		if len(listButtons) == 0 {
			return respondNoLists("У вас пока нет ни одного списка"), nil
		}
		// Ask which list to delete and move the session into the
		// "awaiting list name" state.
		return &aliceapi.Response{
			Response: &aliceapi.Resp{
				Text:    "Какой список вы хотите удалить?",
				Buttons: listButtons,
			},
			State: &aliceapi.StateData{
				State: aliceapi.StateDelReqName,
			},
		}, nil
	}
	if action.listID == "" {
		// list name selected but not resolved to id
		entry, err := h.findListByName(ctx, action.listName)
		if err != nil {
			return nil, err
		}
		if entry == nil {
			return &aliceapi.Response{Response: &aliceapi.Resp{
				Text: fmt.Sprintf("Я не нашла у вас список \"%s\"", action.listName),
			}}, nil
		}
		// Canonicalize to the stored id and alias.
		action.listID = entry.ListID
		action.listName = entry.Alias
	}
	if !action.confirmed {
		// Ask for confirmation, stashing the resolved list in session
		// state so deleteListReqConfirm can pick it up next turn.
		return &aliceapi.Response{
			Response: &aliceapi.Resp{
				Text: fmt.Sprintf("Вы точно хотите удалить \"%s\"?", action.listName),
			},
			State: &aliceapi.StateData{
				State:    aliceapi.StateDelReqConfirm,
				ListID:   action.listID,
				ListName: action.listName,
			},
		}, nil
	}
	// Confirmed: actually remove the list.
	err := h.todoListService.RemoveList(ctx, &todolist.RemoveListRequest{ID: action.listID})
	if err == nil {
		return &aliceapi.Response{Response: &aliceapi.Resp{
			Text: fmt.Sprintf("Готово, удалила список \"%s\"", action.listName),
		}}, nil
	}
	return nil, err
}
|
// Copyright 2018 Kuei-chun Chen. All rights reserved.
package mdb
import (
"context"
"testing"
"github.com/simagix/gox"
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
)
// TestGetAllShardURIstWithConn is an integration test: it connects to a
// live mongos (the credentials below are placeholders), lists the shards
// and derives a per-shard URI from each.
// NOTE(review): the name looks like a typo for "...URIsWithConn"; nothing
// references test names, so renaming would be safe — left as-is here.
func TestGetAllShardURIstWithConn(t *testing.T) {
	var err error
	UnitTestURL = "mongodb://user:password@localhost/"
	client := getMongoClient()
	defer client.Disconnect(context.Background())
	var shards []Shard
	if shards, err = GetShards(client); err != nil {
		t.Fatal(err)
	}
	// Parse error ignored deliberately: the URL above is a fixed literal.
	connString, _ := connstring.Parse(UnitTestURL)
	if _, err := GetAllShardURIs(shards, connString); err != nil {
		t.Fatal(err)
	}
	for _, shard := range shards {
		t.Log(gox.Stringify(shard, "", " "))
	}
}
// TestGetAllShardURIs verifies that GetAllShardURIs turns each
// "replicaSet/host,host,..." shard host string into a full replica-set
// URI, carrying over credentials and query parameters from the source
// connection string.
func TestGetAllShardURIs(t *testing.T) {
	uri := "mongodb://user:secret@localhost/?authSource=admin&w=2&readPreference=secondary&tls=true"
	connString, err := connstring.Parse(uri)
	if err != nil {
		t.Fatal(err)
	}
	shards := []Shard{
		{ID: "shard-0", Host: "shard0/shard-00-00:27017,shard-00-01:27017,shard-00-02:27017", State: 1},
		{ID: "shard-1", Host: "shard1/shard-01-00:27017,shard-01-01:27017,shard-01-02:27017", State: 1},
	}
	var list []string
	if list, err = GetAllShardURIs(shards, connString); err != nil {
		t.Fatal(err)
	}
	if len(list) != 2 {
		t.Fatal("expected 2, but got", len(list))
	}
	expected := []string{
		"mongodb://user:secret@shard-00-00:27017,shard-00-01:27017,shard-00-02:27017/?replicaSet=shard0&authSource=admin&tls=true&readPreference=secondary&w=2",
		"mongodb://user:secret@shard-01-00:27017,shard-01-01:27017,shard-01-02:27017/?replicaSet=shard1&authSource=admin&tls=true&readPreference=secondary&w=2",
	}
	for i, v := range list {
		assertEqual(t, v, expected[i])
	}
}
// TestGetAllServerURIs verifies that GetAllServerURIs expands the shard
// host strings into one URI per individual server, applying the
// SRV-scheme defaults (authSource=admin, ssl=true).
// Changes from the original: the connstring.Parse error is no longer
// discarded, and comparisons use the shared assertEqual helper for
// consistency with TestGetAllShardURIs.
func TestGetAllServerURIs(t *testing.T) {
	uri := "mongodb+srv://user:secret@keyhole.example.com/db"
	connString, err := connstring.Parse(uri)
	if err != nil {
		t.Fatal(err)
	}
	shards := []Shard{
		{ID: "shard-0", Host: "shard0/shard-00-00:27017,shard-00-01:27017,shard-00-02:27017", State: 1},
		{ID: "shard-1", Host: "shard1/shard-01-00:27017,shard-01-01:27017,shard-01-02:27017", State: 1},
	}
	var list []string
	if list, err = GetAllServerURIs(shards, connString); err != nil {
		t.Fatal(err)
	}
	if len(list) != 6 {
		t.Fatal("expected 6, but got", len(list))
	}
	expected := []string{
		"mongodb://user:secret@shard-00-00:27017/?authSource=admin&ssl=true",
		"mongodb://user:secret@shard-00-01:27017/?authSource=admin&ssl=true",
		"mongodb://user:secret@shard-00-02:27017/?authSource=admin&ssl=true",
		"mongodb://user:secret@shard-01-00:27017/?authSource=admin&ssl=true",
		"mongodb://user:secret@shard-01-01:27017/?authSource=admin&ssl=true",
		"mongodb://user:secret@shard-01-02:27017/?authSource=admin&ssl=true",
	}
	for i, v := range list {
		assertEqual(t, v, expected[i])
	}
}
// TestGetQueryParams checks that GetQueryParams reconstructs the query
// string from a parsed connection string, and that the second argument
// drops read-preference options when true.
func TestGetQueryParams(t *testing.T) {
	var err error
	var cs connstring.ConnString
	var expected string
	uri := "mongodb+srv://user:password@tags.jgtm2.mongodb.net/keyhole?readPreference=secondary&readPreferenceTags=nodeType:ANALYTICS"
	if cs, err = connstring.Parse(uri); err != nil {
		t.Fatal(err)
	}
	expected = "&tls=true&readPreference=secondary&readPreferenceTags=nodeType:ANALYTICS"
	assertEqual(t, expected, GetQueryParams(cs, false))
	expected = "&tls=true"
	assertEqual(t, expected, GetQueryParams(cs, true))
}
// assertEqual fails the test when a != b. Shared helper for the URI
// comparison tests in this file.
// Fixes: t.Helper() so failures point at the caller, and %v instead of
// %s — %s on non-string values prints noise like "%!s(int=3)".
func assertEqual(t *testing.T, a interface{}, b interface{}) {
	t.Helper()
	if a != b {
		t.Fatalf("%v != %v", a, b)
	}
}
|
package accesslog
import (
"net/http"
"strings"
)
// readResponseHeaders flattens an http.Header into a plain string map,
// joining multi-valued headers with a single space.
func readResponseHeaders(header http.Header) map[string]string {
	out := make(map[string]string, len(header))
	for name, values := range header {
		out[name] = strings.Join(values, " ")
	}
	return out
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cascades
import (
"context"
"testing"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/model"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/planner/memo"
"github.com/pingcap/tidb/testkit/testdata"
"github.com/stretchr/testify/require"
)
// testGroupToString is the shared driver for the transformation-rule tests:
// for each input SQL statement it builds a logical plan against the mock
// schema, runs the given optimizer's preprocessing and exploration phases,
// and compares the string form of the resulting memo group with the recorded
// golden output.
func testGroupToString(t *testing.T, input []string, output []struct {
	SQL string
	Result []string
}, optimizer *Optimizer) {
	p := parser.New()
	ctx := plannercore.MockContext()
	is := infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable()})
	domain.GetDomain(ctx).MockInfoCacheAndLoadInfoSchema(is)
	for i, sql := range input {
		stmt, err := p.ParseOneStmt(sql, "", "")
		require.NoError(t, err)
		plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is)
		require.NoError(t, err)
		logic, ok := plan.(plannercore.LogicalPlan)
		require.True(t, ok)
		// Preprocessing runs before exploration, mirroring the optimizer's
		// real phase order.
		logic, err = optimizer.onPhasePreprocessing(ctx, logic)
		require.NoError(t, err)
		group := memo.Convert2Group(logic)
		err = optimizer.onPhaseExploration(ctx, group)
		require.NoError(t, err)
		// In record mode the golden file is rewritten instead of checked.
		testdata.OnRecord(func() {
			output[i].SQL = sql
			output[i].Result = ToString(group)
		})
		require.Equalf(t, output[i].Result, ToString(group), "case:%v, sql:%s", i, sql)
	}
}
// TestAggPushDownGather checks NewRulePushAggDownGather together with path
// enumeration. It inlines the testGroupToString driver because it
// additionally calls group.BuildKeyInfo() before comparing with the golden
// output.
func TestAggPushDownGather(t *testing.T) {
	optimizer := NewOptimizer()
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandAggregation: {
			NewRulePushAggDownGather(),
		},
		memo.OperandDataSource: {
			NewRuleEnumeratePaths(),
		},
	})
	// Restore the default rule set so later tests start clean.
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	p := parser.New()
	ctx := plannercore.MockContext()
	is := infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable()})
	domain.GetDomain(ctx).MockInfoCacheAndLoadInfoSchema(is)
	for i, sql := range input {
		stmt, err := p.ParseOneStmt(sql, "", "")
		require.NoError(t, err)
		plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is)
		require.NoError(t, err)
		logic, ok := plan.(plannercore.LogicalPlan)
		require.True(t, ok)
		logic, err = optimizer.onPhasePreprocessing(ctx, logic)
		require.NoError(t, err)
		group := memo.Convert2Group(logic)
		err = optimizer.onPhaseExploration(ctx, group)
		require.NoError(t, err)
		// BuildKeyInfo here to test the KeyInfo for partialAgg.
		group.BuildKeyInfo()
		testdata.OnRecord(func() {
			output[i].SQL = sql
			output[i].Result = ToString(group)
		})
		require.Equalf(t, output[i].Result, ToString(group), "case:%v, sql:%s", i, sql)
	}
}
// TestPredicatePushDown checks the selection push-down rules across both the
// TiDB-layer and TiKV-layer rule batches against the golden test data.
func TestPredicatePushDown(t *testing.T) {
	optimizer := NewOptimizer()
	optimizer.ResetTransformationRules(
		TransformationRuleBatch{ // TiDB layer
			memo.OperandSelection: {
				NewRulePushSelDownSort(),
				NewRulePushSelDownProjection(),
				NewRulePushSelDownAggregation(),
				NewRulePushSelDownJoin(),
				NewRulePushSelDownUnionAll(),
				NewRulePushSelDownWindow(),
				NewRuleMergeAdjacentSelection(),
			},
			memo.OperandJoin: {
				NewRuleTransformJoinCondToSel(),
			},
		},
		TransformationRuleBatch{ // TiKV layer
			memo.OperandSelection: {
				NewRulePushSelDownTableScan(),
				NewRulePushSelDownTiKVSingleGather(),
				NewRulePushSelDownIndexScan(),
			},
			memo.OperandDataSource: {
				NewRuleEnumeratePaths(),
			},
		},
	)
	// Restore the default rule set so later tests start clean.
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestTopNRules checks the Limit/TopN transformation rules across the
// TiDB-layer and TiKV-layer batches against the golden test data.
func TestTopNRules(t *testing.T) {
	optimizer := NewOptimizer()
	optimizer.ResetTransformationRules(
		TransformationRuleBatch{ // TiDB layer
			memo.OperandLimit: {
				NewRuleTransformLimitToTopN(),
				NewRulePushLimitDownProjection(),
				NewRulePushLimitDownUnionAll(),
				NewRulePushLimitDownOuterJoin(),
				NewRuleMergeAdjacentLimit(),
			},
			memo.OperandTopN: {
				NewRulePushTopNDownProjection(),
				NewRulePushTopNDownOuterJoin(),
				NewRulePushTopNDownUnionAll(),
			},
		},
		TransformationRuleBatch{ // TiKV layer
			memo.OperandLimit: {
				NewRulePushLimitDownTiKVSingleGather(),
			},
			memo.OperandTopN: {
				NewRulePushTopNDownTiKVSingleGather(),
			},
			memo.OperandDataSource: {
				NewRuleEnumeratePaths(),
			},
		},
	)
	// Every sibling test restores the default rule set on exit; this test
	// previously leaked its custom rules by omitting the deferred reset.
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestProjectionElimination checks the projection elimination and merge rules
// against the golden test data.
func TestProjectionElimination(t *testing.T) {
	optimizer := NewOptimizer()
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandProjection: {
			NewRuleEliminateProjection(),
			NewRuleMergeAdjacentProjection(),
		},
	})
	// Restore the default rule set so later tests start clean.
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestEliminateMaxMin checks NewRuleEliminateSingleMaxMin against the golden
// test data.
func TestEliminateMaxMin(t *testing.T) {
	optimizer := NewOptimizer()
	// Use the named TransformationRuleBatch type for the literal, matching
	// the sibling tests (it is the same map type accepted by
	// ResetTransformationRules).
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandAggregation: {
			NewRuleEliminateSingleMaxMin(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestMergeAggregationProjection checks NewRuleMergeAggregationProjection
// against the golden test data.
func TestMergeAggregationProjection(t *testing.T) {
	optimizer := NewOptimizer()
	// TransformationRuleBatch literal for consistency with the sibling tests.
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandAggregation: {
			NewRuleMergeAggregationProjection(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestMergeAdjacentTopN checks NewRuleMergeAdjacentTopN (with the supporting
// limit-to-TopN and projection rules) against the golden test data.
func TestMergeAdjacentTopN(t *testing.T) {
	optimizer := NewOptimizer()
	// TransformationRuleBatch literal for consistency with the sibling tests.
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandLimit: {
			NewRuleTransformLimitToTopN(),
		},
		memo.OperandTopN: {
			NewRulePushTopNDownProjection(),
			NewRuleMergeAdjacentTopN(),
		},
		memo.OperandProjection: {
			NewRuleMergeAdjacentProjection(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestMergeAdjacentLimit checks NewRuleMergeAdjacentLimit against the golden
// test data.
func TestMergeAdjacentLimit(t *testing.T) {
	optimizer := NewOptimizer()
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandLimit: {
			NewRulePushLimitDownProjection(),
			NewRuleMergeAdjacentLimit(),
		},
	})
	// Restore the default rule set so later tests start clean.
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestTransformLimitToTableDual checks NewRuleTransformLimitToTableDual
// against the golden test data.
func TestTransformLimitToTableDual(t *testing.T) {
	optimizer := NewOptimizer()
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandLimit: {
			NewRuleTransformLimitToTableDual(),
		},
	})
	// Restore the default rule set so later tests start clean.
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestPostTransformationRules checks the limit-to-TopN rule followed by the
// predefined PostTransformationBatch against the golden test data.
func TestPostTransformationRules(t *testing.T) {
	optimizer := NewOptimizer()
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandLimit: {
			NewRuleTransformLimitToTopN(),
		},
	}, PostTransformationBatch)
	// Restore the default rule set so later tests start clean.
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestPushLimitDownTiKVSingleGather checks NewRulePushLimitDownTiKVSingleGather
// (plus projection elimination and path enumeration) against the golden data.
func TestPushLimitDownTiKVSingleGather(t *testing.T) {
	optimizer := NewOptimizer()
	// TransformationRuleBatch literal for consistency with the sibling tests.
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandLimit: {
			NewRulePushLimitDownTiKVSingleGather(),
		},
		memo.OperandProjection: {
			NewRuleEliminateProjection(),
		},
		memo.OperandDataSource: {
			NewRuleEnumeratePaths(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestEliminateOuterJoin checks the outer-join elimination rules below
// aggregation and projection against the golden test data.
func TestEliminateOuterJoin(t *testing.T) {
	optimizer := NewOptimizer()
	// TransformationRuleBatch literal for consistency with the sibling tests.
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandAggregation: {
			NewRuleEliminateOuterJoinBelowAggregation(),
		},
		memo.OperandProjection: {
			NewRuleEliminateOuterJoinBelowProjection(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestTransformAggregateCaseToSelection checks
// NewRuleTransformAggregateCaseToSelection against the golden test data.
func TestTransformAggregateCaseToSelection(t *testing.T) {
	optimizer := NewOptimizer()
	// TransformationRuleBatch literal for consistency with the sibling tests.
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandAggregation: {
			NewRuleTransformAggregateCaseToSelection(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestTransformAggToProj checks NewRuleTransformAggToProj (with adjacent
// projection merging) against the golden test data.
func TestTransformAggToProj(t *testing.T) {
	optimizer := NewOptimizer()
	// TransformationRuleBatch literal for consistency with the sibling tests.
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandAggregation: {
			NewRuleTransformAggToProj(),
		},
		memo.OperandProjection: {
			NewRuleMergeAdjacentProjection(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestDecorrelate checks the apply decorrelation rules against the golden
// test data.
func TestDecorrelate(t *testing.T) {
	optimizer := NewOptimizer()
	// TransformationRuleBatch literal for consistency with the sibling tests.
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandApply: {
			NewRulePullSelectionUpApply(),
			NewRuleTransformApplyToJoin(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestInjectProj checks the projection-injection rules (below Agg and TopN),
// run as a second batch after limit-to-TopN, against the golden test data.
func TestInjectProj(t *testing.T) {
	optimizer := NewOptimizer()
	// TransformationRuleBatch literals for consistency with the sibling tests.
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandLimit: {
			NewRuleTransformLimitToTopN(),
		},
	}, TransformationRuleBatch{
		memo.OperandAggregation: {
			NewRuleInjectProjectionBelowAgg(),
		},
		memo.OperandTopN: {
			NewRuleInjectProjectionBelowTopN(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
// TestMergeAdjacentWindow checks NewRuleMergeAdjacentWindow (with the
// supporting projection rules) against the golden test data.
func TestMergeAdjacentWindow(t *testing.T) {
	optimizer := NewOptimizer()
	// TransformationRuleBatch literal for consistency with the sibling tests.
	optimizer.ResetTransformationRules(TransformationRuleBatch{
		memo.OperandProjection: {
			NewRuleMergeAdjacentProjection(),
			NewRuleEliminateProjection(),
		},
		memo.OperandWindow: {
			NewRuleMergeAdjacentWindow(),
		},
	})
	defer func() {
		optimizer.ResetTransformationRules(DefaultRuleBatches...)
	}()
	var input []string
	var output []struct {
		SQL    string
		Result []string
	}
	transformationRulesSuiteData.LoadTestCases(t, &input, &output)
	testGroupToString(t, input, output, optimizer)
}
|
//go:build !spell
// +build !spell
package main
// Spellcheck is the no-op stand-in compiled when the "spell" build tag is
// absent: every word is accepted as correctly spelled.
type Spellcheck struct {
	// check mirrors the field of the real implementation; here it always
	// reports true.
	check CheckFunc
}

// NewSpellcheck returns a stub checker that accepts every word.
// The ts and ignoreFile arguments are ignored in this build.
func NewSpellcheck(ts TokenSet, ignoreFile string) (*Spellcheck, error) {
	return &Spellcheck{func(w string) bool { return true }}, nil
}

// Close is a no-op; the stub holds no resources.
func (sc *Spellcheck) Close() {}

// WithPassTokens returns a check function that passes every token.
func (sc *Spellcheck) WithPassTokens() CheckFunc {
	return func(string) bool { return true }
}

// WithSpeller returns a check function that passes every token.
func (sc *Spellcheck) WithSpeller() CheckFunc {
	return func(string) bool { return true }
}

// Check returns a pipe that drains the lexeme channel without emitting any
// findings on outc.
func (sc *Spellcheck) Check() CheckPipe {
	return func(lc <-chan *Lexeme, outc chan<- *CheckedLexeme) {
		for range lc {
		}
	}
}
|
package view
import (
"fmt"
tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
"log"
"projja_telegram/command/current_project/controller"
"projja_telegram/command/current_project/menu"
"projja_telegram/command/util"
"projja_telegram/model"
"strconv"
)
// ChangeProjectStatuses drives the interactive task-status menu of a project.
// It renders the menu, then dispatches the user's text commands (add status,
// remove status, paging) until the user chooses "Назад" ("Back") or the
// status list can no longer be fetched.
func ChangeProjectStatuses(botUtil *util.BotUtil, project *model.Project) {
	page := 1
	taskStatuses, msg, status := ShowTaskStatusesMenu(botUtil, project, page)
	botUtil.Bot.Send(msg)
	if !status {
		// Status list could not be fetched; the error message was already built.
		return
	}
	for update := range botUtil.Updates {
		mes := update.Message
		command := ""
		if mes.Text != "" {
			command = mes.Text
		}
		switch command {
		case "Добавить статус": // "Add status"
			msg = CreateTaskStatus(botUtil, project, taskStatuses)
			botUtil.Bot.Send(msg)
		case "Удалить статус": // "Remove status"
			msg = RemoveTaskStatus(botUtil, project, taskStatuses)
			botUtil.Bot.Send(msg)
		case "Предыдущая страница": // "Previous page"
			page--
		case "Следующая страница": // "Next page"
			page++
		case "Назад": // "Back" — leave the menu loop
			return
		default:
			msg = util.GetUnknownMessage(botUtil)
			botUtil.Bot.Send(msg)
		}
		// Re-render the (possibly changed) menu after every command.
		// NOTE(review): page is not clamped here; presumably
		// ShowTaskStatusesMenu tolerates out-of-range pages — confirm.
		taskStatuses, msg, status = ShowTaskStatusesMenu(botUtil, project, page)
		botUtil.Bot.Send(msg)
		if !status {
			return
		}
	}
}
// ShowTaskStatusesMenu fetches the project's task statuses and builds the
// paged menu message (up to 4 statuses per page). It returns the full status
// list, the message to send, and false when the statuses could not be fetched
// (in which case the message contains the error text).
func ShowTaskStatusesMenu(botUtil *util.BotUtil, project *model.Project, page int) ([]*model.TaskStatus, tgbotapi.MessageConfig, bool) {
	taskStatuses, status := controller.GetStatuses(project)
	if !status {
		errorText := "Не удалось получить список статусов задач\n" +
			"Попробуйте позже"
		msg := tgbotapi.NewMessage(botUtil.Message.Chat.ID, errorText)
		return nil, msg, false
	}
	// Number of entries remaining on the requested page, capped at 4.
	count := len(taskStatuses) - (page-1)*4
	if count > 4 {
		count = 4
	}
	msg := menu.MakeTaskStatusesMenu(botUtil.Message, project, taskStatuses, page, count)
	return taskStatuses, msg, true
}
// CreateTaskStatus runs the "create task status" dialog: it asks for a status
// name, rejects duplicates, asks for a level, confirms with the user and then
// calls the controller. The returned message is the final reply to send
// (success text, cancellation notice, or an error).
func CreateTaskStatus(botUtil *util.BotUtil, project *model.Project, taskStatuses []*model.TaskStatus) tgbotapi.MessageConfig {
	// Ask for the new status name ("Введите название нового статуса").
	text, cancelStatus := util.ListenForText(botUtil,
		"Введите название нового статуса",
		"Отмена создания статуса задач",
	)
	if !cancelStatus {
		// User cancelled; text already holds the cancellation message.
		msg := tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
		return msg
	}
	newTaskStatusStatus := text
	// Reject names that already exist in the project.
	for _, taskStatus := range taskStatuses {
		if taskStatus.Status == newTaskStatusStatus {
			text = "Данный статус уже добавлен в проект"
			msg := tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
			return msg
		}
	}
	// Ask for the level of the new status via the keyboard dialog.
	text, cancelStatus = listenForLevel(botUtil, taskStatuses)
	if !cancelStatus {
		msg := tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
		return msg
	}
	// listenForLevel validated the input, so Atoi should not normally fail.
	newTaskStatusLevel, err := strconv.Atoi(text)
	if err != nil {
		log.Println("error in casting task status level: ", err)
		errorText := "Во время создания статуса произошла ошибка\nПопробуйте позже"
		msg := tgbotapi.NewMessage(botUtil.Message.Chat.ID, errorText)
		return msg
	}
	newTaskStatus := &model.TaskStatus{
		Status: newTaskStatusStatus,
		Level: newTaskStatusLevel,
	}
	acceptingString := fmt.Sprintf("Вы хотите создать:\n"+
		"Статус: %s\n"+
		"Level: %d\n",
		newTaskStatus.Status,
		newTaskStatus.Level,
	)
	msg := util.GetAcceptingMessage(botUtil.Message, acceptingString)
	botUtil.Bot.Send(msg)
	// Yes/no confirmation loop; goto LOOP breaks out once a decision is made.
	for update := range botUtil.Updates {
		mes := update.Message
		command := ""
		if mes.Text != "" {
			command = mes.Text
		}
		switch command {
		case "Да": // "Yes" — create the status
			text, _ = controller.CreateTaskStatus(project, newTaskStatus)
			goto LOOP
		case "Нет": // "No" — cancel creation
			text = "Отмена создания статуса задач"
			goto LOOP
		default:
			// Anything else: re-prompt with the confirmation keyboard.
			text = "Пожалуйста, выберите один из вариантов"
			msg = tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
			botUtil.Bot.Send(msg)
			msg = util.GetAcceptingMessage(botUtil.Message, acceptingString)
			botUtil.Bot.Send(msg)
		}
	}
LOOP:
	msg = tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
	return msg
}
// listenForLevel asks the user to pick a level for a new task status using a
// numeric reply keyboard (values 1..len(taskStatuses)+1, two buttons per
// row, plus a cancel button). It returns the chosen level as text and true,
// or the cancellation message and false.
func listenForLevel(botUtil *util.BotUtil, taskStatuses []*model.TaskStatus) (string, bool) {
	mesText := "Выберите уровень для нового статуса"
	msg := tgbotapi.NewMessage(botUtil.Message.Chat.ID, mesText)
	// Lay out the level buttons two per row; after the loop i equals the
	// largest offered level, len(taskStatuses)+1.
	rows := make([][]tgbotapi.KeyboardButton, 0)
	i := 0
	for i < len(taskStatuses)+1 {
		statusesRow := make([]tgbotapi.KeyboardButton, 0)
		firstRowStatusBtn := tgbotapi.NewKeyboardButton(strconv.Itoa(i + 1))
		statusesRow = append(statusesRow, firstRowStatusBtn)
		i++
		if i != len(taskStatuses)+1 {
			secondRowStatusBtn := tgbotapi.NewKeyboardButton(strconv.Itoa(i + 1))
			statusesRow = append(statusesRow, secondRowStatusBtn)
			i++
		}
		rows = append(rows, statusesRow)
	}
	row := make([]tgbotapi.KeyboardButton, 0)
	cancelBtn := tgbotapi.NewKeyboardButton("Отмена")
	row = append(row, cancelBtn)
	rows = append(rows, row)
	keyboard := tgbotapi.NewReplyKeyboard(rows...)
	msg.ReplyMarkup = keyboard
	botUtil.Bot.Send(msg)
	cancelText := "Отмена создания статуса задач"
	resultText := ""
	for update := range botUtil.Updates {
		mes := update.Message
		command := ""
		if mes.Text != "" {
			command = mes.Text
		}
		switch command {
		case "Отмена": // "Cancel"
			resultText = cancelText
			return resultText, false
		default:
			if command == "" {
				// Non-text update: show the keyboard again.
				botUtil.Bot.Send(msg)
				continue
			}
			resultLevel, err := strconv.Atoi(command)
			if err != nil {
				text := fmt.Sprintf("Вы ввели не уровень, а '%s'", command)
				errorMsg := tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
				botUtil.Bot.Send(errorMsg)
				botUtil.Bot.Send(msg)
				continue
			}
			if resultLevel < 1 || resultLevel > i {
				// Fixed typo in the user-facing message: "уровеня" -> "уровня".
				text := "Вы ввели неверный номер уровня"
				errorMsg := tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
				botUtil.Bot.Send(errorMsg)
				botUtil.Bot.Send(msg)
				continue
			}
			resultText = command
		}
		if resultText != "" {
			break
		}
	}
	return resultText, true
}
// RemoveTaskStatus runs the "remove task status" dialog: it shows the paged
// removal menu, lets the user pick a status by name, asks for confirmation
// and then calls the controller. The returned message is the final reply
// (result text or cancellation notice).
func RemoveTaskStatus(botUtil *util.BotUtil, project *model.Project, taskStatuses []*model.TaskStatus) tgbotapi.MessageConfig {
	page := 1
	// Entries remaining on the current page, capped at 4 — same paging rule
	// as ShowTaskStatusesMenu.
	count := len(taskStatuses) - (page-1)*4
	if count > 4 {
		count = 4
	}
	msg := menu.MakeTaskStatusesRemovingMenu(botUtil.Message, taskStatuses, page, count)
	botUtil.Bot.Send(msg)
	statusIndex := -1
	for update := range botUtil.Updates {
		mes := update.Message
		command := ""
		exit := false
		if mes.Text != "" {
			command = mes.Text
		}
		switch command {
		case "Отмена": // "Cancel"
			text := "Отмена удаления статуса задач"
			msg = tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
			return msg
		case "Предыдущая страница": // "Previous page"
			page--
		case "Следующая страница": // "Next page"
			page++
		default:
			// Treat any other text as a status name to remove.
			text, index, status := IsTaskStatusStatus(taskStatuses, command)
			statusIndex = index
			if !status {
				msg := tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
				botUtil.Bot.Send(msg)
			} else {
				exit = true
			}
		}
		if exit && statusIndex != -1 {
			break
		}
		botUtil.Bot.Send(msg)
	}
	// Guard: if the update stream ended without a valid selection, bail out
	// instead of indexing taskStatuses with -1 (which would panic).
	if statusIndex == -1 {
		return tgbotapi.NewMessage(botUtil.Message.Chat.ID, "Отмена удаления статуса задач")
	}
	taskStatus := taskStatuses[statusIndex]
	acceptingString := fmt.Sprintf("Вы хотите удалить статус задач '%s'", taskStatus.Status)
	msg = util.GetAcceptingMessage(botUtil.Message, acceptingString)
	botUtil.Bot.Send(msg)
	var text string
	// Yes/no confirmation loop; goto LOOP breaks out once a decision is made.
	for update := range botUtil.Updates {
		mes := update.Message
		command := ""
		if mes.Text != "" {
			command = mes.Text
		}
		switch command {
		case "Да": // "Yes" — remove the status
			text, _ = controller.RemoveTaskStatus(project, taskStatus)
			goto LOOP
		case "Нет": // "No" — cancel removal
			text = "Отмена удаления статуса задач"
			goto LOOP
		default:
			text = "Пожалуйста, выберите один из вариантов"
			msg = tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
			botUtil.Bot.Send(msg)
			msg = util.GetAcceptingMessage(botUtil.Message, acceptingString)
			botUtil.Bot.Send(msg)
		}
	}
LOOP:
	msg = tgbotapi.NewMessage(botUtil.Message.Chat.ID, text)
	return msg
}
// IsTaskStatusStatus reports whether command names one of the given task
// statuses. On a match it returns an empty text, the index of the status and
// true; otherwise it returns the "no such status" message, -1 and false.
func IsTaskStatusStatus(statuses []*model.TaskStatus, command string) (string, int, bool) {
	if command != "" {
		for i, s := range statuses {
			if s.Status == command {
				return "", i, true
			}
		}
	}
	return "Статуса с таким названием не существует", -1, false
}
|
package test
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestCalcNumRuns pins the behavior of calcNumRuns, which combines a required
// run count and a flaky-run budget into (total runs, runs that must succeed).
func TestCalcNumRuns(t *testing.T) {
	// Helper for assert
	nr := func(a, b int) []interface{} { return []interface{}{a, b} }
	// Base case when no flags are passed
	assert.Equal(t, nr(1, 1), nr(calcNumRuns(0, 0)))
	// Trivially flaky test; run n times, one success is enough
	assert.Equal(t, nr(3, 1), nr(calcNumRuns(0, 3)))
	// Non-flaky test with multiple runs; run n times, must succeed every time
	assert.Equal(t, nr(3, 3), nr(calcNumRuns(3, 0)))
	// This is where it gets fiddly; when we pass both flags we should run the exact number
	// of times but maintain a proportionate amount of flakiness.
	assert.Equal(t, nr(1, 1), nr(calcNumRuns(1, 1)))
	assert.Equal(t, nr(1, 1), nr(calcNumRuns(1, 3)))
	assert.Equal(t, nr(3, 1), nr(calcNumRuns(3, 3)))
	assert.Equal(t, nr(6, 2), nr(calcNumRuns(6, 3)))
	assert.Equal(t, nr(7, 3), nr(calcNumRuns(7, 3)))
}
|
package info
import "context"
// infoServer is a minimal InfoServiceServer implementation that reports a
// fixed managed namespace.
type infoServer struct {
	// managedNamespace is echoed back verbatim by GetInfo.
	managedNamespace string
}

// GetInfo returns an InfoResponse carrying the server's managed namespace.
// The request arguments are ignored.
func (i *infoServer) GetInfo(context.Context, *GetInfoRequest) (*InfoResponse, error) {
	return &InfoResponse{ManagedNamespace: i.managedNamespace}, nil
}

// NewInfoServer builds an InfoServiceServer that reports managedNamespace.
func NewInfoServer(managedNamespace string) InfoServiceServer {
	return &infoServer{managedNamespace}
}
|
package utils
import (
"fmt"
"golang.org/x/net/publicsuffix"
"net/http"
"net/http/cookiejar"
"net/url"
"strings"
"time"
)
const (
UserAgent string = "Mozilla/5.0 (iPhone; CPU iPhone OS 12_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/75.0.3770.85 Mobile/15E148 Safari/605.1"
Patch string = "PATCH"
Post string = "POST"
Get string = "GET"
)
type UserAgentTransport struct {
RoundTripper http.RoundTripper
}
func (transport *UserAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
req.Header.Add("User-Agent", UserAgent)
return transport.RoundTripper.RoundTrip(req)
}
// NewTransport builds a UserAgentTransport. An empty proxyString wraps the
// default transport; otherwise the string (prefixed with "http://" when the
// scheme is missing) is parsed and used as the proxy URL.
func NewTransport(proxyString string) (*UserAgentTransport, error) {
	if proxyString == "" {
		return &UserAgentTransport{http.DefaultTransport}, nil
	}
	if !strings.HasPrefix(proxyString, "http://") {
		proxyString = "http://" + proxyString
	}
	proxyURL, err := url.Parse(proxyString)
	if err != nil {
		return nil, err
	}
	proxied := &http.Transport{Proxy: http.ProxyURL(proxyURL)}
	return &UserAgentTransport{proxied}, nil
}
// NewHttpClient builds an http.Client with a public-suffix-aware cookie jar,
// a 2-second timeout and, via NewTransport, an optional proxy. When a step
// fails, a still-usable fallback client is returned alongside the error.
func NewHttpClient(proxyString string) (http.Client, error) {
	const clientTimeout = time.Second * 2
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		// No jar available; hand back a bare client with the timeout set.
		return http.Client{Timeout: clientTimeout}, err
	}
	transport, err := NewTransport(proxyString)
	if err != nil {
		// Jar is fine but the proxy string was bad; omit the transport.
		return http.Client{Jar: jar, Timeout: clientTimeout}, err
	}
	return http.Client{
		Jar:       jar,
		Timeout:   clientTimeout,
		Transport: transport,
	}, nil
}
// NewPostRequest builds a POST request whose body is the form-encoded values.
// NOTE(review): no Content-Type header is set here; presumably callers add
// "application/x-www-form-urlencoded" themselves — confirm.
func NewPostRequest(url string, values url.Values) (*http.Request, error) {
	return http.NewRequest(Post, url, strings.NewReader(values.Encode()))
}

// NewGetRequest builds a body-less GET request for the given URL.
func NewGetRequest(url string) (*http.Request, error) {
	return http.NewRequest(Get, url, nil)
}
|
// Copyright (c) 2013, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/zk
package zk
import (
"github.com/streadway/zk/proto"
)
// Permission is a bitmask of permissions that apply to a node for a scheme's identity.
type Permission int32

const (
	PermRead Permission = 1 << iota
	PermWrite
	PermCreate
	PermDelete
	PermAdmin
	// PermAll is the union of the five bits above: iota is 5 at this point,
	// so the value is (1<<5)-1 = 0b11111.
	PermAll Permission = (1 << iota) - 1
)

// Access represents permissions bitmask for an identity under a scheme
type Access struct {
	Perms    Permission // Bitmask of Permissions
	Scheme   string     // one of "world" "auth" "digest" "host" "ip"
	Identity string     // Scheme specific identity like 127.0.0.1/32
}

// ACL is an Access Control List used in Create, SetAcl and GetAcl
type ACL []Access

// Commonly used ACLs for nodes
var (
	AclOpen     = ACL{Access{PermAll, "world", "anyone"}}  // anyone may do anything
	AclReadOnly = ACL{Access{PermRead, "world", "anyone"}} // anyone may only read
)
// toProtoACLs converts an ACL into its wire-protocol representation.
func toProtoACLs(acl ACL) []proto.ACL {
	converted := make([]proto.ACL, 0, len(acl))
	for _, access := range acl {
		converted = append(converted, proto.ACL{
			Perms: int32(access.Perms),
			Id: proto.Id{
				Scheme: access.Scheme,
				Id:     access.Identity,
			},
		})
	}
	return converted
}
// fromProtoACLs converts wire-protocol ACL entries into an ACL value.
func fromProtoACLs(acls []proto.ACL) ACL {
	converted := make(ACL, 0, len(acls))
	for _, entry := range acls {
		converted = append(converted, Access{
			Perms:    Permission(entry.Perms),
			Scheme:   entry.Id.Scheme,
			Identity: entry.Id.Id,
		})
	}
	return converted
}
|
package trello
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strings"
)
// TrelloAPI is the base URL of version 1 of the Trello REST API.
const TrelloAPI = "https://api.trello.com/1"

// Client performs authenticated requests against the Trello API.
type Client struct {
	client *http.Client
	key    string
	token  string
}

// NewClient creates a Trello API client bound to the given key and token.
func NewClient(key string, token string) *Client {
	return &Client{
		client: &http.Client{},
		key:    key,
		token:  token,
	}
}

// ApiURL builds the absolute URL for a resource path, appending the key and
// token as query parameters.
func (client *Client) ApiURL(resource string) string {
	auth := url.Values{
		"key":   {client.key},
		"token": {client.token},
	}
	return TrelloAPI + resource + "?" + auth.Encode()
}
// Query the API.
func (client *Client) Query(method string, resource string, params url.Values, output interface{}) error {
var requestURL = client.ApiURL(resource)
var requestHeaders = map[string]string{}
var req *http.Request
var err error
// Prepare the request.
if method == "GET" || method == "DELETE" {
if params != nil {
requestURL += "&" + params.Encode()
}
req, err = http.NewRequest(method, requestURL, nil)
}
if method == "POST" || method == "PUT" {
if params == nil {
params = url.Values{}
}
requestHeaders["Content-Type"] = "application/x-www-form-urlencoded"
req, err = http.NewRequest(method, requestURL, strings.NewReader(params.Encode()))
}
// Query the API.
if err != nil {
return err
}
for key, val := range requestHeaders {
req.Header.Set(key, val)
}
req.Close = true
resp, err := client.client.Do(req)
if err != nil {
return err
}
// Read the body.
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("Unexpected response when querying '%s' with method %s: %s", resource, method, string(body))
}
// Parse the json response.
if output != nil && json.Unmarshal(body, output) != nil {
return fmt.Errorf("Couldn't unmarshal body %s into %s", string(body), reflect.TypeOf(output))
}
return nil
}
// GET, PUT, POST, DELETE queries to the Trello API.
// Each helper fixes the HTTP method and forwards to Query.
func (client *Client) Get(resource string, params url.Values, output interface{}) error {
	return client.Query("GET", resource, params, output)
}
func (client *Client) Put(resource string, params url.Values, output interface{}) error {
	return client.Query("PUT", resource, params, output)
}
func (client *Client) Post(resource string, params url.Values, output interface{}) error {
	return client.Query("POST", resource, params, output)
}
// Delete takes no params and discards the response body.
func (client *Client) Delete(resource string) error {
	return client.Query("DELETE", resource, nil, nil)
}
// Board. Typed wrappers over the generic verbs for /boards endpoints.
func (client *Client) GetBoard(id string, params url.Values) (board Board, err error) {
	return board, client.Get("/boards/"+id, params, &board)
}
func (client *Client) PutBoard(id string, params url.Values) (board Board, err error) {
	return board, client.Put("/boards/"+id, params, &board)
}
func (client *Client) PostBoard(params url.Values) (board Board, err error) {
	return board, client.Post("/boards", params, &board)
}
func (client *Client) DeleteBoard(id string) error {
	return client.Delete("/boards/" + id)
}
// List. Typed wrappers for /lists endpoints.
func (client *Client) GetList(id string, params url.Values) (list List, err error) {
	return list, client.Get("/lists/"+id, params, &list)
}
func (client *Client) PutList(id string, params url.Values) (list List, err error) {
	return list, client.Put("/lists/"+id, params, &list)
}
func (client *Client) PostList(params url.Values) (list List, err error) {
	return list, client.Post("/lists", params, &list)
}
func (client *Client) DeleteList(id string) error {
	return client.Delete("/lists/" + id)
}
// Card. Typed wrappers for /cards endpoints.
func (client *Client) GetCard(id string, params url.Values) (card Card, err error) {
	return card, client.Get("/cards/"+id, params, &card)
}
func (client *Client) PutCard(id string, params url.Values) (card Card, err error) {
	return card, client.Put("/cards/"+id, params, &card)
}
func (client *Client) PostCard(params url.Values) (card Card, err error) {
	return card, client.Post("/cards", params, &card)
}
func (client *Client) DeleteCard(id string) error {
	return client.Delete("/cards/" + id)
}
// Webhook. Typed wrappers for webhook endpoints; GetWebhooks lists the
// webhooks registered for this client's token.
func (client *Client) GetWebhooks() (webhooks []Webhook, err error) {
	return webhooks, client.Get("/token/"+client.token+"/webhooks", nil, &webhooks)
}
func (client *Client) GetWebhook(id string, params url.Values) (webhook Webhook, err error) {
	return webhook, client.Get("/webhooks/"+id, params, &webhook)
}
func (client *Client) PutWebhook(id string, params url.Values) (webhook Webhook, err error) {
	return webhook, client.Put("/webhooks/"+id, params, &webhook)
}
func (client *Client) PostWebhook(params url.Values) (webhook Webhook, err error) {
	return webhook, client.Post("/webhooks", params, &webhook)
}
func (client *Client) DeleteWebhook(id string) error {
	return client.Delete("/webhooks/" + id)
}
|
package secl
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00100103 is the XML document wrapper for the ISO 20022
// secl.001.001.03 (TradeLegNotification) message.
type Document00100103 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:secl.001.001.03 Document"`
	Message *TradeLegNotificationV03 `xml:"TradLegNtfctn"`
}

// AddMessage allocates the embedded message and returns it for population.
func (d *Document00100103) AddMessage() *TradeLegNotificationV03 {
	d.Message = new(TradeLegNotificationV03)
	return d.Message
}
// Scope
// The TradeLegNotification message is sent by the central counterparty (CCP) to a clearing member to report the trade that has been executed by the trading platform.
//
// The message definition is intended for use with the ISO20022 Business Application Header.
//
// Usage
// The CCP reports both sides of the trade from the clearing member perspective. The CCP sends a message to the global clearing member of the seller and a message to the global clearing member of the buyer. Note: An individual clearing member only clear its own trades.
//
// Fields tagged with omitempty are optional in the schema and are omitted
// from the marshalled XML when nil.
type TradeLegNotificationV03 struct {
	// Provides the identification of the clearing member (individual clearing member or general clearing member).
	ClearingMember *iso20022.PartyIdentification35Choice `xml:"ClrMmb"`
	// Identifies the clearing member account at the CCP through which the trade must be cleared (sometimes called position account).
	ClearingAccount *iso20022.SecuritiesAccount18 `xml:"ClrAcct"`
	// An account opened by the central counterparty in the name of the clearing member or its settlement agent within the account structure, for settlement purposes (gives information about the clearing member/its settlement agent account at the central securities depository).
	DeliveryAccount *iso20022.SecuritiesAccount19 `xml:"DlvryAcct,omitempty"`
	// Provides details about the non clearing member identification and account.
	NonClearingMember *iso20022.PartyIdentificationAndAccount31 `xml:"NonClrMmb,omitempty"`
	// Provides clearing details such as the settlement netting (or not) eligibility code or the clearing segment.
	ClearingDetails *iso20022.Clearing4 `xml:"ClrDtls,omitempty"`
	// Provides details about the trade leg such as the trade date, the settlement date or the trading currency.
	TradeLegDetails *iso20022.TradeLeg8 `xml:"TradLegDtls"`
	// Provides details about the settlement details of the trade leg such the settlement amount or the place of settlement.
	SettlementDetails *iso20022.Settlement1 `xml:"SttlmDtls"`
	// Additional information that can not be captured in the structured fields and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// The AddX helpers below allocate the corresponding field (or append a new
// element, for the repeating SupplementaryData) and return it so callers can
// populate the value in place.
func (t *TradeLegNotificationV03) AddClearingMember() *iso20022.PartyIdentification35Choice {
	t.ClearingMember = new(iso20022.PartyIdentification35Choice)
	return t.ClearingMember
}
func (t *TradeLegNotificationV03) AddClearingAccount() *iso20022.SecuritiesAccount18 {
	t.ClearingAccount = new(iso20022.SecuritiesAccount18)
	return t.ClearingAccount
}
func (t *TradeLegNotificationV03) AddDeliveryAccount() *iso20022.SecuritiesAccount19 {
	t.DeliveryAccount = new(iso20022.SecuritiesAccount19)
	return t.DeliveryAccount
}
func (t *TradeLegNotificationV03) AddNonClearingMember() *iso20022.PartyIdentificationAndAccount31 {
	t.NonClearingMember = new(iso20022.PartyIdentificationAndAccount31)
	return t.NonClearingMember
}
func (t *TradeLegNotificationV03) AddClearingDetails() *iso20022.Clearing4 {
	t.ClearingDetails = new(iso20022.Clearing4)
	return t.ClearingDetails
}
func (t *TradeLegNotificationV03) AddTradeLegDetails() *iso20022.TradeLeg8 {
	t.TradeLegDetails = new(iso20022.TradeLeg8)
	return t.TradeLegDetails
}
func (t *TradeLegNotificationV03) AddSettlementDetails() *iso20022.Settlement1 {
	t.SettlementDetails = new(iso20022.Settlement1)
	return t.SettlementDetails
}
// AddSupplementaryData appends a new element and returns it.
func (t *TradeLegNotificationV03) AddSupplementaryData() *iso20022.SupplementaryData1 {
	newValue := new(iso20022.SupplementaryData1)
	t.SupplementaryData = append(t.SupplementaryData, newValue)
	return newValue
}
|
package config
import (
"errors"
"evier/integrations"
"evier/job"
"evier/rsync"
"gopkg.in/yaml.v2"
"io/ioutil"
)
// Config is the top-level configuration parsed from a YAML file by
// ParseFile/Parse.
type Config struct {
	Integrations []integrations.Config // integration-specific settings
	Jobs         []job.Job             // jobs to run; each must have Name, Source and Destination
	Rsync        rsync.Options         // options passed through to rsync
}
// ParseFile reads the YAML configuration file at path and parses and
// validates it via Parse. On a read error it returns the zero Config and
// the underlying error.
//
// (Cleanup: the previous signature declared named results `(c, e)` that
// were never used.)
func ParseFile(path string) (Config, error) {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		return Config{}, err
	}
	return Parse(content)
}
// Parse unmarshals YAML configuration data and validates that every job
// has a name, a source, and a destination. Unknown YAML fields are
// rejected (UnmarshalStrict). On validation failure the partially-parsed
// Config is returned alongside the error, matching previous behavior.
//
// (Cleanup: the loop variable was previously named `job`, shadowing the
// imported `job` package inside the loop; the unused named results were
// dropped.)
func Parse(source []byte) (Config, error) {
	result := Config{}
	if err := yaml.UnmarshalStrict(source, &result); err != nil {
		return result, err
	}
	for _, j := range result.Jobs {
		switch {
		case j.Name == "":
			return result, errors.New("all jobs must have names")
		case j.Source == "":
			return result, errors.New("all jobs must have sources")
		case j.Destination == "":
			return result, errors.New("all jobs must have destinations")
		}
	}
	return result, nil
}
|
package main
import (
"flag"
"logstream/pkg/proxy"
)
// main parses the listen/remote address flags, builds a proxy between
// them, and starts it.
func main() {
	var (
		listenAddr = flag.String(
			"laddr",
			":8500",
			"local address on which to listen for incoming connections",
		)
		remoteAddr = flag.String(
			"raddr",
			":8000",
			"remote address to which to connect",
		)
	)
	flag.Parse()
	proxy.NewProxy(*listenAddr, *remoteAddr).Start()
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tables
import (
"fmt"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/rowcodec"
"go.uber.org/zap"
)
// Consistency-check errors surfaced by CheckDataConsistency and its helpers.
var (
	// ErrInconsistentRowValue is the error when values in a row insertion does not match the expected ones.
	ErrInconsistentRowValue = dbterror.ClassTable.NewStd(errno.ErrInconsistentRowValue)
	// ErrInconsistentHandle is the error when the handle in the row/index insertions does not match.
	ErrInconsistentHandle = dbterror.ClassTable.NewStd(errno.ErrInconsistentHandle)
	// ErrInconsistentIndexedValue is the error when decoded values from the index mutation cannot match row value.
	ErrInconsistentIndexedValue = dbterror.ClassTable.NewStd(errno.ErrInconsistentIndexedValue)
)
// mutation is a single key/value change captured from the transaction's
// memory buffer together with its key flags. indexID is only populated
// for index mutations; row mutations carry 0 (see
// collectTableMutationsFromBufferStage).
type mutation struct {
	key     kv.Key
	flags   kv.KeyFlags
	value   []byte
	indexID int64 // only for index mutations
}

// columnMaps caches the per-table lookup tables used by the consistency
// checks so they are built once and reused (see getColumnMaps).
type columnMaps struct {
	ColumnIDToInfo       map[int64]*model.ColumnInfo
	ColumnIDToFieldType  map[int64]*types.FieldType
	IndexIDToInfo        map[int64]*model.IndexInfo
	IndexIDToRowColInfos map[int64][]rowcodec.ColInfo
}
// CheckDataConsistency checks whether the given set of mutations corresponding to a single row is consistent.
// Namely, assume the database is consistent before, applying the mutations shouldn't break the consistency.
// It aims at reducing bugs that will corrupt data, and preventing mistakes from spreading if possible.
//
// 3 conditions are checked:
// (1) row.value is consistent with input data
// (2) the handle is consistent in row and index insertions
// (3) the keys of the indices are consistent with the values of rows
//
// The check doesn't work and just returns nil when:
// (1) the table is partitioned
// (2) new collation is enabled and restored data is needed
//
// The check is performed on almost every write. Its performance matters.
// Let M = the number of mutations, C = the number of columns in the table,
// I = the sum of the number of columns in all indices,
// The time complexity is O(M * C + I)
// The space complexity is O(M + C + I)
func CheckDataConsistency(
	txn kv.Transaction, sessVars *variable.SessionVars, t *TableCommon,
	rowToInsert, rowToRemove []types.Datum, memBuffer kv.MemBuffer, sh kv.StagingHandle,
) error {
	// Partitioned tables are not supported by this check (see function comment).
	if t.Meta().GetPartitionInfo() != nil {
		return nil
	}
	if sh == 0 {
		// some implementations of MemBuffer doesn't support staging, e.g. that in br/pkg/lightning/backend/kv
		return nil
	}
	// Collect this statement's mutations (one optional row insertion plus all
	// index mutations) from the current staging buffer.
	indexMutations, rowInsertion, err := collectTableMutationsFromBufferStage(t, memBuffer, sh)
	if err != nil {
		return errors.Trace(err)
	}

	// Per-table lookup maps, cached on the transaction to avoid rebuilding.
	columnMaps := getColumnMaps(txn, t)

	// Row insertion consistency check contributes the least to defending data-index consistency, but costs most CPU resources.
	// So we disable it for now.
	//
	// if rowToInsert != nil {
	// 	if err := checkRowInsertionConsistency(
	// 		sessVars, rowToInsert, rowInsertion, columnMaps.ColumnIDToInfo, columnMaps.ColumnIDToFieldType, t.Meta().Name.O,
	// 	); err != nil {
	// 		return errors.Trace(err)
	// 	}
	// }

	// Condition (2): handles in index insertions must agree with the row insertion.
	if rowInsertion.key != nil {
		if err = checkHandleConsistency(rowInsertion, indexMutations, columnMaps.IndexIDToInfo, t.Meta()); err != nil {
			return errors.Trace(err)
		}
	}

	// Condition (3): decoded index keys must be consistent with the row values.
	if err := checkIndexKeys(
		sessVars, t, rowToInsert, rowToRemove, indexMutations, columnMaps.IndexIDToInfo, columnMaps.IndexIDToRowColInfos,
	); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// checkHandleConsistency checks whether the handles, with regard to a single-row change,
// in row insertions and index insertions are consistent.
// A PUT_index implies a PUT_row with the same handle.
// Deletions are not checked since the values of deletions are unknown.
func checkHandleConsistency(rowInsertion mutation, indexMutations []mutation, indexIDToInfo map[int64]*model.IndexInfo, tblInfo *model.TableInfo) error {
	var insertionHandle kv.Handle
	var err error

	if rowInsertion.key == nil {
		return nil
	}
	insertionHandle, err = tablecodec.DecodeRowKey(rowInsertion.key)
	if err != nil {
		return errors.Trace(err)
	}

	for _, m := range indexMutations {
		// An empty value is a deletion; skip it (see function comment).
		if len(m.value) == 0 {
			continue
		}

		// Generate correct index id for check.
		idxID := m.indexID & tablecodec.IndexIDMask
		indexInfo, ok := indexIDToInfo[idxID]
		if !ok {
			return errors.New("index not found")
		}

		// If this is the temporary index data, need to remove the last byte of index data(version about when it is written).
		var (
			value       []byte
			orgKey      []byte
			indexHandle kv.Handle
		)
		// idxID != m.indexID means flag bits were set on the stored index ID,
		// i.e. this is temporary (backfill) index data.
		if idxID != m.indexID {
			if tablecodec.TempIndexValueIsUntouched(m.value) {
				// We never commit the untouched key values to the storage. Skip this check.
				continue
			}
			var tempIdxVal tablecodec.TempIndexValue
			tempIdxVal, err = tablecodec.DecodeTempIndexValue(m.value)
			if err != nil {
				return err
			}
			if !tempIdxVal.IsEmpty() {
				value = tempIdxVal.Current().Value
			}
			if len(value) == 0 {
				// Skip the deleted operation values.
				continue
			}
			// Convert the temp-index key back to a normal index key before decoding.
			orgKey = append(orgKey, m.key...)
			tablecodec.TempIndexKey2IndexKey(orgKey)
			indexHandle, err = tablecodec.DecodeIndexHandle(orgKey, value, len(indexInfo.Columns))
		} else {
			indexHandle, err = tablecodec.DecodeIndexHandle(m.key, m.value, len(indexInfo.Columns))
		}
		if err != nil {
			return errors.Trace(err)
		}
		// NOTE: handle type can be different, see issue 29520
		if indexHandle.IsInt() == insertionHandle.IsInt() && indexHandle.Compare(insertionHandle) != 0 {
			err = ErrInconsistentHandle.GenWithStackByArgs(tblInfo.Name, indexInfo.Name.O, indexHandle, insertionHandle, m, rowInsertion)
			logutil.BgLogger().Error("inconsistent handle in index and record insertions", zap.Error(err))
			return err
		}
	}

	// err is nil here: every assignment above was checked before reaching this point.
	return err
}
// checkIndexKeys checks whether the decoded data from keys of index mutations are consistent with the expected ones.
//
// How it works:
//
// Assume the set of row values changes from V1 to V2, we check
// (1) V2 - V1 = {added indices}
// (2) V1 - V2 = {deleted indices}
//
// To check (1), we need
// (a) {added indices} is a subset of {needed indices} => each index mutation is consistent with the input/row key/value
// (b) {needed indices} is a subset of {added indices}. The check process would be exactly the same with how we generate the mutations, thus ignored.
func checkIndexKeys(
	sessVars *variable.SessionVars, t *TableCommon, rowToInsert, rowToRemove []types.Datum,
	indexMutations []mutation, indexIDToInfo map[int64]*model.IndexInfo,
	indexIDToRowColInfos map[int64][]rowcodec.ColInfo,
) error {
	// indexData is reused across iterations to avoid per-mutation allocations.
	var indexData []types.Datum
	for _, m := range indexMutations {
		var value []byte
		// Generate correct index id for check.
		idxID := m.indexID & tablecodec.IndexIDMask
		indexInfo, ok := indexIDToInfo[idxID]
		if !ok {
			return errors.New("index not found")
		}
		rowColInfos, ok := indexIDToRowColInfos[idxID]
		if !ok {
			return errors.New("index not found")
		}

		var isTmpIdxValAndDeleted bool
		// If this is temp index data, need remove last byte of index data.
		if idxID != m.indexID {
			if tablecodec.TempIndexValueIsUntouched(m.value) {
				// We never commit the untouched key values to the storage. Skip this check.
				continue
			}
			tmpVal, err := tablecodec.DecodeTempIndexValue(m.value)
			if err != nil {
				return err
			}
			curElem := tmpVal.Current()
			isTmpIdxValAndDeleted = curElem.Delete
			value = append(value, curElem.Value...)
		} else {
			value = append(value, m.value...)
		}

		// when we cannot decode the key to get the original value
		if len(value) == 0 && NeedRestoredData(indexInfo.Columns, t.Meta().Columns) {
			continue
		}

		decodedIndexValues, err := tablecodec.DecodeIndexKV(
			m.key, value, len(indexInfo.Columns), tablecodec.HandleNotNeeded, rowColInfos,
		)
		if err != nil {
			return errors.Trace(err)
		}

		// reuse the underlying memory, save an allocation
		if indexData == nil {
			indexData = make([]types.Datum, 0, len(decodedIndexValues))
		} else {
			indexData = indexData[:0]
		}

		for i, v := range decodedIndexValues {
			fieldType := t.Columns[indexInfo.Columns[i].Offset].FieldType.ArrayType()
			datum, err := tablecodec.DecodeColumnValue(v, fieldType, sessVars.Location())
			if err != nil {
				return errors.Trace(err)
			}
			indexData = append(indexData, datum)
		}

		// When it is in add index new backfill state.
		// A deletion (empty value) or a deleted temp-index element is compared
		// against the OLD row; everything else against the NEW row.
		if len(value) == 0 || isTmpIdxValAndDeleted {
			err = compareIndexData(sessVars.StmtCtx, t.Columns, indexData, rowToRemove, indexInfo, t.Meta())
		} else {
			err = compareIndexData(sessVars.StmtCtx, t.Columns, indexData, rowToInsert, indexInfo, t.Meta())
		}
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// checkRowInsertionConsistency checks whether the values of row mutations are consistent with the expected ones.
// We only check data added since a deletion of a row doesn't care about its value (and we cannot know it).
// Currently disabled in CheckDataConsistency for performance reasons.
func checkRowInsertionConsistency(
	sessVars *variable.SessionVars, rowToInsert []types.Datum, rowInsertion mutation,
	columnIDToInfo map[int64]*model.ColumnInfo, columnIDToFieldType map[int64]*types.FieldType, tableName string,
) error {
	if rowToInsert == nil {
		// it's a deletion
		return nil
	}
	decodedData, err := tablecodec.DecodeRowToDatumMap(rowInsertion.value, columnIDToFieldType, sessVars.Location())
	if err != nil {
		return errors.Trace(err)
	}

	// NOTE: we cannot check if the decoded values contain all columns since some columns may be skipped. It can even be empty
	// Instead, we check that decoded index values are consistent with the input row.
	for columnID, decodedDatum := range decodedData {
		inputDatum := rowToInsert[columnIDToInfo[columnID].Offset]
		cmp, err := decodedDatum.Compare(sessVars.StmtCtx, &inputDatum, collate.GetCollator(decodedDatum.Collation()))
		if err != nil {
			return errors.Trace(err)
		}
		if cmp != 0 {
			err = ErrInconsistentRowValue.GenWithStackByArgs(tableName, inputDatum.String(), decodedDatum.String())
			logutil.BgLogger().Error("inconsistent row value in row insertion", zap.Error(err))
			return err
		}
	}
	return nil
}
// collectTableMutationsFromBufferStage collects mutations of the current table from the mem buffer stage.
// It returns: (1) all index mutations (2) the only row insertion.
// If there are no row insertions, the 2nd returned value is nil.
// If there are multiple row insertions, an error is returned.
func collectTableMutationsFromBufferStage(t *TableCommon, memBuffer kv.MemBuffer, sh kv.StagingHandle) (
	[]mutation, mutation, error,
) {
	indexMutations := make([]mutation, 0)
	var rowInsertion mutation
	var err error
	inspector := func(key kv.Key, flags kv.KeyFlags, data []byte) {
		// only check the current table
		if tablecodec.DecodeTableID(key) == t.physicalTableID {
			m := mutation{key, flags, data, 0}
			if rowcodec.IsRowKey(key) {
				// A non-empty value is an insertion/update; empty values
				// (deletions) are ignored here.
				if len(data) > 0 {
					if rowInsertion.key == nil {
						rowInsertion = m
					} else {
						err = errors.Errorf(
							"multiple row mutations added/mutated, one = %+v, another = %+v", rowInsertion, m,
						)
					}
				}
			} else {
				// Index mutation: record which index the key belongs to.
				_, m.indexID, _, err = tablecodec.DecodeIndexKey(m.key)
				if err != nil {
					err = errors.Trace(err)
				}
				indexMutations = append(indexMutations, m)
			}
		}
	}
	memBuffer.InspectStage(sh, inspector)
	return indexMutations, rowInsertion, err
}
// compareIndexData compares the decoded index data with the input data.
// Returns error if the index data is not a subset of the input data.
func compareIndexData(
	sc *stmtctx.StatementContext, cols []*table.Column, indexData, input []types.Datum, indexInfo *model.IndexInfo,
	tableInfo *model.TableInfo,
) error {
	for i := range indexData {
		decodedMutationDatum := indexData[i]
		expectedDatum := input[indexInfo.Columns[i].Offset]

		// Truncate both datums identically before comparing — index values
		// may be stored truncated (e.g. prefix columns).
		tablecodec.TruncateIndexValue(
			&expectedDatum, indexInfo.Columns[i],
			cols[indexInfo.Columns[i].Offset].ColumnInfo,
		)
		tablecodec.TruncateIndexValue(
			&decodedMutationDatum, indexInfo.Columns[i],
			cols[indexInfo.Columns[i].Offset].ColumnInfo,
		)

		// The last argument enables the multi-valued-index comparison mode
		// (row value is a JSON array containing the indexed value).
		comparison, err := CompareIndexAndVal(sc, expectedDatum, decodedMutationDatum,
			collate.GetCollator(decodedMutationDatum.Collation()),
			cols[indexInfo.Columns[i].Offset].ColumnInfo.FieldType.IsArray() && expectedDatum.Kind() == types.KindMysqlJSON)
		if err != nil {
			return errors.Trace(err)
		}

		if comparison != 0 {
			err = ErrInconsistentIndexedValue.GenWithStackByArgs(
				tableInfo.Name.O, indexInfo.Name.O, cols[indexInfo.Columns[i].Offset].ColumnInfo.Name.O,
				decodedMutationDatum.String(), expectedDatum.String(),
			)
			logutil.BgLogger().Error("inconsistent indexed value in index insertion", zap.Error(err))
			return err
		}
	}
	return nil
}
// CompareIndexAndVal compare index valued and row value.
// When cmpMVIndex is true, rowVal is treated as a JSON array (multi-valued
// index) and the result is 0 if any array element equals idxVal.
func CompareIndexAndVal(sctx *stmtctx.StatementContext, rowVal types.Datum, idxVal types.Datum, collator collate.Collator, cmpMVIndex bool) (int, error) {
	var cmpRes int
	var err error
	if cmpMVIndex {
		// If it is multi-valued index, we should check the JSON contains the indexed value.
		bj := rowVal.GetMysqlJSON()
		count := bj.GetElemCount()
		for elemIdx := 0; elemIdx < count; elemIdx++ {
			jsonDatum := types.NewJSONDatum(bj.ArrayGetElem(elemIdx))
			cmpRes, err = jsonDatum.Compare(sctx, &idxVal, collate.GetBinaryCollator())
			if err != nil {
				return 0, errors.Trace(err)
			}
			// cmpRes == 0 means a matching element was found; stop searching.
			if cmpRes == 0 {
				break
			}
		}
	} else {
		cmpRes, err = idxVal.Compare(sctx, &rowVal, collator)
	}
	return cmpRes, err
}
// getColumnMaps returns the cached columnMaps for t from the transaction's
// options, building and storing a fresh one on first use, so repeated
// consistency checks within a transaction avoid recomputing the maps.
func getColumnMaps(txn kv.Transaction, t *TableCommon) columnMaps {
	load := func() (map[int64]columnMaps, bool) {
		cached, ok := txn.GetOption(kv.TableToColumnMaps).(map[int64]columnMaps)
		return cached, ok
	}
	store := func(maps map[int64]columnMaps) {
		txn.SetOption(kv.TableToColumnMaps, maps)
	}
	return getOrBuildColumnMaps(load, store, t)
}
// getOrBuildColumnMaps tries to get the columnMaps from some place. If there isn't one, it builds one and stores it.
// It saves redundant computations of the map.
func getOrBuildColumnMaps(
	getter func() (map[int64]columnMaps, bool), setter func(map[int64]columnMaps), t *TableCommon,
) columnMaps {
	tableMaps, ok := getter()
	if !ok || tableMaps == nil {
		tableMaps = make(map[int64]columnMaps)
	}
	maps, ok := tableMaps[t.tableID]
	if !ok {
		// Cache miss: build all four lookup maps for this table.
		maps = columnMaps{
			make(map[int64]*model.ColumnInfo, len(t.Meta().Columns)),
			make(map[int64]*types.FieldType, len(t.Meta().Columns)),
			make(map[int64]*model.IndexInfo, len(t.Indices())),
			make(map[int64][]rowcodec.ColInfo, len(t.Indices())),
		}
		for _, col := range t.Meta().Columns {
			maps.ColumnIDToInfo[col.ID] = col
			maps.ColumnIDToFieldType[col.ID] = &(col.FieldType)
		}
		for _, index := range t.Indices() {
			// Skip the primary "index" of a common-handle (clustered) table —
			// presumably it has no separate index records to check; confirm.
			if index.Meta().Primary && t.meta.IsCommonHandle {
				continue
			}
			maps.IndexIDToInfo[index.Meta().ID] = index.Meta()
			maps.IndexIDToRowColInfos[index.Meta().ID] = BuildRowcodecColInfoForIndexColumns(index.Meta(), t.Meta())
		}
		tableMaps[t.tableID] = maps
		setter(tableMaps)
	}
	return maps
}
// corruptMutations deliberately corrupts this statement's mutations, to
// exercise the consistency checks. Only used in tests.
//
// commands is a comma separated string, each representing a type of corruptions to the mutations.
// The injection depends on actual encoding rules.
//
// Fix: "missingIndex" and "corruptIndexKey" previously indexed
// indexMutations[0] unconditionally and would panic when there were no
// index mutations; they now skip like the other commands do.
func corruptMutations(t *TableCommon, txn kv.Transaction, sh kv.StagingHandle, cmds string) error {
	commands := strings.Split(cmds, ",")
	memBuffer := txn.GetMemBuffer()

	indexMutations, _, err := collectTableMutationsFromBufferStage(t, memBuffer, sh)
	if err != nil {
		return errors.Trace(err)
	}

	for _, cmd := range commands {
		switch cmd {
		case "extraIndex":
			// an extra index mutation
			{
				if len(indexMutations) == 0 {
					continue
				}
				indexMutation := indexMutations[0]
				key := make([]byte, len(indexMutation.key))
				copy(key, indexMutation.key)
				key[len(key)-1]++
				if len(indexMutation.value) == 0 {
					if err := memBuffer.Delete(key); err != nil {
						return errors.Trace(err)
					}
				} else {
					if err := memBuffer.Set(key, indexMutation.value); err != nil {
						return errors.Trace(err)
					}
				}
			}
		case "missingIndex":
			// an index mutation is missing
			// "missIndex" should be placed in front of "extraIndex"es,
			// in case it removes the mutation that was just added
			{
				if len(indexMutations) == 0 {
					continue
				}
				indexMutation := indexMutations[0]
				memBuffer.RemoveFromBuffer(indexMutation.key)
			}
		case "corruptIndexKey":
			// a corrupted index mutation.
			// TODO: distinguish which part is corrupted, value or handle
			{
				if len(indexMutations) == 0 {
					continue
				}
				indexMutation := indexMutations[0]
				key := indexMutation.key
				memBuffer.RemoveFromBuffer(key)
				key[len(key)-1]++
				if len(indexMutation.value) == 0 {
					if err := memBuffer.Delete(key); err != nil {
						return errors.Trace(err)
					}
				} else {
					if err := memBuffer.Set(key, indexMutation.value); err != nil {
						return errors.Trace(err)
					}
				}
			}
		case "corruptIndexValue":
			// TODO: distinguish which part to corrupt, int handle, common handle, or restored data?
			// It doesn't make much sense to always corrupt the last byte
			{
				if len(indexMutations) == 0 {
					continue
				}
				indexMutation := indexMutations[0]
				value := indexMutation.value
				if len(value) > 0 {
					value[len(value)-1]++
					if err := memBuffer.Set(indexMutation.key, value); err != nil {
						return errors.Trace(err)
					}
				}
			}
		default:
			return fmt.Errorf("unknown command to corrupt mutation: %s", cmd)
		}
	}
	return nil
}
// injectMutationError is a test hook: when the "corruptMutations"
// failpoint is enabled, it corrupts this statement's mutations according
// to the failpoint's command string. With the failpoint disabled it is a
// no-op and returns nil.
func injectMutationError(t *TableCommon, txn kv.Transaction, sh kv.StagingHandle) error {
	failpoint.Inject("corruptMutations", func(commands failpoint.Value) {
		failpoint.Return(corruptMutations(t, txn, sh, commands.(string)))
	})
	return nil
}
|
package gotils_test
import (
"net/http"
"os"
"testing"
. "github.com/onsi/gomega"
gotils "github.com/korovkin/gotils"
)
// MyObject is a fixture type for the GOB round-trip test below; it mixes
// scalar, map, and slice fields so encode/decode covers each kind.
type MyObject struct {
	ErrorStr       string            `json:"err_str"`
	HttpStatusCode int               `json:"err_http_status_code"`
	ClientErrCode  int               `json:"err_client_code"`
	M              map[string]string `json:"m"`
	A              []string          `json:"a"`
}
// TestGOB round-trips a MyObject through gotils.ToGOB/FromGOB and verifies
// the scalar fields survive the encode/decode cycle.
func TestGOB(t *testing.T) {
	RegisterTestingT(t)
	t.Run("gob", func(_ *testing.T) {
		// Fixture with every field populated.
		e := &MyObject{
			ErrorStr:       os.ErrNotExist.Error(),
			HttpStatusCode: http.StatusNotFound,
			ClientErrCode:  0,
			M: map[string]string{
				"a": "aa",
			},
			A: []string{
				"a", "aa", "aaa",
			},
		}
		eCopy := &MyObject{}
		gotils.FromGOB(gotils.ToGOB(e), eCopy)
		Expect(eCopy).NotTo(BeNil())
		// log.Println("=> e:", gotils.ToJSONString(e))
		// log.Println("=> eCopy:", gotils.ToJSONString(eCopy))
		Expect(eCopy.ErrorStr).To(BeEquivalentTo(e.ErrorStr))
		Expect(eCopy.HttpStatusCode).To(BeEquivalentTo(e.HttpStatusCode))
		Expect(eCopy.ClientErrCode).To(BeEquivalentTo(e.ClientErrCode))
		// NOTE(review): the map (M) and slice (A) fields are populated but not
		// asserted — consider adding Expect checks for them.
	})
}
|
package core
import (
"errors"
"fmt"
)
type Word uint16
type OpcodeError struct {
Opcode Word
}
func (err *OpcodeError) Error() string {
return fmt.Sprintf("invalid opcode %#04x", err.Opcode)
}
// State is the full machine state of the emulated CPU: its registers plus
// RAM. Start/StepCycle/Stop drive execution one clock cycle at a time
// through the internal step callback.
type State struct {
	Registers
	Ram  Memory
	step func(bool) error // installed by Start; nil while the machine is stopped
}

// asyncState wraps State for the background execution goroutine launched
// by Start. stepper carries "advance one cycle" requests; signal reports
// back nil per completed cycle, or the terminal error.
type asyncState struct {
	*State
	stepper chan struct{}
	signal  chan error
}
// Start launches the background execution goroutine and installs the step
// callback used by StepCycle and Stop. Starting an already-started machine
// is an error.
func (s *State) Start() error {
	if s.step != nil {
		return errors.New("State has already started")
	}
	async := asyncState{
		State:   s,
		stepper: make(chan struct{}),
		signal:  make(chan error),
	}
	// Execution goroutine: run instructions until one returns an error
	// (including errStopped when the machine is shut down), report it, exit.
	go func() {
		for {
			if err := async.stepInstruction(); err != nil {
				async.signal <- err
				break
			}
		}
		close(async.signal)
	}()
	var lastError error
	s.step = func(done bool) error {
		if done {
			// if we have an error, the async machine has already stopped
			if lastError == nil {
				close(async.stepper)
				<-async.signal
			}
			return nil
		}
		if lastError != nil {
			return lastError
		}
		// Release the goroutine for one cycle and wait for its report.
		async.stepper <- struct{}{}
		lastError = <-async.signal
		return lastError
	}
	// stepInstruction will send out one signal when it's ready.
	// We don't care about waiting until it's ready, but we need to eat that
	// first signal.
	<-async.signal
	return nil
}
// StepCycle advances the machine by exactly one clock cycle. It returns an
// error if the machine has not been started, or the error that terminated
// execution.
func (s *State) StepCycle() error {
	if s.step != nil {
		return s.step(false)
	}
	return errors.New("State has not started")
}
// Stop shuts down a machine started by Start and clears the step callback.
// Stopping a machine that was never started (or already stopped) is an
// error.
func (s *State) Stop() error {
	step := s.step
	if step == nil {
		return errors.New("State is already stopped")
	}
	step(true)
	s.step = nil
	return nil
}
// decodeOpcode splits an instruction word into its basic-opcode field
// (low 4 bits) and the two 6-bit operand fields a and b.
func decodeOpcode(opcode Word) (Word, Word, Word) {
	const sixBits = 0x3F
	return opcode & 0xF, (opcode >> 4) & sixBits, (opcode >> 10) & sixBits
}
// wordCount counts the number of words in the instruction identified by the given opcode.
//
// An instruction takes one word, plus one extra word for each operand that
// embeds a "next word": operand values 0x10-0x17 ([next word + register]),
// 0x1e ([next word]) and 0x1f (next word literal) — see translateOperand.
//
// BUG FIX: the previous version wrote the 0x10-0x17 and 0x1e cases as
// empty switch cases; Go switch cases do not fall through, so only the
// 0x1f case ever incremented the count and multi-word instructions using
// the other forms were undercounted (which made skipInstruction skip into
// the middle of an instruction).
func wordCount(opcode Word) Word {
	_, a, b := decodeOpcode(opcode)
	count := Word(1)
	if (a >= 16 && a <= 23) || a == 30 || a == 31 {
		count++
	}
	if (b >= 16 && b <= 23) || b == 30 || b == 31 {
		count++
	}
	return count
}
// Kinds of location an operand can resolve to for write-back.
const (
	assignableTypeNone     = iota // not writable (e.g. a literal operand)
	assignableTypeRegister        // index is a register number
	assignableTypeMemory          // index is a RAM address
)

// assignable identifies a location (register or memory word) that an
// instruction result can be written back to. The zero value means
// "nowhere" (assignableTypeNone).
type assignable struct {
	valueType int
	index     Word
}

// errStopped is returned by the stepping goroutine when the machine is
// stopped while an instruction is in flight.
var errStopped = errors.New("stopped")
// readAssignable reads the current value at the given location. ok is
// false when the location is not readable (assignableTypeNone), in which
// case the value is zero.
func (s *asyncState) readAssignable(loc assignable) (Word, bool) {
	switch loc.valueType {
	case assignableTypeRegister:
		return s.Registers[loc.index], true
	case assignableTypeMemory:
		return s.Ram.GetWord(loc.index), true
	default:
		return 0, false
	}
}
// writeAssignable stores value at the given location.
// When writing to a non-assignable location, returns false, nil.
// When writing to a protected location, returns false, error.
// Otherwise returns true, nil.
func (s *asyncState) writeAssignable(loc assignable, value Word) (bool, error) {
	if loc.valueType == assignableTypeRegister {
		s.Registers[loc.index] = value
		return true, nil
	}
	if loc.valueType == assignableTypeMemory {
		if err := s.Ram.SetWord(loc.index, value); err != nil {
			return false, err
		}
		return true, nil
	}
	return false, nil
}
// waitCycle reports one completed cycle to the driver and blocks until
// the next step is requested. It returns false when the machine is being
// stopped (the stepper channel was closed).
func (s *asyncState) waitCycle() bool {
	s.signal <- nil
	_, stillRunning := <-s.stepper
	return stillRunning
}
// nextWord consumes one cycle, then fetches the word at PC and increments
// PC. The boolean is false when the machine was stopped during the wait.
func (s *asyncState) nextWord() (Word, bool) {
	// pause for a cycle
	if ok := s.waitCycle(); !ok {
		return 0, false
	}
	fetched := s.Ram.GetWord(s.PC())
	s.IncrPC()
	return fetched, true
}
// skipInstruction consumes one cycle and advances PC past the next whole
// instruction (including its "next word" operands) without executing it.
// Returns false when the machine was stopped during the wait.
func (s *asyncState) skipInstruction() bool {
	if !s.waitCycle() {
		return false
	}
	skip := wordCount(s.Ram.GetWord(s.PC()))
	s.SetPC(s.PC() + skip)
	return true
}
// translateOperand resolves a 6-bit operand value: it returns the
// operand's current value and, when the operand names a writable
// location, an assignable describing it (the zero assignable for
// literals). ok is false only when the machine is stopped while fetching
// a "next word". Some forms consume an extra cycle and/or mutate SP or PC
// as a side effect (POP/PUSH/next-word forms).
func (s *asyncState) translateOperand(op Word) (val Word, assign assignable, ok bool) {
	ok = true
	switch op {
	case 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7:
		// 0x00-0x07: register (A, B, C, X, Y, Z, I or J, in that order)
		assign = assignable{assignableTypeRegister, op}
	case 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf:
		// 0x08-0x0f: [register]
		assign = assignable{assignableTypeMemory, s.Registers[op-8]}
	case 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17:
		// 0x10-0x17: [next word + register]
		// note: takes 1 cycle
		var w Word
		if w, ok = s.nextWord(); !ok {
			return
		}
		assign = assignable{assignableTypeMemory, w + s.Registers[op-16]}
	case 0x18:
		// 0x18: POP / [SP++]
		assign = assignable{assignableTypeMemory, s.SP()}
		s.IncrSP()
	case 0x19:
		// 0x19: PEEK / [SP]
		assign = assignable{assignableTypeMemory, s.SP()}
	case 0x1a:
		// 0x1a: PUSH / [--SP]
		s.DecrSP()
		assign = assignable{assignableTypeMemory, s.SP()}
	case 0x1b:
		// 0x1b: SP
		assign = assignable{assignableTypeRegister, registerSP}
	case 0x1c:
		// 0x1c: PC
		assign = assignable{assignableTypeRegister, registerPC}
	case 0x1d:
		// 0x1d: O
		assign = assignable{assignableTypeRegister, registerO}
	case 0x1e:
		// 0x1e: [next word]
		// note: takes 1 cycle
		var w Word
		if w, ok = s.nextWord(); !ok {
			return
		}
		assign = assignable{assignableTypeMemory, w}
	case 0x1f:
		// 0x1f: next word (literal)
		// note: takes 1 cycle
		var w Word
		if w, ok = s.nextWord(); !ok {
			return
		}
		val = w
	default:
		// 0x20-0x3f: literal value 0x00-0x1f (literal)
		if op > 0x3f {
			panic("Out of bounds operand")
		}
		val = op - 0x20
	}
	// Note: this `ok` deliberately shadows the named result, so a literal
	// operand (no readable location) still leaves the outer ok == true.
	if assVal, ok := s.readAssignable(assign); ok {
		val = assVal
	}
	return
}
// stepInstruction fetches, decodes and executes a single instruction,
// consuming clock cycles via waitCycle/nextWord as it goes. It returns
// errStopped when the machine is shut down mid-instruction, an
// *OpcodeError for an unrecognized non-basic opcode, or a memory write
// error from writeAssignable.
func (s *asyncState) stepInstruction() error {
	// fetch
	opcode, ok := s.nextWord()
	if !ok {
		return errStopped
	}

	// decode
	ins, a, b := decodeOpcode(opcode)

	var assign assignable
	if ins != 0 { // don't translate for the non-basic opcodes
		// Operand a is translated first; its location is kept for write-back.
		if a, assign, ok = s.translateOperand(a); !ok {
			return errStopped
		}
		if b, _, ok = s.translateOperand(b); !ok {
			return errStopped
		}
	}

	// execute
	var val Word
	switch ins {
	case 0:
		// non-basic opcodes: the real opcode is in a, its operand in b.
		ins, a = a, b
		switch ins {
		case 1:
			// JSR a - pushes the address of the next instruction to the stack, then sets PC to a
			// 2 cycles
			if !s.waitCycle() {
				return errStopped
			}
			if _, assign, ok = s.translateOperand(0x1a); !ok { // PUSH
				return errStopped
			}
			if a, _, ok = s.translateOperand(a); !ok {
				return errStopped
			}
			s.writeAssignable(assign, s.PC())
			s.SetPC(a)
			// Clear assign so the store phase below writes nothing.
			assign = assignable{}
		default:
			return &OpcodeError{opcode}
		}
	case 1:
		// SET a, b - sets a to b
		// 1 cycle
		val = b
	case 2:
		// ADD a, b - sets a to a+b, sets O to 0x0001 if there's an overflow, 0x0 otherwise
		// 2 cycles
		if !s.waitCycle() {
			return errStopped
		}
		result := uint32(a) + uint32(b)
		val = Word(result & 0xFFFF)
		s.SetO(Word(result >> 16)) // will always be 0x0 or 0x1
	case 3:
		// SUB a, b - sets a to a-b, sets O to 0xffff if there's an underflow, 0x0 otherwise
		// 2 cycles
		if !s.waitCycle() {
			return errStopped
		}
		result := uint32(a) - uint32(b)
		val = Word(result & 0xFFFF)
		s.SetO(Word(result >> 16)) // will always be 0x0 or 0xffff
	case 4:
		// MUL a, b - sets a to a*b, sets O to ((a*b)>>16)&0xffff
		// 2 cycles
		if !s.waitCycle() {
			return errStopped
		}
		result := uint32(a) * uint32(b)
		val = Word(result & 0xFFFF)
		s.SetO(Word(result >> 16))
	case 5:
		// DIV a, b - sets a to a/b, sets O to ((a<<16)/b)&0xffff. if b==0, sets a and O to 0 instead.
		// 3 cycles
		if !s.waitCycle() || !s.waitCycle() {
			return errStopped
		}
		if b == 0 {
			val = 0
			s.SetO(0)
		} else {
			val = a / b
			s.SetO(Word(((uint32(a) << 16) / uint32(b))))
		}
	case 6:
		// MOD a, b - sets a to a%b. if b==0, sets a to 0 instead.
		// 3 cycles
		if !s.waitCycle() || !s.waitCycle() {
			return errStopped
		}
		if b == 0 {
			val = 0
		} else {
			val = a % b
		}
	case 7:
		// SHL a, b - sets a to a<<b, sets O to ((a<<b)>>16)&0xffff
		// 2 cycles
		if !s.waitCycle() {
			return errStopped
		}
		result := uint32(a) << uint32(b)
		val = Word(result & 0xFFFF)
		s.SetO(Word(result >> 16))
	case 8:
		// SHR a, b - sets a to a>>b, sets O to ((a<<16)>>b)&0xffff
		// 2 cycles
		if !s.waitCycle() {
			return errStopped
		}
		val = a >> b
		s.SetO(Word((uint32(a) << 16) >> b))
	case 9:
		// AND a, b - sets a to a&b
		// 1 cycle
		val = a & b
	case 10:
		// BOR a, b - sets a to a|b
		// 1 cycle
		val = a | b
	case 11:
		// XOR a, b - sets a to a^b
		// 1 cycle
		val = a ^ b
	case 12:
		// IFE a, b - performs next instruction only if a==b
		// 2 cycles +1
		if !s.waitCycle() {
			return errStopped
		}
		if a != b {
			if !s.skipInstruction() {
				return errStopped
			}
		}
		// Branch instructions produce no value; clear assign so the store
		// phase below writes nothing.
		assign = assignable{}
	case 13:
		// IFN a, b - performs next instruction only if a!=b
		// 2 cycles +1
		if !s.waitCycle() {
			return errStopped
		}
		if a == b {
			if !s.skipInstruction() {
				return errStopped
			}
		}
		assign = assignable{}
	case 14:
		// IFG a, b - performs next instruction only if a>b
		// 2 cycles +1
		if !s.waitCycle() {
			return errStopped
		}
		if a <= b {
			if !s.skipInstruction() {
				return errStopped
			}
		}
		assign = assignable{}
	case 15:
		// IFB a, b - performs next instruction only if (a&b)!=0
		// 2 cycles +1
		if !s.waitCycle() {
			return errStopped
		}
		if (a & b) == 0 {
			if !s.skipInstruction() {
				return errStopped
			}
		}
		assign = assignable{}
	default:
		panic("Out of bounds opcode")
	}

	// store: writeAssignable is a no-op (false, nil) when assign is the
	// zero value, so non-writing instructions fall through harmlessly.
	if _, err := s.writeAssignable(assign, val); err != nil {
		return err
	}
	return nil
}
|
package gormsql
import (
"github.com/atymkiv/echo_frame_learning/blog/model"
"github.com/jinzhu/gorm"
)
// NewUser returns a new user database instance backed by the given
// Database (in practice a *gorm.DB).
func NewUser(db Database) *User {
	return &User{db: db}
}

// Database is the minimal query interface this package needs from gorm;
// *gorm.DB satisfies it.
type Database interface {
	Where(query interface{}, args ...interface{}) *gorm.DB
}

// User represents the client for user table
type User struct {
	db Database
}
// FindByEmail queries for a single user by email. It returns a nil user
// and the database error when the lookup fails (including not-found).
func (u *User) FindByEmail(email string) (*blog.User, error) {
	found := new(blog.User)
	err := u.db.Where("Email = ?", email).First(found).Error
	if err != nil {
		return nil, err
	}
	return found, nil
}
|
package main
import (
"github.com/BurntSushi/toml"
)
// config holds the process-wide configuration loaded by configure().
var config configuration

// configuration mirrors the layout of config.toml.
type configuration struct {
	Database databaseConfig
	Server   serverConfig
	Captcha  captchaConfig
}

// databaseConfig holds database connection credentials.
type databaseConfig struct {
	Username string
	Password string
	Database string
}

// serverConfig holds the server listen address.
type serverConfig struct {
	Address string
}

// captchaConfig holds the hCaptcha site and secret keys.
type captchaConfig struct {
	HCaptchaSiteKey   string
	HCaptchaSecretKey string
}
// configure loads config.toml into the package-level config variable,
// panicking if the file cannot be read or parsed.
func configure() {
	if _, err := toml.DecodeFile("config.toml", &config); err != nil {
		panic(err)
	}
}
|
package polls
import (
"fmt"
"github.com/Nv7-Github/Nv7Haven/eod/types"
)
// handlePollSuccess applies the effect of a poll that passed — element
// creation, signing, images, colors, or (un)categorization — and posts an
// announcement to the guild's news channel where applicable.
func (b *Polls) handlePollSuccess(p types.Poll) {
	b.lock.RLock()
	dat, exists := b.dat[p.Guild]
	b.lock.RUnlock()
	if !exists {
		return
	}

	// A poll is marked "controversial" when downvotes reach 30% of the votes.
	controversial := dat.VoteCount != 0 && float32(p.Downvotes)/float32(dat.VoteCount) >= 0.3
	controversialTxt := ""
	if controversial {
		controversialTxt = " 🌩️"
	}

	switch p.Kind {
	case types.PollCombo:
		// p.Data["elems"] may be []string, or []interface{} (e.g. after a
		// serialization round trip); normalize to []string.
		els, ok := p.Data["elems"].([]string)
		if !ok {
			raw := p.Data["elems"].([]interface{})
			els = make([]string, len(raw))
			for i, val := range raw {
				els[i] = val.(string)
			}
		}
		b.elemCreate(p.Value3, els, p.Value4, controversialTxt, p.Guild)

	case types.PollSign:
		b.mark(p.Guild, p.Value1, p.Value2, p.Value4, controversialTxt)

	case types.PollImage:
		b.image(p.Guild, p.Value1, p.Value2, p.Value4, controversialTxt)

	case types.PollCategorize:
		els, ok := p.Data["elems"].([]string)
		if !ok {
			raw := p.Data["elems"].([]interface{})
			// BUG FIX: this was previously `els := make(...)`, declaring a new
			// slice that shadowed the outer els, which therefore stayed nil and
			// the categorization below silently did nothing.
			els = make([]string, len(raw))
			for i, val := range raw {
				els[i] = val.(string)
			}
		}
		for _, val := range els {
			b.Categorize(val, p.Value1, p.Guild)
		}
		if len(els) == 1 {
			b.dg.ChannelMessageSend(dat.NewsChannel, fmt.Sprintf("🗃️ Added **%s** to **%s** (By <@%s>)%s", els[0], p.Value1, p.Value4, controversialTxt))
		} else {
			b.dg.ChannelMessageSend(dat.NewsChannel, fmt.Sprintf("🗃️ Added **%d elements** to **%s** (By <@%s>)%s", len(els), p.Value1, p.Value4, controversialTxt))
		}

	case types.PollUnCategorize:
		els := p.Data["elems"].([]string)
		for _, val := range els {
			b.UnCategorize(val, p.Value1, p.Guild)
		}
		if len(els) == 1 {
			b.dg.ChannelMessageSend(dat.NewsChannel, fmt.Sprintf("🗃️ Removed **%s** from **%s** (By <@%s>)%s", els[0], p.Value1, p.Value4, controversialTxt))
		} else {
			b.dg.ChannelMessageSend(dat.NewsChannel, fmt.Sprintf("🗃️ Removed **%d elements** from **%s** (By <@%s>)%s", len(els), p.Value1, p.Value4, controversialTxt))
		}

	case types.PollCatImage:
		b.catImage(p.Guild, p.Value1, p.Value2, p.Value4, controversialTxt)

	case types.PollColor:
		b.color(p.Guild, p.Value1, p.Data["color"].(int), p.Value4, controversialTxt)

	case types.PollCatColor:
		b.catColor(p.Guild, p.Value1, p.Data["color"].(int), p.Value4, controversialTxt)
	}
}
|
package agent
// Registration is the top-level payload used to register an Instance with
// the service registry.
// NOTE(review): the field/tag names ("$", "@enabled", vipAddress,
// dataCenterInfo) match the Netflix Eureka REST schema — confirm against the
// consuming registry.
type Registration struct {
	Instance Instance `json:"instance"`
}

// Instance describes one registered service instance: identity, addresses,
// status, ports, health/status URLs and lease settings.
type Instance struct {
	InstanceId string `json:"instanceId"`
	HostName string `json:"hostName"`
	App string `json:"app"`
	IpAddr string `json:"ipAddr"`
	VipAddr string `json:"vipAddress"`
	SecureVipAddr string `json:"secureVipAddress"`
	Status string `json:"status"`
	Port Port `json:"port"`
	SecurePort Port `json:"securePort"`
	HealthCheckUrl string `json:"healthCheckUrl"`
	StatusPageUrl string `json:"statusPageUrl"`
	HomePageUrl string `json:"homePageUrl"`
	DataCenterInfo DataCenterInfo `json:"dataCenterInfo"`
	LeaseInfo LeaseInfo `json:"leaseInfo"`
}

// Port is a port number together with an enabled flag, serialized with the
// registry's "$"/"@enabled" attribute naming.
type Port struct {
	Port string `json:"$"`
	Enabled string `json:"@enabled"`
}

// DataCenterInfo identifies the data center hosting the instance.
type DataCenterInfo struct {
	Class string `json:"@class"`
	Name string `json:"name"`
}

// LeaseInfo carries the lease settings sent with a registration.
type LeaseInfo struct {
	DurationInSecs string `json:"durationInSecs"`
}
|
package mysql
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
"log"
"time"
)
// Config holds the MySQL connection settings, typically decoded from YAML.
type Config struct {
	Host string `yaml:"host"`
	Port uint `yaml:"port"`
	User string `yaml:"user"`
	Password string `yaml:"password"`
	Database string `yaml:"database"`
	Charset string `yaml:"charset"`
}

// Db is the shared package-level connection pool, assigned by Config.Init.
var Db *sql.DB
func (c Config) Init() (*sql.DB, error) {
var err error
sqlDsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true&charset=%s",
c.User,
c.Password,
c.Host,
c.Port,
c.Database,
c.Charset,
)
Db, err = sql.Open("mysql", sqlDsn)
if err != nil {
log.Println("MySql Connect failure: ", err.Error())
return nil, err
}
err = Db.Ping()
if err != nil {
log.Println("MySql Ping failure: ", err.Error())
return nil, err
}
// Connection pool and timeouts
// 连接池 和 超时
Db.SetMaxOpenConns(150) // 最大打开连接数
Db.SetMaxIdleConns(10) // 最大空闲连接数
// 连接过期时间 测试结果如下:
// 1如不设过期时间 连接会一直不释放 连接池内连接数量为小于等于maxopen的数字
// 2如设置了连接过期时间
// 2.1 连接池内连接数量在连接过期后归零
// 2.2 如之前连接数达到了最大打开连接数 连接池内连接数会依次经历: 由maxopen => maxidle => 0
Db.SetConnMaxLifetime(time.Second * 5)
return Db, nil
} |
package client_test
import (
"context"
"fmt"
"net/http"
"reflect"
"strings"
"testing"
"time"
"github.com/chanioxaris/go-datagovgr/datagovgrtest"
"github.com/jarcoal/httpmock"
)
// testPayload is a minimal JSON body used to exercise MakeRequestGET in the
// tests below.
type testPayload struct {
	Author string `json:"author"`
}
// TestClient_MakeRequestGET_Success verifies that a GET against a mocked
// endpoint succeeds and decodes the JSON response into the target struct.
func TestClient_MakeRequestGET_Success(t *testing.T) {
	fixture := datagovgrtest.NewFixture(t)
	want := testPayload{Author: "chanioxaris"}

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()

	mockURL := fmt.Sprintf("%s/%s", fixture.BaseURL, fixture.TestPath)
	httpmock.RegisterResponder(
		http.MethodGet,
		mockURL,
		httpmock.NewJsonResponderOrPanic(http.StatusOK, want),
	)

	var got testPayload
	if err := fixture.InternalClient.MakeRequestGET(context.Background(), fixture.TestPath, &got, nil); err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("Expected payload %+v, but got %+v", want, got)
	}
}
// TestClient_MakeRequestGET_QueryParameters_Success verifies that query
// parameters passed to MakeRequestGET actually reach the wire: each mock
// responder is bound to a specific query string and only matches when the
// client sends it.
func TestClient_MakeRequestGET_QueryParameters_Success(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)
	expectedPayload := testPayload{Author: "chanioxaris"}
	// Table of query-parameter combinations to exercise.
	tests := []struct {
		name string
		query map[string]string
	}{
		{
			name: "HTTP request with date_from query parameter",
			query: map[string]string{
				"date_from": "2009-01-03",
			},
		},
		{
			name: "HTTP request with date_to query parameter",
			query: map[string]string{
				"date_to": "2009-01-03",
			},
		},
		{
			name: "HTTP request with date_from and date_to query parameters",
			query: map[string]string{
				"date_from": "2009-01-03",
				"date_to": "2009-01-04",
			},
		},
	}
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Responder only fires if the request carries tt.query.
			httpmock.RegisterResponderWithQuery(
				http.MethodGet,
				fmt.Sprintf("%s/%s", fixture.BaseURL, fixture.TestPath),
				tt.query,
				httpmock.NewJsonResponderOrPanic(http.StatusOK, expectedPayload),
			)
			payload := testPayload{}
			err := fixture.InternalClient.MakeRequestGET(ctx, fixture.TestPath, &payload, tt.query)
			if err != nil {
				t.Fatalf("Unexpected error %v", err)
			}
			if !reflect.DeepEqual(payload, expectedPayload) {
				t.Fatalf("Expected payload %+v, but got %+v", expectedPayload, payload)
			}
		})
	}
}
// TestClient_MakeRequestGET_Error checks the main failure modes of
// MakeRequestGET: a nil context, a non-2xx status code, and a response body
// that cannot be unmarshalled into the target struct. Each case asserts on a
// substring of the returned error.
func TestClient_MakeRequestGET_Error(t *testing.T) {
	fixture := datagovgrtest.NewFixture(t)
	tests := []struct {
		name string
		path string
		statusCode int
		body string
		ctx context.Context
		expectedError string
	}{
		{
			name: "Nil context",
			path: "nil-context",
			statusCode: 0,
			body: "",
			ctx: nil,
			expectedError: "nil Context",
		},
		{
			name: "Unexpected status code",
			path: "unexpected-status-code",
			statusCode: http.StatusInternalServerError,
			body: "",
			ctx: context.Background(),
			expectedError: fmt.Sprintf("unexpected status code: %v", http.StatusInternalServerError),
		},
		{
			// Body is valid JSON but "author" has the wrong type.
			name: "Invalid response body",
			path: "invalid-response-body",
			statusCode: http.StatusOK,
			body: `{"author": 13}`,
			ctx: context.Background(),
			expectedError: "json: cannot unmarshal",
		},
	}
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Distinct paths per case keep the responders independent.
			httpmock.RegisterResponder(
				http.MethodGet,
				fmt.Sprintf("%s/%s", fixture.BaseURL, tt.path),
				httpmock.NewStringResponder(tt.statusCode, tt.body),
			)
			payload := testPayload{}
			err := fixture.InternalClient.MakeRequestGET(tt.ctx, tt.path, &payload, nil)
			if err == nil {
				t.Fatal("Expected error, but got nil")
			}
			if !strings.Contains(err.Error(), tt.expectedError) {
				t.Fatalf(`Expected error to contain "%v", but got "%v"`, tt.expectedError, err)
			}
		})
	}
}
// TestClient_MakeRequestGET_Error_Timeout verifies that a request whose
// responder outlives the context deadline surfaces the deadline error.
func TestClient_MakeRequestGET_Error_Timeout(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
	defer cancel()

	fixture := datagovgrtest.NewFixture(t)
	expectedError := "context deadline exceeded"

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()

	// The responder sleeps past the 500ms deadline before answering.
	slowResponder := func(req *http.Request) (*http.Response, error) {
		time.Sleep(time.Second * 1)
		return httpmock.NewJsonResponse(http.StatusOK, testPayload{Author: "chanioxaris"})
	}
	httpmock.RegisterResponder(
		http.MethodGet,
		fmt.Sprintf("%s/%s", fixture.BaseURL, fixture.TestPath),
		slowResponder,
	)

	var payload testPayload
	err := fixture.InternalClient.MakeRequestGET(ctx, fixture.TestPath, &payload, nil)
	if err == nil {
		t.Fatal("Expected error, but got nil")
	}
	if !strings.Contains(err.Error(), expectedError) {
		t.Fatalf(`Expected error to contain "%v", but got "%v"`, expectedError, err)
	}
}
|
package alarm
// AlarmCounter tallies yellow and red alarm occurrences.
// NOTE(review): the counts are float64 rather than int — presumably to feed
// a metrics API that expects floats; confirm against callers.
type AlarmCounter struct {
	YellowCount float64
	RedCount float64
}
|
package here_test
import (
"github.com/codingsince1985/geo-golang"
"github.com/codingsince1985/geo-golang/here"
"strings"
"testing"
)
// appID and appCode are placeholder HERE API credentials; replace them with
// real values to run these tests against the live service.
const appID = "YOUR_APP_ID"
const appCode = "YOUR_APP_CODE"

// geocoder is the shared HERE geocoder under test.
// NOTE(review): the meaning of the third argument (100) is not visible here —
// confirm against the here.Geocoder signature.
var geocoder = here.Geocoder(appID, appCode, 100)
// TestGeocode checks that geocoding a known address returns the expected
// coordinates.
func TestGeocode(t *testing.T) {
	loc, err := geocoder.Geocode("Melbourne VIC")
	ok := err == nil && loc.Lat == -37.81753 && loc.Lng == 144.96715
	if !ok {
		t.Error("TestGeocode() failed", err, loc)
	}
}
// TestReverseGeocode checks that known coordinates reverse-geocode to a
// Melbourne CBD address.
func TestReverseGeocode(t *testing.T) {
	addr, err := geocoder.ReverseGeocode(-37.816742, 144.964463)
	ok := err == nil && strings.HasSuffix(addr, "VIC 3000, Australia")
	if !ok {
		t.Error("TestReverseGeocode() failed", err, addr)
	}
}
// TestReverseGeocodeWithNoResult checks that coordinates in the ocean yield
// the sentinel geo.ErrNoResult.
func TestReverseGeocodeWithNoResult(t *testing.T) {
	if _, err := geocoder.ReverseGeocode(-37.816742, 164.964463); err != geo.ErrNoResult {
		t.Error("TestReverseGeocodeWithNoResult() failed", err)
	}
}
|
/*
By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
3
7 4
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom in triangle.txt (right click and 'Save Link/Target As...'), a 15K text file containing a triangle with one-hundred rows.
NOTE: This is a much more difficult version of Problem 18. It is not possible to try every route to solve this problem, as there are 2^99 altogether! If you could check one trillion (10^12) routes every second it would take over twenty billion years to check them all. There is an efficient algorithm to solve it. ;o)
*/
package main
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
)
// init loads the flattened triangle numbers from p67triangle.txt into data.
// Non-numeric tokens (e.g. empty strings from the newline replacement) are
// skipped.
func init() {
	b, err := ioutil.ReadFile("p67triangle.txt")
	if err != nil {
		// Without the input file the program cannot do anything useful;
		// fail loudly instead of silently running on an empty data set
		// (the original discarded this error with a blank identifier).
		panic(err)
	}
	s := strings.Replace(string(b), "\n", " ", -1)
	for _, n := range strings.Split(s, " ") {
		if number, err := strconv.Atoi(n); err == nil {
			data = append(data, number)
		}
	}
}
// main links the flat data slice into a triangle of tree nodes and prints
// the maximum top-to-bottom path sum starting at the apex (flat index 0).
func main() {
	buildTriangle()
	fmt.Println(getMaxTreeSum(triangle[0]))
}
// tree is a node in the triangle: value holds the number, id is the node's
// index in the flattened input, and left/right point to the two adjacent
// children on the row below (interior children are shared by two parents).
type tree struct {
	left, right *tree
	value, id   int
}

// data holds the flattened triangle numbers read at startup.
var data []int

// triangle maps a flattened index to its lazily created node.
var triangle = make(map[int]*tree)

// maxTreeSum memoizes the best path sum rooted at each node id.
var maxTreeSum = make(map[int]int)

// getMaxTreeSum returns the maximum top-to-bottom path sum of the subtree
// rooted at t; a nil node contributes 0. Results are memoized per node id so
// each node is solved exactly once (the original rewrote the cache entry on
// every call, including cache hits).
func getMaxTreeSum(t *tree) int {
	if t == nil {
		return 0
	}
	if sum, ok := maxTreeSum[t.id]; ok {
		return sum
	}
	best := getMaxTreeSum(t.left)
	if right := getMaxTreeSum(t.right); right > best {
		best = right
	}
	sum := t.value + best
	maxTreeSum[t.id] = sum
	return sum
}
// buildTriangle links the flat data slice into a DAG of tree nodes: the node
// at flat index i on a row of length rowLen has children at i+rowLen and
// i+rowLen+1 on the next row, so interior children are shared by two parents.
func buildTriangle() {
	rowLen, remainingInRow := 1, 1
	for i := 0; i < len(data); i, remainingInRow = i+1, remainingInRow-1 {
		// Crossing into the next row: each row is one element longer.
		if remainingInRow == 0 {
			rowLen++
			remainingInRow = rowLen
		}
		t := getTree(i)
		// getTree returns nil past the last row, leaving leaves childless.
		t.left = getTree(i + rowLen)
		t.right = getTree(i + rowLen + 1)
	}
}
// getTree returns the lazily created node for the given flat index, or nil
// when the index lies beyond the end of the data.
func getTree(index int) *tree {
	if index >= len(data) {
		return nil
	}
	if existing, ok := triangle[index]; ok && existing != nil {
		return existing
	}
	node := &tree{id: index, value: data[index]}
	triangle[index] = node
	return node
}
|
package types
import (
"encoding/json"
"time"
)
// DelayQueueData wraps a queue payload with its retry-delay metadata.
type DelayQueueData struct {
	Data        string    `json:"data"`        // the queue origin data
	DelayTime   int       `json:"delaytime"`   // the delay time, unit is second
	TriggerTime time.Time `json:"triggertime"` // the unix timestamp to trigger
}

// SerializeDelayQueueData serializes delay queue data for an auto-run delay
// queue. The trigger time is computed as now + delayTime seconds.
func SerializeDelayQueueData(data string, delayTime int) ([]byte, error) {
	delayQueueData := &DelayQueueData{
		Data:        data,
		DelayTime:   delayTime,
		TriggerTime: time.Now().Add(time.Duration(delayTime) * time.Second),
	}
	jsonStr, err := json.Marshal(delayQueueData)
	if err != nil {
		// Return nil (not an empty slice) so callers cannot mistake an
		// error result for a valid, empty payload.
		return nil, err
	}
	return jsonStr, nil
}

// UnserializeDelayQueueData unserializes delay queue data for an auto-run
// delay queue. In "retry" mode data is a serialized DelayQueueData envelope;
// in any other mode data is the raw payload of a first attempt.
// It returns (payload, nextDelayTime, currentDelayTime, error), where
// nextDelayTime is the first entry of delayOnFailure strictly greater than
// the delay already used (0 when the schedule is exhausted or empty).
func UnserializeDelayQueueData(runMode string, data string, delayOnFailure []int) (string, int, int, error) {
	if runMode != "retry" {
		// First attempt: raw payload, first configured delay, no delay yet.
		nextDelayTime := 0
		if len(delayOnFailure) > 0 {
			nextDelayTime = delayOnFailure[0]
		}
		return data, nextDelayTime, 0, nil
	}
	delayQueueData := &DelayQueueData{}
	if err := json.Unmarshal([]byte(data), delayQueueData); err != nil {
		return "", 0, 0, err
	}
	// Pick the first configured delay longer than the one already used.
	nextDelayTime := 0
	for _, dt := range delayOnFailure {
		if delayQueueData.DelayTime < dt {
			nextDelayTime = dt
			break
		}
	}
	return delayQueueData.Data, nextDelayTime, delayQueueData.DelayTime, nil
}
|
package main
import (
"fmt"
"net/rpc"
"os"
)
// Args carries the two integer operands sent to the Arith RPC methods.
type Args struct {
	A, B int
}

// Quotient holds the result of Arith.Divide: quotient Q and remainder R.
type Quotient struct {
	Q, R int
}
// readArgs prompts on stdout and reads the two integer operands from stdin.
func readArgs() Args {
	var first, second int
	fmt.Println("A: ")
	fmt.Scanln(&first)
	fmt.Println("B: ")
	fmt.Scanln(&second)
	return Args{A: first, B: second}
}
// checkError prints str together with the error and terminates the process
// with status 1 when err is non-nil; it is a no-op for a nil error.
func checkError(str string, err error) {
	if err == nil {
		return
	}
	fmt.Println(str, err)
	os.Exit(1)
}
// main connects to the Arith RPC server on localhost:4321, reads an operator
// and two operands from stdin, performs the remote call and prints the
// result. Each case exits the process explicitly (which skips the deferred
// Close; the OS reclaims the connection).
func main() {
	service := "localhost:4321"
	client, err := rpc.Dial("tcp", service)
	// Check the dial error BEFORE registering the deferred Close: on failure
	// client is nil, and the original deferred client.Close() on a nil
	// client before checking err.
	checkError("Dial: ", err)
	defer client.Close()
	fmt.Println("* - multiplicação")
	fmt.Println("/ - divisão")
	fmt.Println("+ - soma")
	fmt.Println("- - subtração")
	var op byte
	fmt.Scanf("%c\n", &op)
	switch op {
	case '*':
		args := readArgs()
		var reply int
		err = client.Call("Arith.Multiply", args, &reply)
		checkError("Multiply: ", err)
		fmt.Printf("%d * %d = %d\n", args.A, args.B, reply)
		os.Exit(0)
	case '-':
		args := readArgs()
		var reply int
		err = client.Call("Arith.Subtraction", args, &reply)
		checkError("Subtraction: ", err)
		fmt.Printf("%d - %d = %d\n", args.A, args.B, reply)
		os.Exit(0)
	case '+':
		args := readArgs()
		var reply int
		err = client.Call("Arith.Sum", args, &reply)
		checkError("Sum: ", err)
		fmt.Printf("%d + %d = %d\n", args.A, args.B, reply)
		os.Exit(0)
	case '/':
		args := readArgs()
		var reply Quotient
		err = client.Call("Arith.Divide", args, &reply)
		checkError("Divide: ", err)
		fmt.Printf("%d / %d = (%d,%d)\n", args.A, args.B, reply.Q, reply.R)
		os.Exit(0)
	default:
		fmt.Println("Opção inválida: ", op)
		os.Exit(1)
	}
}
|
package main
import (
"fmt"
"os"
"github.com/gin-gonic/gin"
"htmlparser/httpclient"
"htmlparser/handlers"
)
// main verifies the Spark dashboard is reachable, then serves the Prometheus
// ("/" and "/metrics") and CSV ("/csv/") scrape endpoints with gin.
func main() {
	// Test connectivity to Spark before serving anything.
	// (gofmt fix: "! httpclient" -> "!httpclient".)
	if !httpclient.SparkDashboardIsRequestable() {
		fmt.Println("FATAL : Cannot request spark dashboard :-(")
		fmt.Println("\tAre the SPARK_DASHBOARD_URL, SPARK_LOGIN and SPARK_PASSWORD env var correctly set ?")
		fmt.Println("\tbye bye")
		os.Exit(-1)
	}
	router := gin.Default()
	prometheus := router.Group("/")
	{
		prometheus.GET("/", handlers.Prometheus)
		prometheus.GET("/metrics", handlers.Prometheus)
	}
	csv := router.Group("/csv")
	{
		csv.GET("/", handlers.Csv)
	}
	// By default it serves on :8080 unless a PORT environment variable was defined.
	router.Run()
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.