text
stringlengths 11
4.05M
|
|---|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// The MIT License (MIT)
//
// Copyright (c) 2014 wandoulabs
// Copyright (c) 2014 siddontang
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
package internal
import (
"bufio"
"bytes"
"compress/zlib"
"io"
"time"
"github.com/klauspost/compress/zstd"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/server/err"
"github.com/pingcap/tidb/server/internal/util"
server_metrics "github.com/pingcap/tidb/server/metrics"
"github.com/pingcap/tidb/sessionctx/variable"
)
// defaultWriterSize is the initial buffer size for the bufio.Writer that
// stages outgoing packets before they hit the network.
const defaultWriterSize = 16 * 1024
// PacketIO is a helper to read and write data in packet format.
// MySQL Packets: https://dev.mysql.com/doc/internals/en/mysql-packet.html
type PacketIO struct {
	// bufReadConn is the underlying buffered connection all reads come from.
	bufReadConn *util.BufferedReadConn
	// bufWriter stages uncompressed writes; used when compression is off.
	bufWriter *bufio.Writer
	// compressedWriter wraps writes when a compression algorithm is enabled.
	compressedWriter *compressedWriter
	readTimeout      time.Duration
	// maxAllowedPacket is the maximum size of one packet in ReadPacket.
	maxAllowedPacket uint64
	// accumulatedLength count the length of totally received 'payload' in ReadPacket.
	accumulatedLength uint64
	// compressionAlgorithm is one of the mysql.Compression* constants.
	compressionAlgorithm int
	zstdLevel            zstd.EncoderLevel
	// sequence is the plain packet sequence number; compressedSequence is the
	// separate sequence number of the compressed-protocol framing.
	sequence           uint8
	compressedSequence uint8
}
// NewPacketIO creates a new PacketIO with given net.Conn.
// Compression starts disabled and the max allowed packet size defaults to
// variable.DefMaxAllowedPacket.
func NewPacketIO(bufReadConn *util.BufferedReadConn) *PacketIO {
	pkt := &PacketIO{
		compressionAlgorithm: mysql.CompressionNone,
		zstdLevel:            3,
	}
	pkt.SetBufferedReadConn(bufReadConn)
	pkt.SetMaxAllowedPacket(variable.DefMaxAllowedPacket)
	return pkt
}
// NewPacketIOForTest creates a new PacketIO that only has a writer attached;
// intended for tests that never read from a connection.
func NewPacketIOForTest(bufWriter *bufio.Writer) *PacketIO {
	pkt := new(PacketIO)
	pkt.SetBufWriter(bufWriter)
	return pkt
}
// SetZstdLevel sets the zstd compression level.
// Takes effect the next time SetCompressionAlgorithm creates a writer.
func (p *PacketIO) SetZstdLevel(level zstd.EncoderLevel) {
	p.zstdLevel = level
}

// Sequence returns the sequence of PacketIO.
func (p *PacketIO) Sequence() uint8 {
	return p.sequence
}

// SetSequence sets the sequence of PacketIO.
func (p *PacketIO) SetSequence(s uint8) {
	p.sequence = s
}

// SetCompressedSequence sets the compressed sequence of PacketIO.
func (p *PacketIO) SetCompressedSequence(s uint8) {
	p.compressedSequence = s
}

// SetBufWriter sets the bufio.Writer of PacketIO.
func (p *PacketIO) SetBufWriter(bufWriter *bufio.Writer) {
	p.bufWriter = bufWriter
}

// ResetBufWriter resets the bufio.Writer of PacketIO.
// Any buffered, unflushed data is discarded by bufio.Writer.Reset.
func (p *PacketIO) ResetBufWriter(w io.Writer) {
	p.bufWriter.Reset(w)
}

// SetCompressionAlgorithm sets the compression algorithm of PacketIO.
// It flushes the uncompressed writer first so no pending bytes are lost
// when subsequent writes switch to the compressed path.
func (p *PacketIO) SetCompressionAlgorithm(ca int) {
	p.compressionAlgorithm = ca
	p.compressedWriter = newCompressedWriter(p.bufReadConn, ca)
	p.compressedWriter.zstdLevel = p.zstdLevel
	// NOTE(review): the Flush error is ignored here — presumably acceptable
	// during the handshake, but worth confirming.
	p.bufWriter.Flush()
}

// SetBufferedReadConn sets the BufferedReadConn of PacketIO.
// Also (re)creates the write buffer on top of the same connection.
func (p *PacketIO) SetBufferedReadConn(bufReadConn *util.BufferedReadConn) {
	p.bufReadConn = bufReadConn
	p.bufWriter = bufio.NewWriterSize(bufReadConn, defaultWriterSize)
}

// SetReadTimeout sets the read timeout of PacketIO.
// A zero timeout means reads block indefinitely (deadline cleared in ReadPacket).
func (p *PacketIO) SetReadTimeout(timeout time.Duration) {
	p.readTimeout = timeout
}
// readOnePacket reads a single MySQL packet (4-byte header + payload) from the
// connection. When the compressed protocol is active it first consumes the
// 7-byte compressed-packet header, validates the compressed sequence number,
// and — if the frame was actually compressed (uncompressed length > 0) —
// routes the subsequent reads through the matching decompressor. It also
// validates the plain sequence number and enforces maxAllowedPacket on the
// accumulated payload length.
func (p *PacketIO) readOnePacket() ([]byte, error) {
	var header [4]byte
	// Default reader: the raw buffered conn, wrapped so Close below is a no-op.
	r := io.NopCloser(p.bufReadConn)
	if p.readTimeout > 0 {
		if err := p.bufReadConn.SetReadDeadline(time.Now().Add(p.readTimeout)); err != nil {
			return nil, err
		}
	}
	if p.compressionAlgorithm != mysql.CompressionNone {
		// Compressed header layout: 3 bytes compressed length (little-endian),
		// 1 byte sequence, 3 bytes uncompressed length (little-endian).
		var compressedHeader [7]byte
		if _, err := io.ReadFull(p.bufReadConn, compressedHeader[:]); err != nil {
			return nil, errors.Trace(err)
		}
		compressedSequence := compressedHeader[3]
		if compressedSequence != p.compressedSequence {
			return nil, err.ErrInvalidSequence.GenWithStack(
				"invalid compressed sequence %d != %d", compressedSequence, p.compressedSequence)
		}
		p.compressedSequence++
		// Keep the writer's compressed sequence in lockstep with what we read.
		p.compressedWriter.compressedSequence = p.compressedSequence
		uncompressedLength := int(uint32(compressedHeader[4]) | uint32(compressedHeader[5])<<8 | uint32(compressedHeader[6])<<16)
		if uncompressedLength > 0 {
			// An uncompressed length of 0 means the payload was sent as-is;
			// otherwise wrap the connection in the negotiated decompressor.
			switch p.compressionAlgorithm {
			case mysql.CompressionZlib:
				var err error
				r, err = zlib.NewReader(p.bufReadConn)
				if err != nil {
					return nil, errors.Trace(err)
				}
			case mysql.CompressionZstd:
				zstdReader, err := zstd.NewReader(p.bufReadConn, zstd.WithDecoderConcurrency(1))
				if err != nil {
					return nil, errors.Trace(err)
				}
				r = zstdReader.IOReadCloser()
			default:
				return nil, errors.New("Unknown compression algorithm")
			}
		}
	}
	if _, err := io.ReadFull(r, header[:]); err != nil {
		return nil, errors.Trace(err)
	}
	sequence := header[3]
	if sequence != p.sequence {
		return nil, err.ErrInvalidSequence.GenWithStack("invalid sequence %d != %d", sequence, p.sequence)
	}
	p.sequence++
	// Payload length is a 3-byte little-endian integer.
	length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16)
	// Accumulated payload length exceeds the limit.
	if p.accumulatedLength += uint64(length); p.accumulatedLength > p.maxAllowedPacket {
		terror.Log(err.ErrNetPacketTooLarge)
		return nil, err.ErrNetPacketTooLarge
	}
	data := make([]byte, length)
	if p.readTimeout > 0 {
		// Refresh the deadline before the (potentially large) payload read.
		if err := p.bufReadConn.SetReadDeadline(time.Now().Add(p.readTimeout)); err != nil {
			return nil, err
		}
	}
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, errors.Trace(err)
	}
	err := r.Close()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return data, nil
}
// SetMaxAllowedPacket sets the max allowed packet size of PacketIO.
// ReadPacket rejects any logical packet whose accumulated payload exceeds it.
func (p *PacketIO) SetMaxAllowedPacket(maxAllowedPacket uint64) {
	p.maxAllowedPacket = maxAllowedPacket
}
// ReadPacket reads one logical packet from the connection, reassembling
// payloads split at mysql.MaxPayloadLen into a single slice.
func (p *PacketIO) ReadPacket() ([]byte, error) {
	p.accumulatedLength = 0
	if p.readTimeout == 0 {
		// No per-read timeout configured: clear any deadline left behind.
		if err := p.bufReadConn.SetReadDeadline(time.Time{}); err != nil {
			return nil, errors.Trace(err)
		}
	}
	data, err := p.readOnePacket()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// A payload shorter than MaxPayloadLen terminates the sequence;
	// otherwise continuation packets follow until a short one arrives.
	for len(data) >= mysql.MaxPayloadLen {
		more, rerr := p.readOnePacket()
		if rerr != nil {
			return nil, errors.Trace(rerr)
		}
		data = append(data, more...)
		if len(more) < mysql.MaxPayloadLen {
			break
		}
	}
	server_metrics.ReadPacketBytes.Add(float64(len(data)))
	return data, nil
}
// WritePacket writes data that already have header
// (the first 4 bytes are overwritten with the 3-byte little-endian length and
// the sequence number). Payloads at or above the maximum payload length are
// split into continuation packets per the MySQL wire protocol, and the plain
// sequence number advances once per packet written.
func (p *PacketIO) WritePacket(data []byte) error {
	length := len(data) - 4
	server_metrics.WritePacketBytes.Add(float64(len(data)))
	maxPayloadLen := mysql.MaxPayloadLen
	if p.compressionAlgorithm != mysql.CompressionNone {
		// NOTE(review): 4 bytes per packet are reserved under compression —
		// presumably to keep the framed packet within the compressed-frame
		// limit; confirm against the MySQL compressed-protocol spec.
		maxPayloadLen -= 4
	}
	for length >= maxPayloadLen {
		// Continuation packet: length field pinned to the maximum (0xffffff
		// when uncompressed), signalling that more packets follow.
		data[3] = p.sequence
		data[0] = 0xff
		data[1] = 0xff
		data[2] = 0xff
		if p.compressionAlgorithm != mysql.CompressionNone {
			if n, err := p.compressedWriter.Write(data[:4+maxPayloadLen]); err != nil {
				return errors.Trace(mysql.ErrBadConn)
			} else if n != (4 + maxPayloadLen) {
				// Short write: treat as a broken connection.
				return errors.Trace(mysql.ErrBadConn)
			}
		} else {
			if n, err := p.bufWriter.Write(data[:4+maxPayloadLen]); err != nil {
				return errors.Trace(mysql.ErrBadConn)
			} else if n != (4 + maxPayloadLen) {
				return errors.Trace(mysql.ErrBadConn)
			}
		}
		p.sequence++
		length -= maxPayloadLen
		// Note: the remaining slice reuses the last 4 bytes of the previous
		// chunk as the next packet's header area.
		data = data[maxPayloadLen:]
	}
	// Final (or only) packet carries the true remaining length.
	data[3] = p.sequence
	data[0] = byte(length)
	data[1] = byte(length >> 8)
	data[2] = byte(length >> 16)
	if p.compressionAlgorithm != mysql.CompressionNone {
		if n, err := p.compressedWriter.Write(data); err != nil {
			terror.Log(errors.Trace(err))
			return errors.Trace(mysql.ErrBadConn)
		} else if n != len(data) {
			return errors.Trace(mysql.ErrBadConn)
		} else {
			p.sequence++
			return nil
		}
	} else {
		if n, err := p.bufWriter.Write(data); err != nil {
			terror.Log(errors.Trace(err))
			return errors.Trace(mysql.ErrBadConn)
		} else if n != len(data) {
			return errors.Trace(mysql.ErrBadConn)
		} else {
			p.sequence++
			return nil
		}
	}
}
// Flush flushes buffered data to network, through the compressed writer when
// compression is enabled and through the plain buffered writer otherwise.
func (p *PacketIO) Flush() error {
	flush := p.bufWriter.Flush
	if p.compressionAlgorithm != mysql.CompressionNone {
		flush = p.compressedWriter.Flush
	}
	if err := flush(); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// newCompressedWriter builds a compressedWriter that frames and compresses
// packets with algorithm ca and forwards the result to w.
func newCompressedWriter(w io.Writer, ca int) *compressedWriter {
	return &compressedWriter{
		w:                    w,
		buf:                  new(bytes.Buffer),
		compressionAlgorithm: ca,
		compressedSequence:   0,
		zstdLevel:            3,
	}
}
// compressedWriter accumulates outgoing packet bytes in buf and, on Flush,
// wraps them in a MySQL compressed-protocol frame written to w.
type compressedWriter struct {
	w   io.Writer
	buf *bytes.Buffer
	// compressionAlgorithm is one of the mysql.Compression* constants.
	compressionAlgorithm int
	// compressedSequence numbers the compressed frames, independently of the
	// plain packet sequence.
	compressedSequence uint8
	zstdLevel          zstd.EncoderLevel
}
// Write buffers data, flushing a full compressed frame whenever the internal
// buffer reaches the 1 MiB cap. Returns the number of bytes consumed.
func (cw *compressedWriter) Write(data []byte) (n int, err error) {
	// MySQL starts with `net_buffer_length` (default 16384) and larger packets after that.
	// The length itself must fit in the 3 byte field in the header.
	// Can't be bigger then the max value for `net_buffer_length` (1048576)
	const maxCompressedSize = 1048576 // 1 MiB
	for len(data) > maxCompressedSize-cw.buf.Len() {
		free := maxCompressedSize - cw.buf.Len()
		written, werr := cw.buf.Write(data[:free])
		if werr != nil {
			return 0, werr
		}
		n += written
		data = data[free:]
		if ferr := cw.Flush(); ferr != nil {
			return 0, ferr
		}
	}
	written, werr := cw.buf.Write(data)
	if werr != nil {
		return 0, werr
	}
	return n + written, nil
}
// Flush frames the buffered bytes as one MySQL compressed-protocol packet and
// writes it to the underlying writer. Payloads of minCompressLength bytes or
// fewer are sent uncompressed (uncompressed-length field set to 0), as the
// protocol allows.
func (cw *compressedWriter) Flush() error {
	var payload, compressedPacket bytes.Buffer
	var w io.WriteCloser
	var err error
	// https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_basic_compression_packet.html
	// suggests a MIN_COMPRESS_LENGTH of 50.
	minCompressLength := 50
	data := cw.buf.Bytes()
	cw.buf.Reset()
	switch cw.compressionAlgorithm {
	case mysql.CompressionZlib:
		// NOTE(review): zlib.HuffmanOnly is passed where a compression level
		// is expected — it is a valid flate constant but worth confirming the
		// intended trade-off.
		w, err = zlib.NewWriterLevel(&payload, zlib.HuffmanOnly)
	case mysql.CompressionZstd:
		w, err = zstd.NewWriter(&payload, zstd.WithEncoderLevel(cw.zstdLevel))
	default:
		return errors.New("Unknown compression algorithm")
	}
	if err != nil {
		return errors.Trace(err)
	}
	// uncompressedLength stays 0 for small payloads, signalling "not compressed".
	uncompressedLength := 0
	compressedHeader := make([]byte, 7)
	if len(data) > minCompressLength {
		uncompressedLength = len(data)
		_, err := w.Write(data)
		if err != nil {
			return errors.Trace(err)
		}
		err = w.Close()
		if err != nil {
			return errors.Trace(err)
		}
	}
	var compressedLength int
	if len(data) > minCompressLength {
		compressedLength = len(payload.Bytes())
	} else {
		compressedLength = len(data)
	}
	// Header: 3 bytes compressed length, 1 byte sequence, 3 bytes
	// uncompressed length (all little-endian).
	compressedHeader[0] = byte(compressedLength)
	compressedHeader[1] = byte(compressedLength >> 8)
	compressedHeader[2] = byte(compressedLength >> 16)
	compressedHeader[3] = cw.compressedSequence
	compressedHeader[4] = byte(uncompressedLength)
	compressedHeader[5] = byte(uncompressedLength >> 8)
	compressedHeader[6] = byte(uncompressedLength >> 16)
	_, err = compressedPacket.Write(compressedHeader)
	if err != nil {
		return errors.Trace(err)
	}
	cw.compressedSequence++
	if len(data) > minCompressLength {
		_, err = compressedPacket.Write(payload.Bytes())
	} else {
		_, err = compressedPacket.Write(data)
	}
	if err != nil {
		return errors.Trace(err)
	}
	// NOTE(review): w.Close() here closes the compressor a second time on the
	// compressed path (already closed above) and closes a never-written
	// compressor on the uncompressed path; the error is ignored. Looks
	// harmless for zstd/zlib but worth confirming.
	w.Close()
	_, err = cw.w.Write(compressedPacket.Bytes())
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
package common
import (
"encoding/hex"
"encoding/json"
"fmt"
"testing"
)
// TestNewTransaction marshals a transaction with a dummy txid and prints the
// JSON hex-encoded (smoke test; no assertions).
func TestNewTransaction(t *testing.T) {
	var tx Transaction
	tx.Txid = "hgs"
	encoded, _ := json.Marshal(&tx)
	fmt.Println(hex.EncodeToString(encoded))
}
|
/*
A trivial application to illustrate how the blockartlib library can be
used from an application in project 1 for UBC CS 416 2017W2.
Usage:
go run art-app.go
*/
package main
// Expects blockartlib.go to be in the ./blockartlib/ dir, relative to
// this art-app.go file
import "./blockartlib"
import (
"crypto/x509"
"encoding/hex"
"fmt"
"os"
)
func main() {
<<<<<<< HEAD
// <svg>
// <path d="M 480 40 L 430 120 L 480 150 L 520 120 H 520 L 480 40" fill="red" stroke="red"></path>
// <path d="M 420 130 L 350 230 L 480 300 V 160 L 420 130" fill="transparent" stroke="red"> </path>
// <path d="M 490 160 L 530 140 L 610 240 L 490 300 Z" fill="blue" stroke="blue"></path>
// <path d="M 761 78 L 741 58 H 711 L 691 78 V 98 L 711 118 H 721 L 758 117 L 770 140 V 160 L 750 180 H 710 L 690 160" fill="transparent" stroke="green"></path>
// <path d="M 700 40 L 720 200" fill="transparent" stroke="green"></path>
// <path d="M 720 40 L 740 200" fill="transparent" stroke="green"></path>
// <path d="M 280 140 L 560 50" fill="transparent" stroke="red"></path>
// <path d="M 280 140 L 560 50" fill="transpraent" stroke="purple"><path>
// </svg>
minerAddr := "127.0.0.1:50417"
=======
minerAddr := "127.0.0.1:34492"
>>>>>>> ubc/master
privKeyString := "3081a4020101043069f5ffffd085b51a78166f766330f4771674d8cadfd4bc3556082d59fcaa8d56a74e487e125318c0abb0c71e3852b341a00706052b81040022a164036200043bda4ebb0d9f3d2270e41ce140b889bdb94e889fb7c1b0f082c9919bbb5cde31af295da333e5f216336bface06843b0f5ef7b36d1ab0b28bbe458559b8d48df15763e2e6e955f8102aca1c5e8413a248547ece44bc1be5326debc14cb8add5ed"
privateKeyBytes, _ := hex.DecodeString(privKeyString)
privKey, _ := x509.ParseECPrivateKey(privateKeyBytes)
// TODO: use crypto/ecdsa to read pub/priv keys from a file argument.
// Open a canvas.
canvas, settings, err := blockartlib.OpenCanvas(minerAddr, *privKey)
if checkError(err) != nil {
//return
}
fmt.Println(canvas)
fmt.Println(settings)
validateNum := uint8(6)
// Add a line.
shapeHash, blockHash, ink, err := canvas.AddShape(validateNum, blockartlib.PATH, "M 0 0 L 0 5", "transparent", "red")
if checkError(err) != nil {
return
}
fmt.Println("added a line:", shapeHash, blockHash, ink)
// Add another line.
shapeHash2, blockHash2, ink2, err := canvas.AddShape(validateNum, blockartlib.PATH, "M 0 0 L 5 0", "transparent", "blue")
if checkError(err) != nil {
return
}
fmt.Println("added another line", shapeHash2, blockHash2, ink2)
fmt.Println("deleting a line!")
// Delete the first line.
ink3, err := canvas.DeleteShape(validateNum, shapeHash)
if checkError(err) != nil {
return
}
fmt.Println("deleted a line", ink3)
// assert ink3 > ink2
// Close the canvas.
ink4, err := canvas.CloseCanvas()
if checkError(err) != nil {
return
}
fmt.Println("closed canvas", ink4)
}
// checkError reports a non-nil error on stderr and hands it back to the
// caller; nil passes through untouched.
func checkError(err error) error {
	if err == nil {
		return nil
	}
	fmt.Fprintln(os.Stderr, "Error ", err.Error())
	return err
}
|
package main
import (
"context"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/husseinhammoud/cdktesting-backend/lib/models/API_Responses"
"github.com/husseinhammoud/cdktesting-backend/lib/models/Address"
)
// Event is the Lambda input payload; Sub presumably carries the caller's
// Cognito subject identifier — confirm against the invoking trigger.
type Event struct {
	Sub string `json:"sub"`
}
// sess and ddb are cached across warm invocations of the Lambda container.
var sess *session.Session
var ddb *dynamodb.DynamoDB
// HandleLambdaFunction is the Lambda entry point. It lazily initializes the
// shared AWS session and DynamoDB client on cold start, then (eventually)
// performs the CRUD work.
//
// BUG FIX: the original body had no return statement despite declaring
// (API_Responses.Response, error) results, which does not compile. It now
// returns a zero-value response until the CRUD logic is implemented.
func HandleLambdaFunction(ctx context.Context, request Event) (API_Responses.Response, error) {
	if sess == nil || ddb == nil {
		Address.Init(&sess, &ddb)
	}
	// TODO: crud
	var resp API_Responses.Response
	return resp, nil
}
// main registers the handler with the Lambda runtime.
func main() {
	lambda.Start(HandleLambdaFunction)
}
|
package main
import (
"math/rand"
tl "github.com/JoelOtter/termloop"
)
// newStageTwo builds the second stage: a green background scattered with "⚘"
// noise and a queue of buzzword text entities scrolling in from the right.
func newStageTwo() *Stage {
	const textGap = 100
	var textCandidates = []string{
		"Transformation",
		"InfrastructureAsCode",
		"ContinuousIntegration",
		"ContinuousDeployment",
		"Waterfall",
		"Blockchain",
		"Microservices",
		"MachineLearning",
		"Automation",
		"InternetOfThings",
		"VirtualReality",
		"SearchEngineOptimization",
		"Serverless",
		"Containers",
		"DataMining",
	}
	stage := &Stage{
		Title: "2",
		Background: tl.Cell{
			Bg: tl.RgbTo256Color(30, 70, 30),
			Fg: tl.ColorBlack,
			Ch: ' ',
		},
	}
	// Generate starting position
	screenWidth, screenHeight := game.Screen().Size()
	// Noise
	stage.Noise = makeNoise(len(textCandidates)*textGap+screenWidth, screenHeight, "⚘", stage.Background.Fg, stage.Background.Bg)
	// Text
	for slot, word := range textCandidates {
		// Right edge of screen + Offset by previous slots + Random offset within its slot
		posX := screenWidth + (slot * textGap) + rand.Intn(textGap)
		// Box it in by 3 so that ship bullets can actually hit it
		posY := rand.Intn(screenHeight-7) + 4
		stage.Entities = append(stage.Entities, newText(word, posX, posY))
	}
	return stage
}
|
package mongomodel
import (
"time"
)
// DockModel pairs a daily dock view with a mapping from raw dock-result codes
// (0, 10, ..., 60) to indices into the view's ViewData slice.
type DockModel struct {
	View    *DailyDockView
	Typemap map[int]int
}
// NewDockModel builds a DockModel for the given date with the default
// code-to-index mapping: 0→0, 10→1, ..., 60→6.
func NewDockModel(date time.Time) *DockModel {
	model := &DockModel{
		View:    newDailyDockView(date),
		Typemap: make(map[int]int),
	}
	for idx := 0; idx <= 6; idx++ {
		model.Typemap[idx*10] = idx
	}
	return model
}
// DailyDockView holds one day's dock-result counters, one item per result kind.
type DailyDockView struct {
	Date     time.Time
	ViewData []item
}
// newDailyDockView creates a view for the given date with every dock-result
// counter initialized to zero.
func newDailyDockView(date time.Time) *DailyDockView {
	return &DailyDockView{
		Date: date,
		ViewData: []item{
			{Key: "Unknown", Value: 0},
			{Key: "BackToDockSucc", Value: 0},
			{Key: "BackToDockTrapAndNearDock", Value: 0},
			{Key: "BackToOriginSucc", Value: 0},
			{Key: "BackToOriginFail", Value: 0},
			{Key: "BackToDockNoPower", Value: 0},
			{Key: "DockResult_Malfunctioning", Value: 0},
		},
	}
}
|
package bolt
import (
"log"
"encoding/json"
"webapp/entities"
)
// This type/struct stores no state, it’s just a collection of methods
type LanguageDAOBolt struct {
	// bucketName is the Bolt bucket all operations read/write.
	bucketName string
}
// NewLanguageDAOBolt creates a DAO bound to the "language" bucket.
func NewLanguageDAOBolt() LanguageDAOBolt {
	return LanguageDAOBolt{bucketName: "language"}
}
// jsonToStruct converts the given JSON string to a structure (unmarshalling).
// Panics on malformed JSON, consistent with the rest of the DAO.
// (Receiver renamed from the non-idiomatic `this`; redundant parens dropped.)
func (dao *LanguageDAOBolt) jsonToStruct(value string) entities.Language {
	language := entities.Language{}
	if err := json.Unmarshal([]byte(value), &language); err != nil {
		panic(err)
	}
	return language
}
// structToJson converts the given structure to a JSON string (marshalling).
// Panics if marshalling fails.
// (BUG FIX: the local variable was named `json`, shadowing the encoding/json
// package; renamed to `data`. Receiver renamed from `this`.)
func (dao *LanguageDAOBolt) structToJson(language entities.Language) string {
	data, err := json.Marshal(language)
	if err != nil {
		panic(err)
	}
	return string(data)
}
// FindAll returns every language stored in the bucket.
// (Receiver renamed from `this`; result slice pre-sized to the value count.)
func (dao *LanguageDAOBolt) FindAll() []entities.Language {
	log.Print("DAO - FindAll() ")
	values := dbGetAll(dao.bucketName)
	languages := make([]entities.Language, 0, len(values))
	for _, v := range values {
		languages = append(languages, dao.jsonToStruct(v))
	}
	return languages
}
// Find returns the language stored under code, or nil when absent.
// (Receiver renamed from `this`; else-after-return flattened to a guard.)
func (dao *LanguageDAOBolt) Find(code string) *entities.Language {
	log.Printf("DAO - Find(%s) ", code)
	value := dbGet(dao.bucketName, code)
	if value == "" {
		return nil
	}
	language := dao.jsonToStruct(value)
	return &language
}
// Exists reports whether a language is stored under code.
// (Receiver renamed from `this`; boolean returned directly instead of via a
// mutable flag.)
func (dao *LanguageDAOBolt) Exists(code string) bool {
	log.Printf("DAO - Exists(%s) ", code)
	return dbGet(dao.bucketName, code) != ""
}
// Create stores a new language; returns false when the code already exists.
// (Receiver renamed from `this`; early return instead of else branch.)
func (dao *LanguageDAOBolt) Create(language entities.Language) bool {
	log.Printf("DAO - Create(%s) ", language.Code)
	if dao.Exists(language.Code) {
		// already exists => cannot create !
		log.Printf("DAO - Create(%s) : already exists => cannot create", language.Code)
		return false
	}
	// not found => create
	log.Printf("DAO - Create(%s) : not found => created", language.Code)
	dao.Save(language)
	return true
}
// Delete removes the language stored under code; returns true when it was
// found and deleted. (Receiver renamed from `this`; early return.)
func (dao *LanguageDAOBolt) Delete(code string) bool {
	log.Printf("DAO - Delete(%s) ", code)
	if !dao.Exists(code) {
		return false // not found => not deleted
	}
	dbDelete(dao.bucketName, code) // delete in Bolt DB
	return true                    // found and deleted
}
// Update overwrites an existing language; returns false when the code is not
// stored. (Receiver renamed from `this`; early return.)
func (dao *LanguageDAOBolt) Update(language entities.Language) bool {
	log.Printf("DAO - Update(%s) ", language.Code)
	if !dao.Exists(language.Code) {
		return false // not found => not updated
	}
	dao.Save(language) // update in Bolt DB
	return true        // found and updated
}
// Save writes the language into the bucket keyed by its code, marshalled as
// JSON. (Receiver renamed from `this`; superseded commented-out marshalling
// code removed — structToJson does the same work.)
func (dao *LanguageDAOBolt) Save(language entities.Language) {
	log.Printf("DAO - Save(%s) ", language.Code)
	key := language.Code
	value := dao.structToJson(language)
	dbPut(dao.bucketName, key, value)
}
|
package slices
import (
"fmt"
"math/rand"
"sort"
"strings"
"time"
"github.com/life4/genesis/constraints"
)
// Choice chooses a random element from the slice.
// If seed is zero, UNIX timestamp will be used.
func Choice[S ~[]T, T any](items S, seed int64) (T, error) {
	var picked T
	if len(items) == 0 {
		return picked, ErrEmpty
	}
	if seed == 0 {
		seed = time.Now().UnixNano()
	}
	rand.Seed(seed)
	picked = items[rand.Intn(len(items))]
	return picked, nil
}
// ChunkEvery returns slice of slices containing count elements each;
// the trailing chunk may be shorter.
func ChunkEvery[S ~[]T, T any](items S, count int) ([]S, error) {
	chunks := make([]S, 0)
	if count <= 0 {
		return chunks, ErrNegativeValue
	}
	current := make([]T, 0, count)
	for idx := range items {
		current = append(current, items[idx])
		if len(current) == count {
			chunks = append(chunks, current)
			current = make([]T, 0, count)
		}
	}
	if len(current) > 0 {
		chunks = append(chunks, current)
	}
	return chunks, nil
}
// Contains reports whether el occurs anywhere in items.
func Contains[S ~[]T, T comparable](items S, el T) bool {
	for idx := range items {
		if items[idx] == el {
			return true
		}
	}
	return false
}
// Count returns how many times el occurs in items.
func Count[S ~[]T, T comparable](items S, el T) int {
	total := 0
	for idx := range items {
		if items[idx] == el {
			total++
		}
	}
	return total
}
// Copy creates a copy of the given slice; nil stays nil.
func Copy[S ~[]T, T any](items S) S {
	if items == nil {
		return nil
	}
	// append to a nil base so an empty input yields the same result as before
	dup := append(S(nil), items...)
	return dup
}
// Cycle returns a channel that yields the slice's elements forever.
// An empty slice produces an immediately-closed channel.
func Cycle[S ~[]T, T any](items S) chan T {
	out := make(chan T)
	go func() {
		defer close(out)
		if len(items) == 0 {
			return
		}
		for {
			for idx := range items {
				out <- items[idx]
			}
		}
	}()
	return out
}
// Dedup returns the slice with consecutive duplicate elements collapsed.
func Dedup[S ~[]T, T comparable](items S) S {
	if len(items) == 0 {
		return items
	}
	out := make(S, 0, len(items))
	out = append(out, items[0])
	for i := 1; i < len(items); i++ {
		if items[i] != items[i-1] {
			out = append(out, items[i])
		}
	}
	return out
}
// Delete returns a copy of items with the first occurrence of element removed.
func Delete[S ~[]T, T comparable](items S, element T) S {
	if len(items) == 0 {
		return items
	}
	out := make([]T, 0, len(items))
	removed := false
	for _, v := range items {
		if !removed && v == element {
			removed = true
			continue
		}
		out = append(out, v)
	}
	return out
}
// DeleteAll returns a copy of items with every occurrence of element removed.
func DeleteAll[S ~[]T, T comparable](items S, element T) S {
	if len(items) == 0 {
		return items
	}
	kept := make([]T, 0, len(items))
	for _, v := range items {
		if v != element {
			kept = append(kept, v)
		}
	}
	return kept
}
// DeleteAt returns the slice without elements on given positions.
// Any index beyond the slice yields ErrOutOfRange.
func DeleteAt[S ~[]T, T any](items S, indices ...int) (S, error) {
	if len(indices) == 0 || len(items) == 0 {
		return items, nil
	}
	for _, idx := range indices {
		if idx >= len(items) {
			return items, ErrOutOfRange
		}
	}
	skip := func(pos int) bool {
		for _, idx := range indices {
			if pos == idx {
				return true
			}
		}
		return false
	}
	out := make([]T, 0, len(items)-1)
	for pos, v := range items {
		if !skip(pos) {
			out = append(out, v)
		}
	}
	return out, nil
}
// DropEvery returns a slice of every nth element in the enumerable dropped,
// starting with the first element (offset by from).
func DropEvery[S ~[]T, T any](items S, nth int, from int) (S, error) {
	if nth <= 0 {
		return items, ErrNonPositiveValue
	}
	if from < 0 {
		return items, ErrNegativeValue
	}
	kept := make([]T, 0, len(items)/nth)
	for i := 0; i < len(items); i++ {
		if (i+from)%nth != 0 {
			kept = append(kept, items[i])
		}
	}
	return kept, nil
}
// EndsWith returns true if slice ends with the given suffix slice.
// If suffix is empty, it returns true.
func EndsWith[S ~[]T, T comparable](items S, suffix S) bool {
	offset := len(items) - len(suffix)
	if offset < 0 {
		return false
	}
	for i := len(suffix) - 1; i >= 0; i-- {
		if suffix[i] != items[offset+i] {
			return false
		}
	}
	return true
}
// Equal reports whether both slices have the same length and elements in order.
func Equal[S1 ~[]T, S2 ~[]T, T comparable](items S1, other S2) bool {
	if len(items) != len(other) {
		return false
	}
	for i := range items {
		if items[i] != other[i] {
			return false
		}
	}
	return true
}
// Grow increases the slice's capacity by at least n elements without changing
// its length, to reduce allocations on subsequent appends.
func Grow[S ~[]T, T any](items S, n int) S {
	grown := append(items, make(S, n)...)
	return grown[:len(items)]
}
// Shrink removes unused capacity from the slice via a full slice expression.
func Shrink[S ~[]T, T any](items S) S {
	length := len(items)
	return items[:length:length]
}
// Join concatenates elements of the slice into one string, separated by sep,
// formatting each element with %v.
func Join[S ~[]T, T any](items S, sep string) string {
	parts := make([]string, len(items))
	for i, v := range items {
		parts[i] = fmt.Sprintf("%v", v)
	}
	return strings.Join(parts, sep)
}
// Index returns the index of the first occurrence of item in items,
// or ErrNotFound when absent.
func Index[S []T, T comparable](items S, item T) (int, error) {
	for i := range items {
		if items[i] == item {
			return i, nil
		}
	}
	return 0, ErrNotFound
}
// InsertAt returns the items slice with the item inserted at the given index.
// Negative indices yield ErrNegativeValue; indices past the end ErrOutOfRange.
func InsertAt[S ~[]T, T any](items S, index int, item T) (S, error) {
	if index > len(items) {
		return items, ErrOutOfRange
	}
	if index < 0 {
		return items, ErrNegativeValue
	}
	out := make([]T, 0, len(items)+1)
	out = append(out, items[:index]...)
	out = append(out, item)
	out = append(out, items[index:]...)
	return out, nil
}
// Intersperse inserts el between each pair of adjacent elements of items.
func Intersperse[S ~[]T, T any](items S, el T) S {
	if len(items) == 0 {
		return items
	}
	out := make([]T, 0, 2*len(items)-1)
	for i, v := range items {
		if i > 0 {
			out = append(out, el)
		}
		out = append(out, v)
	}
	return out
}
// Last returns the last element from the slice, or ErrEmpty when empty.
func Last[S ~[]T, T any](items S) (T, error) {
	var zero T
	if n := len(items); n > 0 {
		return items[n-1], nil
	}
	return zero, ErrEmpty
}
// Max returns the maximal element from items, or ErrEmpty when empty.
func Max[S ~[]T, T constraints.Ordered](items S) (T, error) {
	var best T
	if len(items) == 0 {
		return best, ErrEmpty
	}
	best = items[0]
	for i := 1; i < len(items); i++ {
		if items[i] > best {
			best = items[i]
		}
	}
	return best, nil
}
// Min returns the minimal element from items, or ErrEmpty when empty.
func Min[S ~[]T, T constraints.Ordered](items S) (T, error) {
	var best T
	if len(items) == 0 {
		return best, ErrEmpty
	}
	best = items[0]
	for i := 1; i < len(items); i++ {
		if items[i] < best {
			best = items[i]
		}
	}
	return best, nil
}
// Permutations returns successive size-length permutations of elements from the slice.
// {1, 2, 3} -> {1, 2}, {1, 3}, {2, 1}, {2, 3}, {3, 1}, {3, 2}
func Permutations[T any](items []T, size int) chan []T {
	out := make(chan []T, 1)
	go func() {
		defer close(out)
		if len(items) > 0 {
			permutations(items, out, size, []T{}, items)
		}
	}()
	return out
}
// permutations recursively extends left with one element taken from right,
// emitting left on the channel once it reaches size (or right is exhausted).
func permutations[T any](items []T, c chan []T, size int, left []T, right []T) {
	if len(left) == size || len(right) == 0 {
		c <- left
		return
	}
	for i := range right {
		next := append(append(make([]T, 0, len(left)+1), left...), right[i])
		rest := make([]T, 0, len(right)-1)
		rest = append(rest, right[:i]...)
		rest = append(rest, right[i+1:]...)
		permutations(items, c, size, next, rest)
	}
}
// Product returns cortesian product of elements
// {{1, 2}, {3, 4}} -> {1, 3}, {1, 4}, {2, 3}, {2, 4}
func Product[S ~[]T, T any](items S, repeat int) chan []T {
	out := make(chan []T, 1)
	go func() {
		defer close(out)
		if repeat >= 1 {
			product(items, out, repeat, []T{}, 0)
		}
	}()
	return out
}
// product recursively builds tuples of length repeat, emitting each completed
// tuple on the channel.
func product[S ~[]T, T any](items S, c chan []T, repeat int, left []T, pos int) {
	last := pos == repeat-1
	for _, el := range items {
		next := make([]T, 0, len(left)+1)
		next = append(next, left...)
		next = append(next, el)
		if last {
			c <- next
		} else {
			product(items, c, repeat, next, pos+1)
		}
	}
}
// Reverse returns a copy of items in reversed order; inputs of length <= 1
// are returned as-is.
func Reverse[S ~[]T, T any](items S) S {
	if len(items) <= 1 {
		return items
	}
	out := make([]T, len(items))
	for i, v := range items {
		out[len(items)-1-i] = v
	}
	return out
}
// Repeat concatenates n copies of items into a new slice.
func Repeat[S ~[]T, T any](items S, n int) S {
	out := make([]T, 0, len(items)*n)
	for ; n > 0; n-- {
		out = append(out, items...)
	}
	return out
}
// Same reports whether every element in items is equal; empty and
// single-element slices count as true.
func Same[S ~[]T, T comparable](items S) bool {
	for i := 1; i < len(items); i++ {
		if items[i] != items[0] {
			return false
		}
	}
	return true
}
// Shuffle in random order the given elements
//
// This is an in-place operation, it modifies the passed slice.
// If seed is zero, UNIX timestamp will be used.
func Shuffle[S ~[]T, T any](items S, seed int64) {
	if len(items) <= 1 {
		return
	}
	if seed == 0 {
		seed = time.Now().UnixNano()
	}
	rand.Seed(seed)
	rand.Shuffle(len(items), func(i, j int) {
		items[i], items[j] = items[j], items[i]
	})
}
// Sort stably sorts items in place (ascending) and returns the same slice.
func Sort[S ~[]T, T constraints.Ordered](items S) S {
	if len(items) <= 1 {
		return items
	}
	sort.SliceStable(items, func(a, b int) bool {
		return items[a] < items[b]
	})
	return items
}
// Sorted reports whether items is in non-decreasing order.
func Sorted[S ~[]T, T constraints.Ordered](items S) bool {
	for i := 1; i < len(items); i++ {
		if items[i] < items[i-1] {
			return false
		}
	}
	return true
}
// Split splits items into segments delimited by sep; the separator itself is
// not included. Trailing/leading separators yield empty segments.
func Split[S ~[]T, T comparable](items S, sep T) []S {
	result := make([]S, 0)
	segment := make([]T, 0)
	for _, v := range items {
		if v == sep {
			result = append(result, segment)
			segment = make([]T, 0)
			continue
		}
		segment = append(segment, v)
	}
	return append(result, segment)
}
// StartsWith returns true if slice starts with the given prefix slice.
// If prefix is empty, it returns true.
func StartsWith[S ~[]T, T comparable](items S, prefix S) bool {
	if len(prefix) > len(items) {
		return false
	}
	for i := range prefix {
		if prefix[i] != items[i] {
			return false
		}
	}
	return true
}
// Sum returns the sum of all elements of items (zero value for an empty slice).
func Sum[S ~[]T, T constraints.Ordered](items S) T {
	var total T
	for i := range items {
		total += items[i]
	}
	return total
}
// TakeEvery returns a slice containing every nth element, offset by from.
func TakeEvery[S ~[]T, T any](items S, nth int, from int) (S, error) {
	if nth <= 0 {
		return items, ErrNonPositiveValue
	}
	if from < 0 {
		return items, ErrNegativeValue
	}
	picked := make(S, 0, len(items))
	for i := 0; i < len(items); i++ {
		if (i+from)%nth == 0 {
			picked = append(picked, items[i])
		}
	}
	return picked, nil
}
// TakeRandom returns count random elements from the slice.
// Note: it shuffles the passed slice in place. If seed is zero, the current
// UNIX timestamp is used.
func TakeRandom[S ~[]T, T any](items S, count int, seed int64) (S, error) {
	switch {
	case count > len(items):
		return nil, ErrOutOfRange
	case count <= 0:
		return nil, ErrNonPositiveValue
	}
	if seed == 0 {
		seed = time.Now().UnixNano()
	}
	rand.Seed(seed)
	rand.Shuffle(len(items), func(i, j int) {
		items[i], items[j] = items[j], items[i]
	})
	return items[:count], nil
}
// ToChannel returns a channel that yields each element of items once,
// then closes.
func ToChannel[S ~[]T, T any](items S) chan T {
	out := make(chan T)
	go func() {
		defer close(out)
		for i := range items {
			out <- items[i]
		}
	}()
	return out
}
// ToMap converts the slice into a map keyed by element index.
// A nil slice yields a nil map.
func ToMap[S ~[]V, V any](items S) map[int]V {
	if items == nil {
		return nil
	}
	out := make(map[int]V, len(items))
	for i, v := range items {
		out[i] = v
	}
	return out
}
// ToMapGroupedBy groups items into a map keyed by the value that
// keyExtractor returns for each item; items with equal keys are
// appended in their original order.
func ToMapGroupedBy[V any, T comparable](items []V, keyExtractor func(V) T) map[T][]V {
	groups := make(map[T][]V)
	for i := range items {
		k := keyExtractor(items[i])
		groups[k] = append(groups[k], items[i])
	}
	return groups
}
// ToKeys converts the slice into a map whose keys are the slice items
// and whose values are all the given val. A nil slice yields a nil map;
// duplicate items collapse into one key.
func ToKeys[S ~[]K, K comparable, V any](items S, val V) map[K]V {
	if items == nil {
		return nil
	}
	out := make(map[K]V, len(items))
	for _, k := range items {
		out[k] = val
	}
	return out
}
// Uniq returns the slice keeping only the first occurrence of each
// element; the relative order of those first occurrences is preserved.
func Uniq[S ~[]T, T comparable](items S) S {
	if len(items) <= 1 {
		return items
	}
	seen := make(map[T]struct{}, len(items))
	out := make(S, 0, len(items))
	for _, v := range items {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	return out
}
// Window makes a sliding window over the slice:
// ({1,2,3}, 2) -> (1,2), (2,3). The returned windows alias the input's
// backing array. size must be positive; a size larger than the slice
// yields an empty result.
func Window[S ~[]T, T any](items S, size int) ([]S, error) {
	if size <= 0 {
		return nil, ErrNonPositiveValue
	}
	windows := make([]S, 0, len(items)/size)
	for lo, hi := 0, size; hi <= len(items); lo, hi = lo+1, hi+1 {
		windows = append(windows, items[lo:hi])
	}
	return windows, nil
}
// Without returns a copy of items with every occurrence of the given
// elements removed. The input slice is not modified.
func Without[S ~[]T, T comparable](items S, elements ...T) S {
	// Index the excluded elements once so each item is checked in O(1).
	// The previous version scanned the full elements list for every item
	// (and kept scanning even after a match), costing O(len(items)*len(elements)).
	excluded := make(map[T]struct{}, len(elements))
	for _, e := range elements {
		excluded[e] = struct{}{}
	}
	result := make(S, 0, len(items))
	for _, el := range items {
		if _, skip := excluded[el]; !skip {
			result = append(result, el)
		}
	}
	return result
}
// Wrap makes a single-element slice containing the given value.
func Wrap[T any](item T) []T {
	result := make([]T, 1)
	result[0] = item
	return result
}
|
package sstable
import (
"bytes"
)
// bytesReaderCloser adapts a *bytes.Reader to io.ReadCloser by adding a
// no-op Close, so in-memory data can stand in for a file in tests.
type bytesReaderCloser struct {
	*bytes.Reader
}
// Close implements io.Closer; it does nothing and always returns nil.
func (bytesReaderCloser) Close() error {
	return nil
}
// equalBytes reports whether the two byte slices have the same length
// and contents. A nil slice and an empty slice compare equal.
func equalBytes(a, b []byte) bool {
	// bytes.Equal performs the same length-then-contents comparison as
	// the previous hand-rolled loop, via an optimized implementation.
	return bytes.Equal(a, b)
}
// equalStrings reports whether the two string slices have the same
// length and elements in the same order.
func equalStrings(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, s := range a {
		if s != b[i] {
			return false
		}
	}
	return true
}
// dummyTable builds a small in-memory SSTable fixture: six bytes of
// backing data ("abbccc") served through bytesReaderCloser, plus four
// index records. The record fields appear to be (key, offset, length,
// checksum) with precomputed checksums — confirm against the record
// declaration.
func dummyTable() *SSTable {
	data := []byte{'a', 'b', 'b', 'c', 'c', 'c'}
	return &SSTable{
		f: bytesReaderCloser{bytes.NewReader(data)},
		r: []record{
			record{"o", 2, 1, 0xa5e29763},
			record{"p", 3, 3, 0x8e1dbfe5},
			record{"q", 0, 0, 0xa282ead8},
			record{"w", 1, 0, 0x28e46e78},
		},
	}
}
|
package ToDoApi
import (
"goRestData/ToDoService"
"net/http"
)
// Route describes one HTTP route: a human-readable name, the HTTP
// method, the URL pattern, and the handler invoked on a match.
type Route struct {
	Name string
	Method string
	Pattern string
	HandlerFunc http.HandlerFunc
}
// Routes is an ordered list of Route definitions.
type Routes []Route
// routes wires each API path to its ToDoService handler.
var routes = Routes{
	Route{
		"Index",
		"GET",
		"/",
		ToDoService.Index,
	},
	Route{
		"List",
		"GET",
		"/List",
		ToDoService.List,
	},
	Route{
		"UpdateList",
		"POST",
		"/Update",
		ToDoService.Update,
	},
}
|
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"github.com/AdityaVallabh/swagger_meqa/meqa/mqutil"
"github.com/AdityaVallabh/swagger_meqa/meqa/mqswag"
"github.com/AdityaVallabh/swagger_meqa/meqa/mqplan"
)
// Defaults and the algorithm names accepted by the -a flag.
const (
	meqaDataDir = "meqa_data"
	algoSimple = "simple"
	algoObject = "object"
	algoPath = "path"
	algoAll = "all"
)
// algoList is the full set of plans generated when -a=all is selected.
var algoList []string = []string{algoSimple, algoObject, algoPath}
// main parses the command-line flags and kicks off test-plan generation.
func main() {
	mqutil.Logger = mqutil.NewStdLogger()
	// Default spec location: <meqa_data>/swagger.yml.
	swaggerJSONFile := filepath.Join(meqaDataDir, "swagger.yml")
	meqaPath := flag.String("d", meqaDataDir, "the directory where we put the generated files")
	swaggerFile := flag.String("s", swaggerJSONFile, "the swagger.yml file location")
	algorithm := flag.String("a", "all", "the algorithm - simple, object, path, all")
	verbose := flag.Bool("v", false, "turn on verbose mode")
	allowedAPIsFile := flag.String("w", "", "name of the file (that lists out all fuzzable APIs) along with its relative path. Example testdata/allowedAPIs.cfg")
	ignoredPathsFile := flag.String("i", "", "name of the file (that lists out all ignored paths in APIs) along with its relative path. Example testdata/ignorePaths.cfg")
	flag.Parse()
	run(meqaPath, swaggerFile, algorithm, verbose, allowedAPIsFile, ignoredPathsFile)
}
// run loads the swagger spec, builds its dependency DAG, and writes one
// test-plan YAML file per requested algorithm into the plan directory.
// Any failure is reported and terminates the process with exit code 1.
func run(meqaPath *string, swaggerFile *string, algorithm *string, verbose *bool, allowedAPIsFile *string, ignoredPathsFile *string) {
	mqutil.Verbose = *verbose
	swaggerJsonPath := *swaggerFile
	// os.Stat can fail with errors other than "not exist" (e.g. permission
	// denied); the previous code dereferenced the nil FileInfo in that case.
	if fi, err := os.Stat(swaggerJsonPath); err != nil || fi.Mode().IsDir() {
		fmt.Printf("Can't load swagger file at the following location %s", swaggerJsonPath)
		os.Exit(1)
	}
	allowedAPIs := GetList(*allowedAPIsFile)
	ignoredPaths := GetList(*ignoredPathsFile)
	testPlanPath := *meqaPath
	fi, err := os.Stat(testPlanPath)
	switch {
	case os.IsNotExist(err):
		// Plan directory is created on demand.
		if err := os.Mkdir(testPlanPath, 0755); err != nil {
			fmt.Printf("Can't create the directory at %s\n", testPlanPath)
			os.Exit(1)
		}
	case err != nil:
		// Stat failed for a reason other than non-existence; the previous
		// code dereferenced the nil FileInfo here.
		fmt.Printf("Can't access the directory at %s\n", testPlanPath)
		os.Exit(1)
	case !fi.Mode().IsDir():
		fmt.Printf("The specified location is not a directory: %s\n", testPlanPath)
		os.Exit(1)
	}
	// loading swagger.json
	swagger, err := mqswag.CreateSwaggerFromURL(swaggerJsonPath, *meqaPath)
	if err != nil {
		mqutil.Logger.Printf("Error: %s", err.Error())
		os.Exit(1)
	}
	dag := mqswag.NewDAG()
	err = swagger.AddToDAG(dag)
	if err != nil {
		mqutil.Logger.Printf("Error: %s", err.Error())
		os.Exit(1)
	}
	dag.Sort()
	dag.CheckWeight()
	// "all" expands to every known algorithm; otherwise run just the one.
	var plansToGenerate []string
	if *algorithm == algoAll {
		plansToGenerate = algoList
	} else {
		plansToGenerate = append(plansToGenerate, *algorithm)
	}
	for _, algo := range plansToGenerate {
		var testPlan *mqplan.TestPlan
		switch algo {
		case algoPath:
			testPlan, err = mqplan.GeneratePathTestPlan(swagger, dag, allowedAPIs, ignoredPaths)
		case algoObject:
			testPlan, err = mqplan.GenerateTestPlan(swagger, dag)
		default:
			testPlan, err = mqplan.GenerateSimpleTestPlan(swagger, dag)
		}
		if err != nil {
			mqutil.Logger.Printf("Error: %s", err.Error())
			os.Exit(1)
		}
		testPlanFile := filepath.Join(testPlanPath, algo+".yml")
		err = testPlan.DumpToFile(testPlanFile)
		if err != nil {
			mqutil.Logger.Printf("Error: %s", err.Error())
			os.Exit(1)
		}
		fmt.Println("Test plans generated at:", testPlanFile)
	}
}
// GetList reads the file at path (when given) and returns its entries
// as a set-like map. An empty path yields nil; a read failure prints a
// message and exits the process.
func GetList(path string) map[string]bool {
	if path == "" {
		return nil
	}
	list, err := mqswag.GetListFromFile(path)
	if err != nil {
		fmt.Println("Can't read file at the following location:", path)
		os.Exit(1)
	}
	return list
}
|
package main
import (
"crypto/rand"
"fmt"
"gm/sm2"
"io/ioutil"
)
// main runs the SM2 encrypt/decrypt round-trip demo.
func main(){
	testSM2()
}
// testSM2 demonstrates an SM2 round trip: read test.txt, generate a key
// pair, encrypt the file contents with the public key, decrypt with the
// private key, and print every intermediate value.
func testSM2() {
	data, err := ioutil.ReadFile("test.txt")
	if err != nil {
		// Previously this error was silently ignored and a nil slice
		// would have been encrypted.
		fmt.Println(err)
		return
	}
	fmt.Println("read:", string(data))
	priv, pub, err := sm2.GenerateKey(rand.Reader)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("priv:", priv.D)
	fmt.Println("pub.x:", pub.X)
	fmt.Println("pub.y:", pub.Y)
	ciphertext, err := sm2.Encrypt(pub, data)
	if err != nil {
		// The original printed the stale ReadFile error variable here
		// instead of the encryption error.
		fmt.Println(err)
		return
	}
	fmt.Println("C1:", ciphertext.C1)
	fmt.Println("C2:", ciphertext.C2)
	fmt.Println("C3:", ciphertext.C3)
	plaintext, err := sm2.Decrypt(priv, ciphertext)
	if err != nil {
		// Likewise, the decryption error (named `error`, shadowing the
		// builtin) was never the one printed.
		fmt.Println(err)
		return
	}
	fmt.Println("entext:", string(plaintext))
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"crypto/md5"
)
// Digest structure summarizing a CSV/TXT file for identification
type Digest struct {
	preview []string // preview rows (blank and comment lines excluded)
	erows int // estimated total file rows
	comment string // inferred comment line prefix
	sep rune // inferred field separator rune (if CSV)
	split []string // trimmed fields of first preview row split by "sep" (if CSV)
	heading bool // first row probable heading (if CSV)
	md5 string // hash of heading (if CSV with heading)
}
// Tunables for file preview and automatic separator detection.
const (
	previewRows = 6 // number of preview rows returned by PeekCSV
	sepSet = ",\t|;:" // order of separator runes automatically checked if none specified
	maxFieldLen = 256 // maximum field size allowed for PeekCSV to qualify a separator
)
// commentSet lists the line prefixes recognized as comment markers.
var commentSet = [...]string{"#", "//", "'"}
// handleSig is a goroutine that monitors the "sig" channel; when closed, "sigv" is modified.
// Every value sent on sig is stored into *sigv; once sig is closed the
// range loop ends and *sigv is set to -1 as the final halt marker.
// NOTE(review): *sigv is written here and polled from other goroutines
// without synchronization — presumably intentional best-effort
// signaling; confirm this is acceptable under the race detector.
func handleSig(sig <-chan int, sigv *int) {
	go func() {
		for *sigv = range sig {
		}
		*sigv = -1
	}()
}
// readLn returns a channel into which a goroutine writes lines from file at "path" (channels
// also provided for errors and for the caller to signal a halt).
func readLn(path string) (<-chan string, <-chan error, chan<- int) {
	out, err, sig, sigv := make(chan string, 64), make(chan error, 1), make(chan int), 0
	go func() {
		// Panics below become the single value on the error channel; both
		// channels are closed so consumers always unblock.
		defer func() {
			if e := recover(); e != nil {
				err <- e.(error)
			}
			close(err)
			close(out)
		}()
		file, e := os.Open(path)
		if e != nil {
			panic(fmt.Errorf("can't access %q (%v)", path, e))
		}
		defer file.Close()
		// sigv flips non-zero when the caller closes sig (see handleSig),
		// which stops the scan loop early.
		handleSig(sig, &sigv)
		ln := bufio.NewScanner(file)
		for ; sigv == 0 && ln.Scan(); out <- ln.Text() {
		}
		if e := ln.Err(); e != nil {
			panic(fmt.Errorf("problem reading %q (%v)", path, e))
		}
	}()
	return out, err, sig
}
// splitCSV returns the fields of csv split on sep, approximately per
// RFC 4180: double quotes toggle an "enclosed" state in which separator
// runes are literal. Quote characters themselves are dropped, as are
// non-printable characters other than tab.
func splitCSV(csv string, sep rune) (fields []string) {
	var field strings.Builder
	enclosed := false
	for _, r := range csv {
		switch {
		case r > '\x7e' || r != '\x09' && r < '\x20':
			// drop non-printables (tab excepted); alternatively they
			// could be replaced with a blank
		case r == '"':
			enclosed = !enclosed
		case r == sep && !enclosed:
			fields = append(fields, field.String())
			field.Reset()
		default:
			field.WriteRune(r)
		}
	}
	return append(fields, field.String())
}
// PeekCSV returns a digest to identify the CSV (or TXT file) at "path". This digest consists of a
// preview slice of raw data rows (without blank or comment lines), a total file row estimate, the
// comment prefix used (if any), and if a CSV, the field separator, trimmed fields of the first
// data row split by it, a hint whether to treat this row as a heading, and a hash if a heading.
func PeekCSV(path string) (dig Digest, err error) {
	// Panics from the checks below are converted into the returned error.
	defer func() {
		if e := recover(); e != nil {
			err = e.(error)
		}
	}()
	file, e := os.Open(path)
	if e != nil {
		panic(fmt.Errorf("can't access %q (%v)", path, e))
	}
	defer file.Close()
	info, e := file.Stat()
	if e != nil {
		panic(fmt.Errorf("can't access %q metadata (%v)", path, e))
	}
	// row == -1 means "no data row seen yet" (comment-prefix detection only
	// happens on the first non-blank line); tlen accumulates preview byte
	// length for the row estimate; max is the best field count seen so far
	// during separator detection below.
	bf, row, tlen, max := bufio.NewScanner(file), -1, 0, 1
getLine:
	for row < previewRows && bf.Scan() {
		switch ln := bf.Text(); {
		case len(strings.TrimLeft(ln, " ")) == 0:
			// skip blank lines
		case dig.comment != "" && strings.HasPrefix(ln, dig.comment):
			// skip comment lines once a prefix is known
		case row < 0:
			// First non-blank line: try to infer the comment prefix.
			row = 0
			for _, p := range commentSet {
				if strings.HasPrefix(ln, p) {
					dig.comment = p
					continue getLine
				}
			}
			fallthrough
		default:
			row++
			tlen += len(ln)
			dig.preview = append(dig.preview, ln)
		}
	}
	switch e := bf.Err(); {
	case e != nil:
		panic(fmt.Errorf("problem reading %q (%v)", path, e))
	case row < 1:
		panic(fmt.Errorf("%q does not contain data", path))
	case row < previewRows:
		// The whole file fit in the preview, so the row count is exact.
		dig.erows = row
	default:
		// Estimate total rows from file size and the mean preview row
		// length, excluding the first row in case it is an atypical heading.
		dig.erows = int(float64(info.Size())/float64(tlen-len(dig.preview[0])+row-1)*0.995+0.5) * (row - 1)
	}
	// Try each candidate separator in sepSet order; keep the last one that
	// splits every preview row into the same, largest number of fields with
	// no field longer than maxFieldLen.
getSep:
	for _, r := range sepSet {
		c, sl := 0, []string{}
		for _, ln := range dig.preview {
			if sl = splitCSV(ln, r); len(sl) <= max || len(sl) != c && c > 0 {
				continue getSep
			}
			for _, f := range sl {
				if len(f) > maxFieldLen {
					continue getSep
				}
			}
			c = len(sl)
		}
		max, dig.sep = c, r
	}
	if dig.sep > '\x00' {
		// A separator qualified: split the first row, and flag it as a
		// heading when every field is a unique, non-numeric string.
		uf := make(map[string]int, max)
		for _, f := range splitCSV(dig.preview[0], dig.sep) {
			tf := strings.Trim(f, " ")
			if _, e := strconv.ParseFloat(tf, 64); e != nil && len(tf) > 0 {
				uf[tf]++
			}
			dig.split = append(dig.split, tf)
		}
		if dig.heading = len(uf) == max; dig.heading {
			dig.md5 = fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(dig.split, string(dig.sep)))))
		}
	}
	return
}
// ReadTXT returns a channel into which a goroutine writes maps of fixed-field TXT rows from file
// at "path" keyed by "cols" (channels also provided for errors and for the caller to signal a
// halt). Fields selected by byte ranges in the "cols" map are trimmed of blanks; empty fields
// are suppressed; blank lines and those prefixed by "comment" are skipped.
func ReadTXT(path string, cols map[string][2]int, comment string) (<-chan map[string]string, <-chan error, chan<- int) {
	out, err, sig, sigv := make(chan map[string]string, 64), make(chan error, 1), make(chan int), 0
	go func() {
		// Panics become the single value on err; both channels close so
		// consumers always unblock.
		defer func() {
			if e := recover(); e != nil {
				err <- e.(error)
			}
			close(err)
			close(out)
		}()
		in, ierr, isig := readLn(path)
		defer close(isig)
		handleSig(sig, &sigv)
		// wid: line width learned from the first data line; line: 1-based
		// input line number; algn: count of misaligned (wrong-width) lines.
		wid, line, algn := 0, 0, 0
		for ln := range in {
			// One-pass switch per line: "continue" re-evaluates the same
			// line after the width is learned; "break" moves to the next.
			for line++; ; {
				switch {
				case len(strings.TrimLeft(ln, " ")) == 0:
				case comment != "" && strings.HasPrefix(ln, comment):
				case wid == 0:
					// First data line fixes the expected width and lets the
					// column ranges be validated against it.
					wid = len(ln)
					if len(cols) == 0 || len(cols) > wid {
						panic(fmt.Errorf("missing or bad column map provided for TXT file %q", path))
					}
					for _, r := range cols { // TODO: potential range overlaps a feature?
						if r[0] <= 0 || r[0] > r[1] || r[1] > wid {
							panic(fmt.Errorf("bad range in column map provided for TXT file %q", path))
						}
					}
					continue
				case len(ln) != wid:
					// Tolerate up to ~2% misaligned lines (checked after 200 lines).
					if algn++; line > 200 && float64(algn)/float64(line) > 0.02 {
						panic(fmt.Errorf("excessive column misalignment in TXT file %q (>%d rows)", path, algn))
					}
				default:
					// Extract the configured byte ranges (1-based, inclusive).
					m := make(map[string]string, len(cols))
					for c, r := range cols {
						if f := strings.Trim(ln[r[0]-1:r[1]], " "); len(f) > 0 {
							m[c] = f
						}
					}
					if len(m) > 0 {
						// "~line" records the source line number for traceability.
						m["~line"] = strconv.Itoa(line)
						out <- m
					}
				}
				break
			}
			if sigv != 0 {
				return
			}
		}
		if e := <-ierr; e != nil {
			panic(fmt.Errorf("problem reading TXT file %q (%v)", path, e))
		}
	}()
	return out, err, sig
}
// ReadCSV returns a channel into which a goroutine writes field maps of CSV rows from file at
// "path" keyed by "cols" map which also identifies select columns for extraction, or if nil, by
// the heading in the first data row (channels also provided for errors and for the caller to
// signal a halt). CSV separator is "sep", or if \x00, will be inferred. Fields are trimmed of
// blanks and double-quotes (which may enclose separators); empty fields are suppressed; blank
// lines and those prefixed by "comment" are skipped.
func ReadCSV(path string, cols map[string]int, sep rune, comment string) (<-chan map[string]string, <-chan error, chan<- int) {
	out, err, sig, sigv := make(chan map[string]string, 64), make(chan error, 1), make(chan int), 0
	go func() {
		// Panics become the single value on err; both channels close so
		// consumers always unblock.
		defer func() {
			if e := recover(); e != nil {
				err <- e.(error)
			}
			close(err)
			close(out)
		}()
		in, ierr, isig := readLn(path)
		defer close(isig)
		handleSig(sig, &sigv)
		// vcols: resolved column name -> 1-based field index; wid: expected
		// field count; line: 1-based line number; algn: misaligned lines.
		vcols, wid, line, algn := make(map[string]int, 32), 0, 0, 0
		for ln := range in {
			// One-pass switch per line: "continue" re-evaluates the same
			// line after separator/heading resolution; "break" moves on.
			for line++; ; {
				switch {
				case len(strings.TrimLeft(ln, " ")) == 0:
				case comment != "" && strings.HasPrefix(ln, comment):
				case sep == '\x00':
					// Infer the separator: the candidate producing the most
					// fields on this first data line wins.
					for _, r := range sepSet {
						if c := len(splitCSV(ln, r)); c > wid {
							wid, sep = c, r
						}
					}
					continue
				case len(vcols) == 0:
					// Resolve columns from the first data line. sc: caller's
					// trimmed column map; uc: uniqueness check of its indices;
					// mc: its largest index; qc: non-numeric fields (heading hint).
					sl, uc, sc, mc, qc := splitCSV(ln, sep), make(map[int]int), make(map[string]int), 0, make(map[string]int)
					for c, i := range cols {
						if c = strings.Trim(c, " "); c != "" && i > 0 {
							sc[c] = i
							if uc[i]++; i > mc {
								mc = i
							}
						}
					}
					for i, c := range sl {
						if c = strings.Trim(c, " "); c != "" {
							if len(sc) == 0 || sc[c] > 0 {
								vcols[c] = i + 1
							}
							if _, e := strconv.ParseFloat(c, 64); e != nil {
								qc[c] = i + 1
							}
						}
					}
					// Validate the resolution; fall back to the caller's map
					// when the first row is not usable as a heading.
					switch wid = len(sl); {
					case len(sc) == 0 && len(qc) == wid:
					case len(sc) == 0:
						panic(fmt.Errorf("no heading in CSV file %q and no column map provided", path))
					case len(vcols) == len(sc):
					case len(vcols) > 0:
						panic(fmt.Errorf("missing columns in CSV file %q", path))
					case len(qc) == wid || mc > wid:
						panic(fmt.Errorf("column map incompatible with CSV file %q", path))
					case len(uc) < len(sc):
						panic(fmt.Errorf("ambiguous column map provided for CSV file %q", path))
					default:
						vcols = sc
						continue
					}
				default:
					if sl := splitCSV(ln, sep); len(sl) == wid {
						// Emit the selected fields; suppress a row that merely
						// repeats the heading.
						m, heading := make(map[string]string, len(vcols)), true
						for c, i := range vcols {
							f := strings.Trim(sl[i-1], " ")
							if len(f) > 0 {
								m[c] = f
							}
							heading = heading && f == c
						}
						if !heading && len(m) > 0 {
							m["~line"] = strconv.Itoa(line)
							out <- m
						}
					} else if algn++; line > 200 && float64(algn)/float64(line) > 0.02 {
						// Tolerate up to ~2% misaligned lines (checked after 200).
						panic(fmt.Errorf("excessive column misalignment in CSV file %q (>%d rows)", path, algn))
					}
				}
				break
			}
			if sigv != 0 {
				return
			}
		}
		if e := <-ierr; e != nil {
			panic(fmt.Errorf("problem reading CSV file %q (%v)", path, e))
		}
	}()
	return out, err, sig
}
|
package topology // import "github.com/nathanaelle/wireguard-topology"
import (
"fmt"
"io"
"io/ioutil"
"path/filepath"
"text/template"
)
type (
	// Template renders a named, preloaded template into dest using data.
	Template interface {
		Execute(template string, dest io.Writer, data interface{}) (err error)
	}
	// tmpls is the default Template implementation: a name-indexed set of
	// parsed text/template templates.
	tmpls struct {
		templates map[string]*template.Template
	}
	// NetCluster is a set of wireguard interfaces keyed by name.
	NetCluster struct {
		Ifaces map[string]*WGInterface
	}
	// WGInterface describes one wireguard interface: its host, addressing,
	// key material, peers, the templates to render for it, and free-form
	// extras in Misc.
	WGInterface struct {
		Host string
		Address string
		LocalIP string
		Iface string
		PrivateKey string
		ListenPort uint16
		FirewallMark uint32
		Peers map[string]*WGPeer
		Templates []string
		Misc map[string]interface{}
	}
	// WGPeer describes one peer of a WGInterface (keys, allowed IPs,
	// endpoint and keepalive settings).
	WGPeer struct {
		Host string
		PublicKey string
		PreSharedKey string
		AllowedIPs string
		Address string
		PeerIP string
		EndPoint string
		PersistentKeepalive uint16
	}
)
// LoadTemplates compiles all the available templates in a folder (not
// the subfolders). A template is a non-empty file with the extension
// .tmpl; the file name minus that extension becomes the template's key.
// An empty dir yields a usable Template with no entries.
func LoadTemplates(dir string) (Template, error) {
	t := &tmpls{
		templates: make(map[string]*template.Template),
	}
	if dir == "" {
		return t, nil
	}
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, fmt.Errorf("can't read %q : %v", dir, err)
	}
	for _, file := range files {
		name := file.Name()
		// Only plain, non-empty *.tmpl files are considered.
		if file.IsDir() || file.Size() == 0 || filepath.Ext(name) != ".tmpl" {
			continue
		}
		fileData, readErr := ioutil.ReadFile(filepath.Join(dir, name))
		if readErr != nil {
			return nil, fmt.Errorf("can't read template %q : %v", name, readErr)
		}
		key := name[:len(name)-len(".tmpl")]
		parsed, parseErr := template.New(key).Parse(string(fileData))
		if parseErr != nil {
			return nil, fmt.Errorf("can't parse template %q : %v", name, parseErr)
		}
		t.templates[key] = parsed
	}
	return t, nil
}
// Execute renders the named template into dest with the given data. It
// returns an error when no template with that name was loaded.
func (t *tmpls) Execute(template string, dest io.Writer, data interface{}) error {
	tmpl, ok := t.templates[template]
	if !ok {
		return fmt.Errorf("can't load template %q", template)
	}
	return tmpl.Execute(dest, data)
}
// Render applies the templates to every interface of every cluster and
// exports the result into output: one folder per host, one entry per
// template listed on the interface.
func Render(output Output, t Template, clusters map[string]*NetCluster) error {
	for _, cluster := range clusters {
		for _, wgiface := range cluster.Ifaces {
			if err := output.AddFolder(wgiface.Host); err != nil {
				return err
			}
			for _, template := range wgiface.Templates {
				writecloser, err := output.AddEntry(wgiface.Host, template)
				if err != nil {
					return err
				}
				// Close each entry as soon as it is rendered. The previous
				// code deferred Close inside the loop, keeping every entry
				// open until Render returned and discarding Close errors.
				execErr := t.Execute(template, writecloser, wgiface)
				closeErr := writecloser.Close()
				if execErr != nil {
					return execErr
				}
				if closeErr != nil {
					return closeErr
				}
			}
		}
	}
	return nil
}
|
package pxmgo
import (
"errors"
"io"
"github.com/jjeffcaii/mongo-proxy/protocol"
)
// Context represents one proxied connection: middlewares can be attached
// with Use, raw bytes or protocol messages sent, and inbound messages
// consumed from the channel returned by Next.
type Context interface {
	io.Closer
	Use(middlewares ...Middleware) Context
	Send(bs []byte) error
	SendMessage(msg protocol.Message) error
	Next() <-chan protocol.Message
}
// Endpoint communicate endpoint for routing messages.
type Endpoint interface {
	io.Closer
	Serve(handler func(ctx Context)) error
}
// EOF re-exports io.EOF as this package's end-of-stream sentinel.
var EOF = io.EOF
// Ignore is the "skip message" sentinel — presumably returned by a
// Middleware to drop the current message; confirm against callers.
var Ignore = errors.New("skip message")
// Authenticator is a Middleware whose Wait method reports the database
// name once authentication completes (ok is false on failure).
type Authenticator interface {
	Middleware
	Wait() (db *string, ok bool)
}
// Middleware processes request messages for a Context.
type Middleware interface {
	// Handle handles one request message.
	Handle(ctx Context, req protocol.Message) error
}
|
package models
//1. 思った通りにならなかったとき、しばらく考え込む 01
//2. 自分のいい面、得意なことを聞かれたら10個言える 10
//3. トラブルが起きた時、まず人に頼る 01
//4. 人と比較することが多い 01
//5. 昔から「マイペースだよね」とよく言われる 10
//6. 過去を振り返ったときに、結果を出してきたことがない 01
//7. 何かにつけて「無理」「できない」などの言葉が出がち 01
//8. 電車やエスカレーターの乗り降りなどでノロノロした人にイライラする 01
//9. 人の顔色が気になる 01
//10. チームで活動することが楽しい、好き 10
//11.朝起きて鏡を見たとき、嫌なところに目が行く 01
//12.アイデア力、発想力は高いと思う 10
//13.職場や学校で注意や指摘を受けると落ち込む 01
//14.やるぞ!と決めても本当にこれでいいのか考えてしまい、なかなか行動できない 01
//15.自分のペースを乱されるとイラっとしてしまう 01
//16.友達は多い方だ 10
//17.出かける前の洋服選びにかなり時間をかける 01
//18.新しいことにチャレンジしたいが、自分には難しいと感じる 01
//19.人から言われた何気ない一言が頭から消えないことがある 01
//20.落ち込んだときの自分のモチベーションのあげ方は分かっている 10
//21.周りとの人間関係は上手くいっている方だ 01
//22.直接「ありがとう」と言われない仕事は嫌だ 01
//26~47
// Mentality holds one answer per questionnaire item (see the numbered
// list above) plus an aggregate Rate. The //y and //n markers appear to
// indicate which answer scores positively for each item — confirm
// against the scoring code that consumes this struct.
type Mentality struct {
	One string //n
	Two string //y
	Three string //n
	Four string //n
	Five string //y
	Six string //n
	Seven string //n
	Eight string //n
	Nine string //n
	Ten string //y
	Eleven string //n
	Twelve string //y
	Thirteen string //n
	Fourteen string //n
	Fifteen string //n
	Sixteen string //y
	Seventeen string //n
	Eighteen string //n
	Nineteen string //n
	Twenty string //y
	TwentyOne string //n
	TwentyTwo string //n
	Rate int
}
|
package config
import _ "github.com/joho/godotenv/autoload"
|
package stats
import (
"context"
"strings"
"time"
)
// Tags should use a key:value format.
// Collector is the metrics and notification sink used by this package.
type Collector interface {
	// Inform emits an FYI entry with a title and body text.
	Inform(string, string, ...string)
	// Error reports an error resulting in a notification.
	Error(error, ...string)
	// Count measures the rate of events over dT; an Inc = Count(1), Dec = Count(-1).
	Count(string, float64, ...string)
	// Gauge logs the value at time T.
	Gauge(string, float64, ...string)
	// Timing logs the duration at T for count/avg/median/max/95percentile.
	Timing(string, time.Duration, ...string)
	Histogram(string, float64, ...string)
	Close()
	// Tags returns the tags attached to this collector.
	Tags() []string
	// With returns a derived Collector carrying the additional tags.
	With(...string) Collector
}
// contextKey is an unexported key type for values this package stores in
// a context.Context, preventing collisions with other packages' keys.
type contextKey struct {
	v string
}
// ctxKeyCollector is the context key under which a Collector is stored.
//nolint:gochecknoglobals
var ctxKeyCollector = contextKey{"collector"}
// ContextWithCollector creates a new context with an instance of Collector.
// Retrieve it later with CollectorFromContext.
func ContextWithCollector(ctx context.Context, c Collector) context.Context {
	return context.WithValue(ctx, ctxKeyCollector, c)
}
// CollectorFromContext returns the Collector stored in ctx by
// ContextWithCollector, or nil when none was stored.
func CollectorFromContext(ctx context.Context) Collector {
	v := ctx.Value(ctxKeyCollector)
	if v == nil {
		return nil
	}
	return v.(Collector)
}
// discardCollector is a no-op Collector: every method drops its input.
type discardCollector struct{}
// NewDiscardCollector returns a Collector that silently discards all metrics.
func NewDiscardCollector() Collector { return &discardCollector{} }
func (*discardCollector) Inform(_, _ string, _ ...string) {}
func (*discardCollector) Error(_ error, _ ...string) {}
func (*discardCollector) Count(_ string, _ float64, _ ...string) {}
func (*discardCollector) Gauge(_ string, _ float64, _ ...string) {}
func (*discardCollector) Timing(_ string, _ time.Duration, _ ...string) {}
func (*discardCollector) Histogram(_ string, _ float64, _ ...string) {}
func (*discardCollector) Close() {}
func (*discardCollector) Tags() []string { return nil }
// With returns the receiver itself: discarding composes with anything.
func (dc *discardCollector) With(...string) Collector { return dc }
// replacer maps ' ' and '.' to '_'. Safe for concurrent use.
//nolint:gochecknoglobals
var replacer = strings.NewReplacer(" ", "_", ".", "_")

// Sanitise returns a copy of in, lowercased, with ' ' and '.' replaced by '_'.
func Sanitise(in string) string {
	lowered := strings.ToLower(in)
	return replacer.Replace(lowered)
}
// withCollector decorates another Collector, appending a fixed set of
// tags to every forwarded call.
type withCollector struct {
	tags []string
	c Collector
}
// NewWithCollector wraps c so every metric carries the given tags in
// addition to any tags supplied at the call site.
func NewWithCollector(c Collector, tags ...string) Collector {
	return &withCollector{
		tags: tags,
		c: c,
	}
}
// allTags returns the wrapper's fixed tags followed by the call-site tags.
func (wc *withCollector) allTags(tags ...string) []string {
	t := make([]string, 0, len(wc.tags)+len(tags))
	return append(append(t, wc.tags...), tags...)
}
// Inform forwards to the wrapped Collector with the combined tags.
func (wc *withCollector) Inform(title, text string, tags ...string) {
	wc.c.Inform(title, text, wc.allTags(tags...)...)
}
// Error forwards to the wrapped Collector with the combined tags.
func (wc *withCollector) Error(err error, tags ...string) {
	wc.c.Error(err, wc.allTags(tags...)...)
}
// Count forwards to the wrapped Collector with the combined tags.
func (wc *withCollector) Count(stat string, count float64, tags ...string) {
	wc.c.Count(stat, count, wc.allTags(tags...)...)
}
// Gauge forwards to the wrapped Collector with the combined tags.
func (wc *withCollector) Gauge(stat string, value float64, tags ...string) {
	wc.c.Gauge(stat, value, wc.allTags(tags...)...)
}
// Timing forwards to the wrapped Collector with the combined tags.
func (wc *withCollector) Timing(stat string, value time.Duration, tags ...string) {
	wc.c.Timing(stat, value, wc.allTags(tags...)...)
}
// Histogram forwards to the wrapped Collector with the combined tags.
func (wc *withCollector) Histogram(stat string, value float64, tags ...string) {
	wc.c.Histogram(stat, value, wc.allTags(tags...)...)
}
// Close closes the wrapped Collector.
func (wc *withCollector) Close() {
	wc.c.Close()
}
// Tags returns the wrapped Collector's tags followed by this wrapper's.
func (wc *withCollector) Tags() []string {
	ot := wc.c.Tags()
	t := make([]string, 0, len(wc.tags)+len(ot))
	return append(append(t, ot...), wc.tags...)
}
// With layers another wrapper on top, adding further fixed tags.
func (wc *withCollector) With(tags ...string) Collector {
	return NewWithCollector(wc, tags...)
}
|
package main
import (
"encoding/json"
"fmt"
"runtime"
"sort"
"sync"
)
// globalIndex is the next QB index for a worker to claim; guarded by mutex.
var globalIndex int
// mutex serializes worker access to globalIndex.
var mutex sync.Mutex
// main parses player projections, scores and filters each position,
// fans roster generation out across all CPU cores, merges and
// deduplicates the results, and prints the top 100 rosters as JSON.
func main() {
	p := Parser{}
	QBs, RBs, WRs, TEs, DSTs := p.Parse()
	// Score every player with the position-specific model.
	for _, q := range QBs {
		q.ScoreQB()
	}
	for _, r := range RBs {
		r.ScoreRB()
	}
	for _, w := range WRs {
		w.ScoreWR()
	}
	for _, t := range TEs {
		t.ScoreTE()
	}
	for _, d := range DSTs {
		d.ScoreDST()
	}
	// First filter pass: drop players below per-position point floors
	// before the adjustment step.
	QBsTemp := make([]*Player, 0)
	RBsTemp := make([]*Player, 0)
	WRsTemp := make([]*Player, 0)
	TEsTemp := make([]*Player, 0)
	for _, q := range QBs {
		if q.ProjectedPoints >= 15 {
			QBsTemp = append(QBsTemp, q)
		}
	}
	for _, r := range RBs {
		if r.ProjectedPoints >= 8 {
			RBsTemp = append(RBsTemp, r)
		}
	}
	for _, w := range WRs {
		if w.ProjectedPoints >= 10 {
			WRsTemp = append(WRsTemp, w)
		}
	}
	for _, t := range TEs {
		if t.ProjectedPoints >= 7 {
			TEsTemp = append(TEsTemp, t)
		}
	}
	QBs = QBsTemp
	RBs = RBsTemp
	WRs = WRsTemp
	TEs = TEsTemp
	AdjustPlayers(QBs, RBs, WRs, TEs, DSTs)
	// Second filter pass with lower floors, since AdjustPlayers may have
	// changed the projections.
	QBsTemp = make([]*Player, 0)
	RBsTemp = make([]*Player, 0)
	WRsTemp = make([]*Player, 0)
	TEsTemp = make([]*Player, 0)
	for _, q := range QBs {
		if q.ProjectedPoints >= 10 {
			QBsTemp = append(QBsTemp, q)
			//fmt.Printf("%s - %f\n", q.Name, q.ProjectedPoints)
		}
	}
	//fmt.Printf("----\n")
	for _, r := range RBs {
		if r.ProjectedPoints >= 4 {
			RBsTemp = append(RBsTemp, r)
			//fmt.Printf("%s - %f\n", r.Name, r.ProjectedPoints)
		}
	}
	//fmt.Printf("----\n")
	for _, w := range WRs {
		if w.ProjectedPoints >= 4 {
			WRsTemp = append(WRsTemp, w)
			//fmt.Printf("%s - %f\n", w.Name, w.ProjectedPoints)
		}
	}
	//fmt.Printf("----\n")
	for _, t := range TEs {
		if t.ProjectedPoints >= 3 {
			TEsTemp = append(TEsTemp, t)
			//fmt.Printf("%s - %f\n", t.Name, t.ProjectedPoints)
		}
	}
	QBs = QBsTemp
	RBs = RBsTemp
	WRs = WRsTemp
	TEs = TEsTemp
	/*
		fmt.Printf("%d\n", len(QBs))
		fmt.Printf("%d\n", len(RBs))
		fmt.Printf("%d\n", len(WRs))
		fmt.Printf("%d\n", len(TEs))
		return
	*/
	// Sort each position by projected points (create/addFLEX rely on
	// descending-order traversal).
	sort.Sort(PlayersPoints(QBs))
	sort.Sort(PlayersPoints(RBs))
	sort.Sort(PlayersPoints(WRs))
	sort.Sort(PlayersPoints(TEs))
	sort.Sort(PlayersPoints(DSTs))
	// One worker per core; workers claim QBs via the shared globalIndex.
	cores := runtime.NumCPU()
	wgs := make([]sync.WaitGroup, cores)
	rosters := make([]Rosters, cores)
	for i := 0; i < cores; i++ {
		wgs[i].Add(1)
		go func(pid int) {
			rosters[pid] = create(pid, QBs, RBs, WRs, TEs, DSTs)
			wgs[pid].Done()
		}(i)
	}
	for i := 0; i < cores; i++ {
		wgs[i].Wait()
	}
	// Merge per-worker results, then drop duplicate rosters (O(n^2) scan).
	master := make(Rosters, 0)
	for i := 0; i < cores; i++ {
		sort.Sort(rosters[i])
		master = append(master, rosters[i]...)
	}
	rostersNoDupes := make(Rosters, 0)
	for _, roster := range master {
		found := false
		for _, inner := range rostersNoDupes {
			if inner.Equal(roster) {
				found = true
				break
			}
		}
		if !found {
			rostersNoDupes = append(rostersNoDupes, roster)
		}
	}
	// Keep only the top 100 and emit them as indented JSON.
	sort.Sort(rostersNoDupes)
	if len(rostersNoDupes) > 100 {
		rostersNoDupes = rostersNoDupes[:100]
	}
	b, _ := json.MarshalIndent(rostersNoDupes, "", "\t")
	fmt.Printf("%s\n", string(b))
}
// create is one roster-building worker: it repeatedly claims the next
// unprocessed QB (via the shared globalIndex, guarded by mutex) and
// enumerates candidate rosters around that QB, nesting over WR/RB/TE/DST
// slots and delegating the FLEX slot to addFLEX. Team-duplication rules
// are enforced at each level; slice offsets (rb1I+1 etc.) avoid
// revisiting permutations of the same players.
func create(pid int, QBs, RBs, WRs, TEs, DSTs []*Player) Rosters {
	candidates := make(Rosters, 0, 100000)
	var q *Player
	for {
		// Claim the next QB, or stop when all have been processed.
		mutex.Lock()
		if globalIndex < len(QBs) {
			q = QBs[globalIndex]
			globalIndex += 1
			mutex.Unlock()
		} else {
			mutex.Unlock()
			break
		}
		roster := &Roster{}
		roster.addPlayer(q, 0, false)
		roster.popPlayer(WR, 0)
		for wr1I, wr1 := range WRs { // wr1
			added := roster.addPlayer(wr1, 0, false)
			if !added {
				continue
			}
			roster.popPlayer(RB, 0)
			for rb1I, rb1 := range RBs { // rb1
				added := roster.addPlayer(rb1, 0, false)
				if !added {
					continue
				}
				roster.popPlayer(RB, 1)
				for rb2I, rb2 := range RBs[rb1I+1:] { //rb2
					if rb2.team == rb1.team {
						continue
					}
					added := roster.addPlayer(rb2, 1, false)
					if !added {
						continue
					}
					roster.popPlayer(WR, 1)
					for wr2I, wr2 := range WRs[wr1I+1:] { // wr2
						if wr2.team == wr1.team {
							continue
						}
						added := roster.addPlayer(wr2, 1, false)
						if !added {
							continue
						}
						roster.popPlayer(TE, 0)
						for _, te := range TEs { // te
							added := roster.addPlayer(te, 0, false)
							if !added {
								continue
							}
							roster.popPlayer(WR, 2)
							for wr3I, wr3 := range WRs[wr2I+1:] { // wr3
								if wr3.team == wr1.team || wr3.team == wr2.team {
									continue
								}
								added := roster.addPlayer(wr3, 2, false)
								if !added {
									continue
								}
								count := 0
								for _, dst := range DSTs { // dst
									roster.popPlayer(DST, 0)
									if count > 2 {
										break
									}
									added := roster.addPlayer(dst, 0, false)
									if !added {
										continue
									}
									// BUG FIX: the original `count += 1` sat after
									// `continue` and was unreachable, so the
									// count > 2 cutoff above never fired and every
									// DST was tried. Count each DST actually used
									// so only the top three (DSTs are sorted by
									// points) are explored.
									count += 1
									addFLEX(roster, RBs[rb2I+1:], WRs[wr3I+1:], &candidates)
									roster.popPlayer(DST, 0)
								} // dst loop
								roster.popPlayer(WR, 2)
							} // wr 3 loop
							roster.popPlayer(TE, 0)
						} // te loop
						roster.popPlayer(WR, 1)
					} // wr 2 loop
					roster.popPlayer(RB, 1)
				} // rb 2 loop
				roster.popPlayer(RB, 0)
			} // rb 1 loop
			roster.popPlayer(WR, 0)
		} //wr 1 loop
	}
	return candidates
}
// addFLEX fills the FLEX slot with the best remaining RB or WR (by
// projected points) whose team is not already used at that position,
// then records the completed roster in candidates when it ranks among
// the best seen so far.
func addFLEX(roster *Roster, RBs, WRs []*Player, candidates *Rosters) {
	var l int
	if len(RBs) > len(WRs) {
		l = len(WRs)
	} else {
		l = len(RBs)
	}
	var rbI, wrI int
	for i := 0; i < l; i++ {
		var f *Player
		var rb *Player
		// Skip candidates on teams already used at RB1/RB2 (resp.
		// WR1/WR2/WR3). The loop condition already bounds the index, so
		// the original's inner `if rbI >= len(RBs) { break }` checks were
		// unreachable dead code and have been removed.
		for rbI < len(RBs) && (RBs[rbI].team == roster.RB1.team || RBs[rbI].team == roster.RB2.team) {
			rbI += 1
		}
		for wrI < len(WRs) && (WRs[wrI].team == roster.WR1.team || WRs[wrI].team == roster.WR2.team || WRs[wrI].team == roster.WR3.team) {
			wrI += 1
		}
		if rbI < len(RBs) {
			rb = RBs[rbI]
		}
		var wr *Player
		if wrI < len(WRs) {
			wr = WRs[wrI]
		}
		if rb == nil && wr == nil {
			break
		}
		// Pick whichever eligible candidate projects more points.
		if wr == nil {
			f = rb
			rbI++
		} else if rb == nil {
			f = wr
			wrI++
		} else if rb.ProjectedPoints < wr.ProjectedPoints {
			f = wr
			wrI++
		} else {
			f = rb
			rbI++
		}
		roster.popPlayer(FLEX, 0)
		if roster.addPlayer(f, 0, true) {
			// Keep the roster when fewer than 100 candidates exist or it
			// beats the current worst.
			if len(*candidates) < 100 || roster.Points >= (*candidates)[len(*candidates)-1].Points {
				cpy := roster.Copy()
				*candidates = append(*candidates, cpy)
				sort.Sort(candidates)
				// NOTE(review): the grow threshold (100000) looks
				// inconsistent with the <100 check above and the cut to
				// 100 — confirm the intended candidate cap.
				if len(*candidates) > 100000 {
					*candidates = (*candidates)[:100]
				}
			}
			roster.popPlayer(FLEX, 0)
			break
		}
	}
}
|
package subscription
import (
"context"
"github.com/syncromatics/kafmesh/internal/graph/resolvers"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//go:generate mockgen -source=./subscribers.go -destination=./subscribers_mock_test.go -package=subscription_test
// PodLister gets the pods running in the cluster.
type PodLister interface {
	List(context.Context, metav1.ListOptions) (*corev1.PodList, error)
}
// Factory returns a watch grpc client for the given url.
type Factory interface {
	Client(ctx context.Context, url string) (Watcher, error)
}
// Compile-time check that *Subscribers satisfies resolvers.Subscribers.
var _ resolvers.Subscribers = &Subscribers{}
// Subscribers provides real time subscription handlers.
type Subscribers struct {
	PodLister PodLister
	Factory Factory
	ProcessorRepository ProcessorRepository
}
// NewSubscribers creates new subscribers, using the default
// ClientFactory for watch connections.
func NewSubscribers(podLister PodLister, processorRepository ProcessorRepository) *Subscribers {
	return &Subscribers{
		PodLister: podLister,
		Factory: &ClientFactory{},
		ProcessorRepository: processorRepository,
	}
}
// Processor returns the processor subscriber handler, wired with this
// Subscribers' factory, pod lister, and repository.
func (s *Subscribers) Processor() resolvers.ProcessorWatcher {
	return &Processor{
		Factory: s.Factory,
		PodLister: s.PodLister,
		ProcessorRepository: s.ProcessorRepository,
	}
}
|
package handler
import (
"errors"
"net/http"
"strconv"
"github.com/mafewo/meliexercise/database/mongo"
"github.com/mafewo/meliexercise/models"
"github.com/mafewo/meliexercise/msj"
mgo "gopkg.in/mgo.v2"
)
// GetWeatherByDay get weather by day
func GetWeatherByDay(w http.ResponseWriter, r *http.Request) {
queryVals := r.URL.Query()
day := queryVals.Get("day")
session, collection, err := _connect()
if err != nil {
msj.Set(w, err.Error(), 500).ReturnJSON()
return
}
defer session.Close()
mw := &models.ModelWeather{
Conn: session,
Collection: collection,
Data: nil,
}
dayint, _ := strconv.ParseInt(day, 10, 32)
weathers, err := mw.Getday(int32(dayint))
if err != nil {
msj.Set(w, err.Error(), 404).ReturnJSON()
return
}
_response(w, weathers)
}
// GetResumenWheather get and generate resume to weather from 10 years:
// it loads every stored weather record, aggregates counts per weather
// state plus the peak-rain days, and writes the summary as the response.
func GetResumenWheather(w http.ResponseWriter, r *http.Request) {
	session, collection, err := _connect()
	if err != nil {
		msj.Set(w, err.Error(), 500).ReturnJSON()
		return
	}
	defer session.Close()
	mw := &models.ModelWeather{
		Conn: session,
		Collection: collection,
		Data: nil,
	}
	weathers, err := mw.GetAll()
	if err != nil {
		msj.Set(w, err.Error(), 404).ReturnJSON()
		return
	}
	resumen, err := CalculateResumen(weathers)
	if err != nil {
		msj.Set(w, err.Error(), 404).ReturnJSON()
		return
	}
	_response(w, resumen)
}
// CalculateWeather calulate the weather to Solar System
//
// Recomputes the whole collection: drops it, then inserts one record per day,
// classifying each day as Drougth, Optimal, Rain (with perimeter) or Unknown.
func CalculateWeather(sliceSS []models.SolarSystem) error {
	session, collection, err := _connect()
	if err != nil {
		return err
	}
	defer session.Close()
	weatherModel := &models.ModelWeather{Conn: session, Collection: collection, Data: nil}
	if err = weatherModel.DropCollection(); err != nil {
		return err
	}
	for i, ss := range sliceSS {
		// Days are numbered starting at 1.
		record := models.Weather{Day: i + 1}
		switch {
		case DroughtDay(ss):
			record.Estate = "Drougth"
		case OptimalDay(ss):
			record.Estate = "Optimal"
		case RainDay(ss):
			record.Estate = "Rain"
			record.Perimeter = int(ss.Perimiter())
		default:
			record.Estate = "Unknown"
		}
		if _, err = weatherModel.Insert(record); err != nil {
			return err
		}
	}
	return nil
}
// CalculateResumen calulate the weather resumen to Solar System from to 10 years
//
// Tallies how many days fall in each state and records which rain days hit
// the maximum perimeter ("DaysStrom").
func CalculateResumen(weathers []models.Weather) (map[string]interface{}, error) {
	summary := make(map[string]interface{})
	maxPerimeter, err := GetMaxRain()
	if err != nil {
		return summary, err
	}
	var stormDays []int
	drought, optimal, rain, unknown := 0, 0, 0, 0
	for i, w := range weathers {
		currentDay := i + 1
		switch w.Estate {
		case "Drougth":
			drought++
		case "Optimal":
			optimal++
		case "Rain":
			rain++
			if w.Perimeter == maxPerimeter {
				stormDays = append(stormDays, currentDay)
			}
		default:
			unknown++
		}
	}
	summary["Drougth"] = drought
	summary["Optimal"] = optimal
	summary["Rain"] = rain
	summary["Unknown"] = unknown
	summary["DaysStrom"] = stormDays
	return summary, nil
}
// GetMaxRain obtein a max perimeter of the rain days
//
// On any failure the zero perimeter is returned alongside the error.
func GetMaxRain() (int, error) {
	var max models.Weather
	session, collection, err := _connect()
	if err != nil {
		return max.Perimeter, err
	}
	defer session.Close()
	weatherModel := &models.ModelWeather{Conn: session, Collection: collection, Data: nil}
	if max, err = weatherModel.GetMaxRain(); err != nil {
		return max.Perimeter, err
	}
	return max.Perimeter, nil
}
// OptimalDay return if they are parallels
// (delegates entirely to SolarSystem.TheyAreParallels).
func OptimalDay(ss models.SolarSystem) bool {
	return ss.TheyAreParallels()
}

// RainDay return if the sun is inside the triangle
// (delegates entirely to SolarSystem.TriangleContainSun).
func RainDay(ss models.SolarSystem) bool {
	return ss.TriangleContainSun()
}
// DroughtDay return if the planets are aligned with the center
//
// A day is a drought when the planets sit on the axes, or when they are
// parallel and that alignment passes through the sun.
func DroughtDay(ss models.SolarSystem) bool {
	return ss.TheyAreOnAxes() ||
		(ss.TheyAreParallels() && ss.TheyPassThroughTheSun())
}
// _connect opens a session against the MongoDB cluster and returns the
// session, the target collection, and an error on failure.
func _connect() (*mgo.Session, *mgo.Collection, error) {
	// Connect to the DB.
	// NOTE(review): username, password and host are hard-coded in source;
	// these credentials should live in configuration/environment instead.
	session, collection, err := mongo.NewMG(
		"Melidb",
		"@melidb",
		"cluster0-shard-00-00-nc9ip.mongodb.net",
		"27017",
		"weather",
		"test",
	).InitializeDatabase()
	if err != nil {
		// Runtime error text intentionally unchanged (Spanish:
		// "could not connect to the database").
		return nil, nil, errors.New("no se pudo conectar a la base de datos")
	}
	return session, collection, nil
}
|
// Copyright 2021 Praetorian Security, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"log"
"os"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/transport/ssh"
)
// CloneModule clones a remote git repository into dir.
// An optional keyfile may be specified for use in ssh authentication.
// If quiet is true, don't print clone progress to stdout.
// When branch is non-empty, only that branch reference is checked out.
func CloneModule(dir string, url string, branch string, keyFile string, quiet bool) error {
	// Build the options in a single composite literal instead of a separate
	// zero-value declaration followed by an assignment.
	cloneOptions := git.CloneOptions{
		URL: url,
	}
	if !quiet {
		log.Printf("Cloning new remote module: %s\n", url)
		cloneOptions.Progress = os.Stdout
	}
	if len(branch) != 0 {
		log.Printf("Cloning with remote branch reference: %s\n", branch)
		cloneOptions.ReferenceName = plumbing.NewBranchReferenceName(branch)
	}
	if len(keyFile) != 0 {
		_, err := os.Stat(keyFile)
		if err != nil {
			log.Printf("Read file %s failed %s\n", keyFile, err.Error())
			return err
		}
		// Clone the given repository to the given directory (password set to "")
		publicKeys, err := ssh.NewPublicKeysFromFile("git", keyFile, "")
		if err != nil {
			log.Printf("Generate publickeys from file %s failed: %s\n", keyFile, err.Error())
			return err
		}
		log.Printf("Authenticating with ssh keyfile: %s\n", keyFile)
		cloneOptions.Auth = publicKeys
	}
	_, err := git.PlainClone(dir, false, &cloneOptions)
	return err
}
//CleanupModule attempts to delete a directory.
func CleanupModule(dir string) error {
err := os.RemoveAll(dir)
return err
}
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package cmd
import (
"github.com/spf13/cobra"
"github.com/aws-controllers-k8s/dev-tools/pkg/config"
"github.com/aws-controllers-k8s/dev-tools/pkg/repository"
)
// ensureRepositoriesCmd is the cobra subcommand ("repo", plus aliases) that
// forks and clones every configured repository locally via ensureAllRepositories.
var ensureRepositoriesCmd = &cobra.Command{
	Use:     "repo",
	Aliases: []string{"repos", "repositories", "repository"},
	RunE:    ensureAllRepositories,
	Args:    cobra.NoArgs,
	Short:   "Ensure repositories are forked and cloned locally",
}
// ensureAllRepositories loads the ACK dev-tools configuration, builds the
// repository manager, loads every known repository and ensures each one is
// forked and cloned locally.
func ensureAllRepositories(cmd *cobra.Command, args []string) error {
	cfg, err := config.Load(ackConfigPath)
	if err != nil {
		return err
	}
	mgr, err := repository.NewManager(cfg)
	if err != nil {
		return err
	}
	if err := mgr.LoadAll(); err != nil {
		return err
	}
	return mgr.EnsureAll(cmd.Context())
}
|
package finduser
import (
"errors"
pagerduty "github.com/PagerDuty/go-pagerduty"
)
// UserNotFoundError is returned when no single matching user can be resolved.
// NOTE(review): Go convention would name this ErrUserNotFound with a lowercase,
// unpunctuated message; renaming the exported var would break callers, so it
// is only flagged here.
var UserNotFoundError = errors.New("Could not find specified user.")
// Client embeds the PagerDuty API client and adds user-lookup helpers.
type Client struct {
	pagerduty.Client
}
// FindAndValidate resolves in either as an exact user ID or, failing that,
// as a search query against the user listing.
func (c *Client) FindAndValidate(in string) (*pagerduty.User, error) {
	user, err := c.GetUser(in, pagerduty.GetUserOptions{})
	if err == nil && user.ID == in {
		return user, nil
	}
	return c.FindUser(in)
}
// FindUser searches users matching in and succeeds only when exactly one
// user is found; otherwise UserNotFoundError (or the listing error) is returned.
func (c *Client) FindUser(in string) (*pagerduty.User, error) {
	resp, err := c.ListUsers(pagerduty.ListUsersOptions{Query: in})
	if err != nil {
		return nil, err
	}
	if len(resp.Users) != 1 {
		return nil, UserNotFoundError
	}
	return &resp.Users[0], nil
}
|
/**
Create a for loop using this syntax
for {}
Have it print out the years you have been alive
*/
package main
import (
"fmt"
"time"
)
// main prints every year from the birth year through the current year,
// using the bare `for {}` form required by the exercise.
func main() {
	year := 1995
	for {
		if year > time.Now().Year() {
			break
		}
		fmt.Println(year)
		year++
	}
}
|
package main
import (
"log"
sarama "github.com/Shopify/sarama"
)
// main consumes partition 0 of the "example" topic from a local Kafka broker
// and logs every message it receives.
func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()
	partitionConsumer, err := consumer.ConsumePartition("example", 0, sarama.OffsetNewest)
	if err != nil {
		panic(err)
	}
	defer partitionConsumer.Close()
	// Range over the channel instead of a bare `msg := <-ch` inside for{}:
	// when the partition consumer closes, its Messages channel is closed and
	// the original form would receive nil messages forever (and panic on
	// msg.Value); range terminates cleanly instead.
	for msg := range partitionConsumer.Messages() {
		log.Printf("Consumed message: \"%s\" at offset: %d\n", msg.Value, msg.Offset)
	}
}
|
package chains
import (
"fmt"
"sync"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/hive.go/daemon"
"github.com/iotaledger/hive.go/logger"
"github.com/iotaledger/hive.go/node"
"github.com/iotaledger/wasp/packages/chain"
registry_pkg "github.com/iotaledger/wasp/packages/registry"
"github.com/iotaledger/wasp/plugins/nodeconn"
"github.com/iotaledger/wasp/plugins/peering"
"github.com/iotaledger/wasp/plugins/registry"
)
// PluginName is the name this plugin registers under with the node.
const PluginName = "Chains"

var (
	// log is the plugin-scoped logger, initialised in configure.
	log *logger.Logger
	// chains holds the currently active chains, keyed by chain ID.
	chains = make(map[coretypes.ChainID]chain.Chain)
	// chainsMutex guards all access to the chains map.
	chainsMutex = &sync.RWMutex{}
)
// Init creates the plugin instance, enabled by default, wiring configure and run.
func Init() *node.Plugin {
	return node.NewPlugin(PluginName, node.Enabled, configure, run)
}

// configure initialises the plugin-scoped logger.
func configure(_ *node.Plugin) {
	log = logger.NewLogger(PluginName)
}
// run is the plugin's background worker: it loads all chain records from the
// registry, activates every record marked active, then blocks until shutdown,
// at which point all running chains are dismissed.
func run(_ *node.Plugin) {
	err := daemon.BackgroundWorker(PluginName, func(shutdownSignal <-chan struct{}) {
		chainRecords, err := registry_pkg.GetChainRecords()
		if err != nil {
			// BUG FIX: the original called log.Error with a format verb and
			// an argument; Errorf is the formatting variant.
			log.Errorf("failed to load chain records from registry: %v", err)
			return
		}
		// Short, human-readable chain IDs for the debug line below.
		astr := make([]string, len(chainRecords))
		for i := range astr {
			astr[i] = chainRecords[i].ChainID.String()[:10] + ".."
		}
		log.Debugf("loaded %d chain record(s) from registry: %+v", len(chainRecords), astr)
		for _, chr := range chainRecords {
			if chr.Active {
				if err := ActivateChain(chr); err != nil {
					log.Errorf("cannot activate committee %s: %v", chr.ChainID, err)
				}
			}
		}
		<-shutdownSignal
		func() {
			log.Infof("shutdown signal received: dismissing committees..")
			chainsMutex.RLock()
			defer chainsMutex.RUnlock()
			for _, com := range chains {
				com.Dismiss()
			}
			log.Infof("shutdown signal received: dismissing committees.. Done")
		}()
	})
	if err != nil {
		log.Error(err)
		return
	}
}
// ActivateChain activates chain on the Wasp node:
// - creates chain object
// - insert it into the runtime registry
// - subscribes for related transactions in he IOTA node
func ActivateChain(chr *registry_pkg.ChainRecord) error {
	chainsMutex.Lock()
	defer chainsMutex.Unlock()
	if !chr.Active {
		return fmt.Errorf("cannot activate chain for deactivated chain record")
	}
	if _, alreadyActive := chains[chr.ChainID]; alreadyActive {
		log.Debugf("chain is already active: %s", chr.ChainID.String())
		return nil
	}
	// Build the chain object; the callback subscribes the node connection to
	// transactions for this chain's address.
	defaultRegistry := registry.DefaultRegistry()
	newChain := chain.New(chr, log, peering.DefaultNetworkProvider(), defaultRegistry, defaultRegistry, func() {
		nodeconn.Subscribe((address.Address)(chr.ChainID), chr.Color)
	})
	if newChain == nil {
		log.Infof("failed to activate chain:\n%s", chr.String())
		return nil
	}
	chains[chr.ChainID] = newChain
	log.Infof("activated chain:\n%s", chr.String())
	return nil
}
// DeactivateChain deactivates chain in the node
// (dismissing it when it is active; a no-op for unknown or dismissed chains).
func DeactivateChain(chr *registry_pkg.ChainRecord) error {
	chainsMutex.Lock()
	defer chainsMutex.Unlock()
	active, found := chains[chr.ChainID]
	if !found || active.IsDismissed() {
		log.Debugf("chain is not active: %s", chr.ChainID.String())
		return nil
	}
	active.Dismiss()
	log.Debugf("chain has been deactivated: %s", chr.ChainID.String())
	return nil
}
// GetChain returns active chain object or nil if it doesn't exist.
// Dismissed chains are lazily evicted from the registry on lookup.
//
// BUG FIX: this function can mutate the chains map (delete) and must
// therefore take the write lock; the original held only RLock while calling
// delete(), which is a concurrent map write under a read lock.
func GetChain(chainID coretypes.ChainID) chain.Chain {
	chainsMutex.Lock()
	defer chainsMutex.Unlock()
	ret, ok := chains[chainID]
	if ok && ret.IsDismissed() {
		delete(chains, chainID)
		nodeconn.Unsubscribe((address.Address)(chainID))
		return nil
	}
	return ret
}
|
package factories
import (
"database/sql"
"github.com/barrydev/api-3h-shop/src/common/connect"
"github.com/barrydev/api-3h-shop/src/connections"
"github.com/barrydev/api-3h-shop/src/model"
)
// StatisticOrder aggregates order counts grouped by payment and fulfillment
// status, optionally restricted by the given query filter. It returns
// (nil, nil) when the query matches no rows.
func StatisticOrder(query *connect.QueryMySQL) (*model.StatisticOrder, error) {
	connection := connections.Mysql.GetConnection()
	queryString := `
	SELECT
	COALESCE(COUNT(*), 0), COALESCE(SUM(IF(payment_status='pending', 1, 0)), 0), COALESCE(SUM(IF(payment_status='paid', 1, 0)), 0), COALESCE(SUM(IF(fulfillment_status='in-production', 1, 0)), 0), COALESCE(SUM(IF(fulfillment_status='shipped', 1, 0)), 0), COALESCE(SUM(IF(fulfillment_status='cancelled', 1, 0)), 0), COALESCE(SUM(IF(fulfillment_status='fulfilled', 1, 0)), 0)
	FROM orders
	`
	var args []interface{}
	if query != nil {
		queryString += query.QueryString
		args = query.Args
	}
	stmt, err := connection.Prepare(queryString)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	var statistic model.StatisticOrder
	err = stmt.QueryRow(args...).Scan(
		&statistic.TotalOrder,
		&statistic.Pending,
		&statistic.Paid,
		&statistic.InProduction,
		&statistic.Shipped,
		&statistic.Cancelled,
		&statistic.Fulfilled,
	)
	// Every branch of this switch returns, so the original's trailing
	// `return &statistic, nil` was unreachable and has been removed.
	switch err {
	case sql.ErrNoRows:
		// No matching rows: report "no statistics" rather than an error.
		return nil, nil
	case nil:
		return &statistic, nil
	default:
		return nil, err
	}
}
|
package main
import "fmt"
// main prints a fixed 3x2 grid of letters, one row per line,
// letters separated by spaces.
func main() {
	pairs := [3][2]string{
		{"a", "b"},
		{"c", "d"},
		{"e", "f"},
	}
	for _, row := range pairs {
		for _, letter := range row {
			fmt.Printf("%v ", letter)
		}
		fmt.Printf("\n")
	}
}
|
package main
import (
"context"
"crypto/rand"
"errors"
"flag"
"fmt"
"log"
mathRand "math/rand"
"net"
"os"
"os/signal"
"runtime"
"runtime/pprof"
"sync/atomic"
"time"
"github.com/pion/stun"
)
// Command-line flags controlling the benchmark run.
var (
	workers = flag.Int("w", runtime.GOMAXPROCS(0), "concurrent workers") // nolint:gochecknoglobals
	addr = flag.String("addr", "localhost", "target address") // nolint:gochecknoglobals
	port = flag.Int("port", stun.DefaultPort, "target port") // nolint:gochecknoglobals
	duration = flag.Duration("d", time.Minute, "benchmark duration") // nolint:gochecknoglobals
	network = flag.String("net", "udp", "protocol to use (udp, tcp)") // nolint:gochecknoglobals
	cpuProfile = flag.String("cpuprofile", "", "file output of pprof cpu profile") // nolint:gochecknoglobals
	memProfile = flag.String("memprofile", "", "file output of pprof memory profile") // nolint:gochecknoglobals
	realRand = flag.Bool("crypt", false, "use crypto/rand as random source") // nolint:gochecknoglobals
)
// main runs a STUN binding-request benchmark: a set of worker connections
// fire requests at the target for the configured duration, after which the
// requests-per-second rate and error counts are reported. Optional pprof CPU
// and memory profiles can be written on exit.
func main() { // nolint:gocognit
	flag.Parse()
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	start := time.Now()
	var (
		request    int64 // total requests attempted
		requestOK  int64 // requests answered successfully
		requestErr int64 // requests that errored or timed out
	)
	if *cpuProfile != "" {
		f, createErr := os.Create(*cpuProfile)
		if createErr != nil {
			log.Fatalln("failed to create cpu profile output file:", createErr)
		}
		if pprofErr := pprof.StartCPUProfile(f); pprofErr != nil {
			log.Fatalln("failed to start pprof cpu profiling:", pprofErr)
		}
		defer func() {
			pprof.StopCPUProfile()
			if closeErr := f.Close(); closeErr != nil {
				log.Println("failed to close cpu profile output file:", closeErr)
			} else {
				fmt.Println("saved cpu profile to", *cpuProfile)
			}
		}()
	}
	if *memProfile != "" {
		f, createErr := os.Create(*memProfile)
		if createErr != nil {
			log.Panicln("failed to create memory profile output file:", createErr)
		}
		defer func() {
			if pprofErr := pprof.Lookup("heap").WriteTo(f, 1); pprofErr != nil {
				log.Fatalln("failed to write pprof memory profiling:", pprofErr)
			}
			if closeErr := f.Close(); closeErr != nil {
				log.Println("failed to close memory profile output file:", closeErr)
			} else {
				fmt.Println("saved memory profile to", *memProfile)
			}
		}()
	}
	ctx, cancel := context.WithTimeout(context.Background(), *duration)
	defer cancel() // release the timeout's resources on every exit path
	go func() {
		for sig := range signals {
			fmt.Println("stopping on", sig)
			cancel()
		}
	}()
	if *realRand {
		fmt.Println("using crypto/rand as random source for transaction id")
	}
	for i := 0; i < *workers; i++ {
		wConn, connErr := net.Dial(*network, fmt.Sprintf("%s:%d", *addr, *port))
		if connErr != nil {
			// BUG FIX: the original logged the (nil) connection value instead
			// of the dial error.
			log.Panicln("failed to dial:", connErr)
		}
		c, clientErr := stun.NewClient(wConn)
		if clientErr != nil {
			log.Panicln("failed to create client:", clientErr)
		}
		go func() {
			req := stun.New()
			for {
				if *realRand {
					if _, err := rand.Read(req.TransactionID[:]); err != nil {
						log.Fatal("rand.Read failed:", err)
					}
				} else {
					mathRand.Read(req.TransactionID[:]) // nolint:gosec
				}
				req.Type = stun.BindingRequest
				req.WriteHeader()
				atomic.AddInt64(&request, 1)
				if doErr := c.Do(req, func(event stun.Event) {
					if event.Error != nil {
						if !errors.Is(event.Error, stun.ErrTransactionTimeOut) {
							log.Println("event.Error error:", event.Error)
						}
						atomic.AddInt64(&requestErr, 1)
						return
					}
					atomic.AddInt64(&requestOK, 1)
				}); doErr != nil {
					if !errors.Is(doErr, stun.ErrTransactionExists) {
						log.Println("Do() error:", doErr)
					}
					atomic.AddInt64(&requestErr, 1)
				}
			}
		}()
	}
	fmt.Println("workers started")
	<-ctx.Done()
	stop := time.Now()
	rps := int(float64(atomic.LoadInt64(&requestOK)) / stop.Sub(start).Seconds())
	fmt.Println("rps:", rps)
	// BUG FIX: test the atomically-loaded snapshot; the original condition
	// read the shared `requestErr` counter non-atomically while workers were
	// still incrementing it.
	if reqErr := atomic.LoadInt64(&requestErr); reqErr != 0 {
		fmt.Println("errors:", reqErr)
	}
	fmt.Println("total:", atomic.LoadInt64(&request))
}
|
//go:build ignore
// +build ignore
package main
import (
"context"
"fmt"
"github.com/looplab/fsm"
)
// main drives a small state machine through run -> finish -> reset and checks
// that the after_finish callback fired and that the nested events returned
// the machine to "start".
func main() {
	var afterFinishCalled bool
	// Named `machine` instead of `fsm`: the original local variable shadowed
	// the imported fsm package, making the package name unusable below.
	machine := fsm.NewFSM(
		"start",
		fsm.Events{
			{Name: "run", Src: []string{"start"}, Dst: "end"},
			{Name: "finish", Src: []string{"end"}, Dst: "finished"},
			{Name: "reset", Src: []string{"end", "finished"}, Dst: "start"},
		},
		fsm.Callbacks{
			"enter_end": func(ctx context.Context, e *fsm.Event) {
				if err := e.FSM.Event(ctx, "finish"); err != nil {
					fmt.Println(err)
				}
			},
			"after_finish": func(ctx context.Context, e *fsm.Event) {
				afterFinishCalled = true
				if e.Src != "end" {
					panic(fmt.Sprintf("source should have been 'end' but was '%s'", e.Src))
				}
				if err := e.FSM.Event(ctx, "reset"); err != nil {
					fmt.Println(err)
				}
			},
		},
	)
	if err := machine.Event(context.Background(), "run"); err != nil {
		panic(fmt.Sprintf("Error encountered when triggering the run event: %v", err))
	}
	if !afterFinishCalled {
		panic(fmt.Sprintf("After finish callback should have run, current state: '%s'", machine.Current()))
	}
	currentState := machine.Current()
	if currentState != "start" {
		panic(fmt.Sprintf("expected state to be 'start', was '%s'", currentState))
	}
	fmt.Println("Successfully ran state machine.")
}
|
package models
import "encoding/xml"
// SoapResponse models the SOAP envelope returned by the upstream service:
// envelope-level namespace attributes, a Header, and a Body whose
// SendMessageResponse payload carries response status info plus a list of
// CovidResult records.
type SoapResponse struct {
	XMLName xml.Name `xml:"Envelope"`
	Text string `xml:",chardata"`
	Soap string `xml:"soap,attr"`
	Xsd string `xml:"xsd,attr"`
	Xsi string `xml:"xsi,attr"`
	// Header carries only envelope namespace attributes.
	Header struct {
		Text string `xml:",chardata"`
		SOAPENV string `xml:"SOAP-ENV,attr"`
	} `xml:"Header"`
	// Body wraps the SendMessageResponse payload.
	Body struct {
		Text string `xml:",chardata"`
		SendMessageResponse struct {
			Text string `xml:",chardata"`
			Xmlns string `xml:"xmlns,attr"`
			Response struct {
				Text string `xml:",chardata"`
				Xmlns string `xml:"xmlns,attr"`
				// ResponseInfo holds the response date plus status code/message.
				ResponseInfo struct {
					Text string `xml:",chardata"`
					ResponseDate string `xml:"responseDate"`
					Status struct {
						Text string `xml:",chardata"`
						Code string `xml:"code"`
						Message string `xml:"message"`
					} `xml:"status"`
				} `xml:"responseInfo"`
				// ResponseData holds the actual covid result list.
				ResponseData struct {
					Text string `xml:",chardata"`
					Data struct {
						Text string `xml:",chardata"`
						Type string `xml:"type,attr"`
						Q1 string `xml:"q1,attr"`
						Result struct {
							Text string `xml:",chardata"`
							Covid []CovidResult `xml:"covid"`
						} `xml:"result"`
					} `xml:"data"`
				} `xml:"responseData"`
			} `xml:"response"`
		} `xml:"SendMessageResponse"`
	} `xml:"Body"`
}
// CovidResult is one covid record from the SOAP response: patient identity,
// the stated reason the sample was taken, and probe status/result timestamps.
// All fields are kept as strings exactly as they appear in the XML.
type CovidResult struct {
	Text string `xml:",chardata"`
	Key string `xml:"Key"`
	// Patient identity and demographic fields.
	Patient struct {
		Text string `xml:",chardata"`
		IIN string `xml:"IIN"`
		IsResident string `xml:"IsResident"`
		Birthday string `xml:"Birthday"`
		Gender string `xml:"Gender"`
		FirstName string `xml:"FirstName"`
		LastName string `xml:"LastName"`
		MiddleName string `xml:"MiddleName"`
		AddressOfActualResidence string `xml:"AddressOfActualResidence"`
		PlaceOfStudyOrWork string `xml:"PlaceOfStudyOrWork"`
	} `xml:"Patient"`
	HasSymptomsCOVID string `xml:"HasSymptomsCOVID"`
	AccordingToEpidemiologicalIndications struct {
		Text string `xml:",chardata"`
		Type string `xml:"Type"`
	} `xml:"AccordingToEpidemiologicalIndications"`
	ForThePurposeOfEpidemiologicalSurveillance struct {
		Text string `xml:",chardata"`
		Type string `xml:"Type"`
		Other string `xml:"Other"`
		Diagnosis string `xml:"Diagnosis"`
	} `xml:"ForThePurposeOfEpidemiologicalSurveillance"`
	ForPreventivePurposes struct {
		Text string `xml:",chardata"`
		Type string `xml:"Type"`
		Other string `xml:"Other"`
	} `xml:"ForPreventivePurposes"`
	ProbeStatus string `xml:"ProbeStatus"`
	CollectedTime string `xml:"CollectedTime"`
	ProtocolDate string `xml:"ProtocolDate"`
	ResearchResults string `xml:"ResearchResults"`
	CreatedAt string `xml:"CreatedAt"`
}
|
package xlsx
import (
"errors"
"fmt"
"github.com/plandem/xlsx/format"
"github.com/plandem/xlsx/internal"
"github.com/plandem/xlsx/internal/ml"
"github.com/plandem/xlsx/internal/number_format"
"github.com/plandem/xlsx/internal/number_format/convert"
"github.com/plandem/xlsx/types"
"math"
"strconv"
"time"
)
//Cell is a higher level object that wraps ml.Cell with functionality
type Cell struct {
	ml *ml.Cell // raw cell markup this object manipulates
	sheet *sheetInfo // owning sheet; gives access to workbook-level state
}

var (
	//errTypeMismatch is returned by typed accessors (Date/Int/Float/Bool)
	//when the cell's type cannot be converted to the requested Go type.
	errTypeMismatch = errors.New("type mismatch")
)

//Type returns current type of cell
func (c *Cell) Type() types.CellType {
	return c.ml.Type
}
//Value returns current raw value of cell: resolved rich text for inline and
//shared strings, or the stored raw value for every other type.
func (c *Cell) Value() string {
	switch c.ml.Type {
	case types.CellTypeInlineString:
		if c.ml.InlineStr == nil {
			return ""
		}
		return fromRichText(c.ml.InlineStr)
	case types.CellTypeSharedString:
		sid := 0
		if len(c.ml.Value) > 0 {
			sid, _ = strconv.Atoi(c.ml.Value)
		}
		return fromRichText(c.sheet.workbook.doc.sharedStrings.get(sid))
	default:
		return c.ml.Value
	}
}
//String returns formatted value as string respecting cell number format and type. Any errors ignored to conform String() interface.
func (c *Cell) String() string {
	//if cell has error, then just return value that Excel put here
	if c.ml.Type == types.CellTypeError {
		return c.ml.Value
	}
	code := c.sheet.workbook.doc.styleSheet.resolveNumberFormat(c.ml.Style)
	//N.B.: Maybe it's not a good idea to use resolved value (e.g. inline string) for conversion?!
	return numberFormat.Format(c.Value(), code, c.ml.Type)
}

//Date try to convert and return current raw value as time.Time
//NOTE(review): on type mismatch this returns time.Now() rather than the zero
//time — callers must check the error before using the returned value.
func (c *Cell) Date() (time.Time, error) {
	if c.ml.Type == types.CellTypeDate || c.ml.Type == types.CellTypeNumber || c.ml.Type == types.CellTypeGeneral {
		return convert.ToDate(c.ml.Value)
	}
	return time.Now(), errTypeMismatch
}
//Int try to convert and return current raw value as int
func (c *Cell) Int() (int, error) {
	switch c.ml.Type {
	case types.CellTypeNumber, types.CellTypeGeneral:
		return convert.ToInt(c.ml.Value)
	}
	return 0, errTypeMismatch
}

//Float try to convert and return current raw value as float64
func (c *Cell) Float() (float64, error) {
	switch c.ml.Type {
	case types.CellTypeNumber, types.CellTypeGeneral:
		return convert.ToFloat(c.ml.Value)
	}
	return math.NaN(), errTypeMismatch
}

//Bool try to convert and return current raw value as bool
func (c *Cell) Bool() (bool, error) {
	switch c.ml.Type {
	case types.CellTypeBool, types.CellTypeGeneral, types.CellTypeNumber:
		return convert.ToBool(c.ml.Value)
	}
	return false, errTypeMismatch
}
//setGeneral sets the value as general type, clearing any formula or inline string
func (c *Cell) setGeneral(value string) {
	c.ml.Type = types.CellTypeGeneral
	c.ml.Value = value
	c.ml.Formula = nil
	c.ml.InlineStr = nil
}

//truncateIfRequired truncate string is exceeded allowed size
//NOTE(review): truncation is by bytes (len), so a multi-byte UTF-8 rune at the
//boundary can be split — confirm whether internal.ExcelCellLimit counts bytes
//or characters.
func (c *Cell) truncateIfRequired(value string) string {
	if len(value) > internal.ExcelCellLimit {
		value = value[:internal.ExcelCellLimit]
	}
	return value
}
//SetInlineString sets value as inline string; an empty string degrades to a
//general-typed empty cell.
func (c *Cell) SetInlineString(value string) {
	if value == "" {
		c.setGeneral(value)
		return
	}
	c.ml.Type = types.CellTypeInlineString
	c.ml.Value = ""
	c.ml.Formula = nil
	c.ml.InlineStr = &ml.StringItem{Text: types.Text(c.truncateIfRequired(value))}
}

//SetString sets value as shared string; an empty string degrades to a
//general-typed empty cell.
func (c *Cell) SetString(value string) {
	if value == "" {
		c.setGeneral(value)
		return
	}
	//sharedStrings may only be mutated while the sheet is writable, to
	//prevent pollution of sharedStrings with fake values
	if (c.sheet.mode() & sheetModeWrite) == 0 {
		panic(errorNotSupportedWrite)
	}
	//sharedStrings is the only place that can be mutated from the 'sheet' perspective
	sid := c.sheet.workbook.doc.sharedStrings.addString(c.truncateIfRequired(value))
	c.ml.Formula = nil
	c.ml.Type = types.CellTypeSharedString
	c.ml.Value = strconv.Itoa(sid)
}
//SetText sets shared rich text
func (c *Cell) SetText(parts ...interface{}) error {
	//sharedStrings may only be mutated while the sheet is writable, to
	//prevent pollution of sharedStrings with fake values
	if (c.sheet.mode() & sheetModeWrite) == 0 {
		panic(errorNotSupportedWrite)
	}
	//sharedStrings is the only place that can be mutated from the 'sheet' perspective
	text, err := toRichText(parts...)
	if err != nil {
		return err
	}
	sid := c.sheet.workbook.doc.sharedStrings.addText(text)
	c.ml.Formula = nil
	c.ml.Type = types.CellTypeSharedString
	c.ml.Value = strconv.Itoa(sid)
	return nil
}

//SetInlineText sets inline rich text
func (c *Cell) SetInlineText(parts ...interface{}) error {
	text, err := toRichText(parts...)
	if err != nil {
		return err
	}
	c.ml.Type = types.CellTypeInlineString
	c.ml.Value = ""
	c.ml.Formula = nil
	c.ml.InlineStr = text
	return nil
}
//SetInt sets an integer value; cells without an explicit style get the
//workbook's typed integer style.
func (c *Cell) SetInt(value int) {
	c.ml.Formula = nil
	c.ml.InlineStr = nil
	c.ml.Type = types.CellTypeNumber
	c.ml.Value = strconv.Itoa(value)
	if c.ml.Style == format.DirectStyleID(0) {
		c.ml.Style = c.sheet.workbook.doc.styleSheet.typedStyles[numberFormat.Integer]
	}
}

//SetFloat sets a float value; cells without an explicit style get the
//workbook's typed float style.
func (c *Cell) SetFloat(value float64) {
	c.ml.Formula = nil
	c.ml.InlineStr = nil
	c.ml.Type = types.CellTypeNumber
	c.ml.Value = strconv.FormatFloat(value, 'f', -1, 64)
	if c.ml.Style == format.DirectStyleID(0) {
		c.ml.Style = c.sheet.workbook.doc.styleSheet.typedStyles[numberFormat.Float]
	}
}

//SetBool sets a bool value, stored as "1" or "0".
func (c *Cell) SetBool(value bool) {
	c.ml.Type = types.CellTypeBool
	c.ml.Formula = nil
	c.ml.InlineStr = nil
	raw := "0"
	if value {
		raw = "1"
	}
	c.ml.Value = raw
}
//setDate is a general setter for date types: stores the value as ISO8601 text
//and, when the cell has no explicit style yet, applies the typed style for t.
func (c *Cell) setDate(value time.Time, t numberFormat.Type) {
	c.ml.Type = types.CellTypeDate
	c.ml.Value = value.Format(convert.ISO8601)
	if c.ml.Style == format.DirectStyleID(0) {
		c.ml.Style = c.sheet.workbook.doc.styleSheet.typedStyles[t]
	}
	c.ml.Formula = nil
	c.ml.InlineStr = nil
}

//SetDateTime sets a time value with number format for datetime
func (c *Cell) SetDateTime(value time.Time) {
	c.setDate(value, numberFormat.DateTime)
}

//SetDate sets a time value with number format for date
func (c *Cell) SetDate(value time.Time) {
	c.setDate(value, numberFormat.Date)
}

//SetTime sets a time value with number format for time
func (c *Cell) SetTime(value time.Time) {
	c.setDate(value, numberFormat.Time)
}

//SetDeltaTime sets a time value with number format for delta time
func (c *Cell) SetDeltaTime(value time.Time) {
	c.setDate(value, numberFormat.DeltaTime)
}
//SetValue sets a value, dispatching on the dynamic type of value:
//ints and floats become numbers, strings/bytes become shared strings,
//time.Time becomes a datetime, slices become rich text, nil resets the cell,
//and anything else is stored via fmt's default formatting.
func (c *Cell) SetValue(value interface{}) {
	switch typed := value.(type) {
	case int:
		c.SetInt(typed)
	case int8:
		c.SetInt(int(typed))
	case int16:
		c.SetInt(int(typed))
	case int32:
		c.SetInt(int(typed))
	case int64:
		c.SetInt(int(typed))
	case float32:
		c.SetFloat(float64(typed))
	case float64:
		c.SetFloat(typed)
	case string:
		c.SetString(typed)
	case []byte:
		c.SetString(string(typed))
	case bool:
		c.SetBool(typed)
	case time.Time:
		// Identical to the original's setDate(v, numberFormat.DateTime).
		c.SetDateTime(typed)
	case []interface{}:
		_ = c.SetText(typed...)
	case nil:
		c.Reset()
	default:
		c.SetString(fmt.Sprintf("%v", value))
	}
}
//Reset resets current cell information, keeping only its reference
func (c *Cell) Reset() {
	*c.ml = ml.Cell{Ref: c.ml.Ref}
}

//Clear clears cell's value
//(only the raw value; type, style, formula and inline string are untouched)
func (c *Cell) Clear() {
	c.ml.Value = ""
}

//HasFormula returns true if cell has formula
func (c *Cell) HasFormula() bool {
	return c.ml.Formula != nil && (*c.ml.Formula != ml.CellFormula{})
}

//Formatting returns DirectStyleID of active format for cell
func (c *Cell) Formatting() format.DirectStyleID {
	return c.ml.Style
}

//SetFormatting sets style format to requested DirectStyleID
func (c *Cell) SetFormatting(styleID format.DirectStyleID) {
	c.ml.Style = styleID
}
//SetValueWithFormat is helper function that internally works as SetValue and SetFormatting with NumberFormat
func (c *Cell) SetValueWithFormat(value interface{}, formatCode string) {
	//we can update styleSheet only when sheet is in write mode, to prevent pollution of styleSheet with fake values
	if (c.sheet.mode() & sheetModeWrite) == 0 {
		panic(errorNotSupportedWrite)
	}
	//register (or reuse) a style carrying the requested number format, then
	//apply it after SetValue (which may itself set a typed default style)
	styleID := c.sheet.workbook.doc.styleSheet.addStyle(format.NewStyles(format.NumberFormat(formatCode)))
	c.SetValue(value)
	c.ml.Style = ml.DirectStyleID(styleID)
}

//Hyperlink returns resolved HyperlinkInfo if there is any hyperlink or nil otherwise
func (c *Cell) Hyperlink() *types.HyperlinkInfo {
	return c.sheet.hyperlinks.Get(c.ml.Ref)
}
//SetHyperlink sets hyperlink for cell, where link can be string or HyperlinkInfo.
//On success the style returned by the hyperlink registry is applied to the cell.
func (c *Cell) SetHyperlink(link interface{}) error {
	// Early-return form: avoid `else` after a terminating branch.
	styleID, err := c.sheet.hyperlinks.Add(types.RefFromIndexes(c.ml.Ref.ToIndexes()).ToBounds(), link)
	if err != nil {
		return err
	}
	c.SetFormatting(styleID)
	return nil
}
//SetValueWithHyperlink is helper function that internally works as SetValue and SetHyperlink
//(the value is only set when the hyperlink was added successfully)
func (c *Cell) SetValueWithHyperlink(value interface{}, link interface{}) error {
	err := c.SetHyperlink(link)
	if err == nil {
		c.SetValue(value)
	}
	return err
}

//RemoveHyperlink removes hyperlink from cell
func (c *Cell) RemoveHyperlink() {
	c.sheet.hyperlinks.Remove(types.RefFromIndexes(c.ml.Ref.ToIndexes()).ToBounds())
}
|
package user
import "github.com/piapip/Learning-Go/Gomock/doer"
// User is a thin wrapper around a doer.Doer, used to demonstrate GoMock.
type User struct {
	Doer doer.Doer
}

// Use invokes the wrapped Doer with fixed demo arguments and returns its error.
func (u *User) Use() error {
	return u.Doer.DoSomething(123, "Hello GoMock")
}

// take forwards x and y to the Doer's DoThisToo method and returns its result.
func (u *User) take(x, y int) int {
	return u.Doer.DoThisToo(x, y)
}
|
package main
import (
"flag"
"fmt"
"github.com/vincentcreusot/finance-limits/fileutils"
"github.com/vincentcreusot/finance-limits/logic"
"log"
"os"
)
// main wires the pipeline: stream input lines over a channel, parse them into
// load results, report any parse errors, and write the accepted loads out.
func main() {
	var inputFileName, outputFileName string
	validateUsage(&inputFileName, &outputFileName)
	lines := make(chan string)
	go fileutils.ReadLines(inputFileName, lines)
	parser := logic.NewFinanceLogic()
	outputs, parseErrs := parser.ParseLoads(lines)
	for i, parseErr := range parseErrs {
		log.Printf("Error #%d in load: %v\n", i, parseErr)
	}
	if len(outputs) > 0 {
		if writeErr := fileutils.WriteLines(outputFileName, outputs); writeErr != nil {
			log.Println("Error writing lines:", writeErr)
		}
	}
}
// validateUsage registers the -inputFile/-i and -outputFile/-o flags, parses
// the command line, and exits with usage information when either is missing.
func validateUsage(inputFileName *string, outputFileName *string) {
	flag.StringVar(inputFileName, "inputFile", "", "File to parse")
	flag.StringVar(inputFileName, "i", "", "File to parse")
	flag.StringVar(outputFileName, "outputFile", "", "File to write to")
	flag.StringVar(outputFileName, "o", "", "File to write to")
	flag.Parse()
	// Check required flags in declaration order, input first.
	required := []struct {
		value *string
		name  string
	}{
		{inputFileName, "-inputFile"},
		{outputFileName, "-outputFile"},
	}
	for _, req := range required {
		if *req.value == "" {
			fmt.Printf("flag %s is needed\n", req.name)
			flag.Usage()
			os.Exit(1)
		}
	}
}
|
package main
import "github.com/ahhoefel/cdf/scene"
import "github.com/ahhoefel/cdf"
// Output image dimensions, quadtree depth and points per cluster.
const (
	width = 800
	height = 800
	depth = 3
	numPoints = 300
)

// main generates two random point clouds, builds a box quadtree of the given
// depth over their union, and renders points plus leaf boxes to image.png.
func main() {
	// Two point clouds; RandomPointsNorm's numeric arguments are presumably
	// spread and center — TODO confirm against the cdf package.
	ptsA := cdf.RandomPointsNorm(numPoints, 60, 300)
	ptsB := cdf.RandomPointsNorm(numPoints, 60, 400)
	q := cdf.NewBoxQuad(depth, append(ptsA, ptsB...))
	s := scene.New()
	s.AddPoints(ptsA...)
	s.AddPoints(ptsB...)
	s.AddBoxes(q.Leaves()...)
	s.Paint("image.png", width, height)
}
|
package static
import (
"os"
"path"
"strings"
"github.com/spiral/errors"
)
// Config describes file location and controls access to them.
type Config struct {
	// Static is nil when the static section is absent from configuration.
	// NOTE(review): Valid/AlwaysForbid/AlwaysServe dereference Static without
	// a nil check — confirm callers only invoke them when the section exists.
	Static *struct {
		// Dir contains name of directory to control access to.
		Dir string
		// Forbid specifies list of file extensions which are forbidden for access.
		// Example: .php, .exe, .bat, .htaccess and etc.
		Forbid []string
		// Always specifies list of extensions which must always be served by static
		// service, even if file not found.
		Always []string
		// Request headers to add to every static.
		Request map[string]string
		// Response headers to add to every static.
		Response map[string]string
	}
}
// Valid returns nil if config is valid: the configured root must exist and
// be a directory.
func (c *Config) Valid() error {
	const op = errors.Op("static_plugin_valid")
	st, err := os.Stat(c.Static.Dir)
	if err != nil {
		if os.IsNotExist(err) {
			// Fixed grammar in the user-facing message ("does not exists" -> "does not exist").
			return errors.E(op, errors.Errorf("root directory '%s' does not exist", c.Static.Dir))
		}
		return err
	}
	if !st.IsDir() {
		return errors.E(op, errors.Errorf("invalid root directory '%s'", c.Static.Dir))
	}
	return nil
}
// AlwaysForbid must return true if file extension is not allowed for the upload.
func (c *Config) AlwaysForbid(filename string) bool {
ext := strings.ToLower(path.Ext(filename))
for _, v := range c.Static.Forbid {
if ext == v {
return true
}
}
return false
}
// AlwaysServe must indicate that file is expected to be served by static service.
func (c *Config) AlwaysServe(filename string) bool {
ext := strings.ToLower(path.Ext(filename))
for _, v := range c.Static.Always {
if ext == v {
return true
}
}
return false
}
|
package router
import (
"github.com/gin-gonic/gin"
"github.com/tzr2020/gin_demo/controller"
)
// SetupRouters builds the gin engine: static assets, the index template,
// and the /v1 todo CRUD API.
func SetupRouters() (r *gin.Engine) {
	// Default gin router (logger + recovery middleware).
	r = gin.Default()
	// Static asset handler.
	r.Static("/static", "static")
	// Load template files.
	r.LoadHTMLFiles("template/index.html")
	// Render the index template.
	r.GET("/", controller.IndexHandler)
	// api v1
	v1Group := r.Group("v1")
	{
		// Todo routes. All relative paths now carry a leading slash for
		// consistency; gin joins them to the group prefix identically
		// either way, so the registered URLs are unchanged.
		// Create a todo item.
		v1Group.POST("/todo", controller.CreateTodo)
		// List all todo items.
		v1Group.GET("/todo", controller.GetTodoList)
		// Update one todo item.
		v1Group.PUT("/todo/:id", controller.UpdateTodoByID)
		// Delete one todo item.
		v1Group.DELETE("/todo/:id", controller.DeleteTodoByID)
	}
	return
}
|
package gpg_test
import (
"testing"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/invidian/terraform-provider-gpg/gpg"
)
// TestProvider asserts that the provider's schema definition passes the
// Terraform SDK's internal consistency validation.
func TestProvider(t *testing.T) {
	if err := gpg.Provider().(*schema.Provider).InternalValidate(); err != nil {
		t.Fatalf("validating provider internally: %v", err)
	}
}
|
package main
import "fmt"
// main prints the spiral matrix generated for a small sample size.
func main() {
	size := 2
	fmt.Println(generateMatrix(size))
}
// generateMatrix returns an n x n matrix filled with the values 1..n*n in
// clockwise spiral order, starting at the top-left corner.
// (The leftover debug fmt.Println(res) calls that printed the matrix after
// every side have been removed.)
func generateMatrix(n int) [][]int {
	// Allocate the n x n result grid.
	res := [][]int{}
	for i := 0; i < n; i++ {
		res = append(res, make([]int, n))
	}
	// Boundaries of the still-unfilled sub-rectangle; they shrink inward
	// after each side of the spiral is written.
	rowBegin := 0
	rowEnd := n - 1
	colBegin := 0
	colEnd := n - 1
	for num := 1; num <= n*n; {
		// Top row, left to right.
		for i := colBegin; i <= colEnd; i++ {
			res[rowBegin][i] = num
			num++
		}
		rowBegin++
		// Right column, top to bottom.
		for i := rowBegin; i <= rowEnd; i++ {
			res[i][colEnd] = num
			num++
		}
		colEnd--
		// Bottom row, right to left (skipped once the columns have crossed).
		if colBegin <= colEnd {
			for i := colEnd; i >= colBegin; i-- {
				res[rowEnd][i] = num
				num++
			}
		}
		rowEnd--
		// Left column, bottom to top (skipped once the rows have crossed).
		if rowBegin <= rowEnd {
			for i := rowEnd; i >= rowBegin; i-- {
				res[i][colBegin] = num
				num++
			}
		}
		colBegin++
	}
	return res
}
|
package models
import (
"errors"
"strings"
"time"
)
// User represents the user model/table
// User represents the user model/table.
type User struct {
	ID uint64 `json:"id,omitempty"`
	Name string `json:"name,omitempty"`
	Nick string `json:"nick,omitempty"`
	Email string `json:"email,omitempty"`
	Password string `json:"password,omitempty"`
	CreatedAt time.Time `json:"createdAt,omitempty"`
}

// Prepare will validate and format an user model instance.
func (u *User) Prepare() error {
	err := u.validate()
	if err != nil {
		return err
	}
	u.format()
	return nil
}

// validate checks the mandatory fields in declaration order and returns a
// (Portuguese) error message for the first one that is empty.
func (u *User) validate() error {
	required := []struct {
		value string
		message string
	}{
		{u.Name, "Nome é obrigatório"},
		{u.Email, "Email é obrigatório"},
		{u.Nick, "Nick é obrigatório"},
		{u.Password, "Senha é obrigatório"},
	}
	for _, field := range required {
		if field.value == "" {
			return errors.New(field.message)
		}
	}
	return nil
}

// format strips leading and trailing whitespace from the textual fields.
func (u *User) format() {
	for _, field := range []*string{&u.Name, &u.Nick, &u.Email} {
		*field = strings.TrimSpace(*field)
	}
}
|
package transport
import (
"github.com/atymkiv/echo_frame_learning/blog/cmd/api/user"
"github.com/atymkiv/echo_frame_learning/blog/model"
"github.com/labstack/echo"
"net/http"
)
// HTTP wires the user service's endpoints onto an echo router.
type HTTP struct {
	svc user.Service // business logic the handlers delegate to
}
// NewHTTP creates new user http service
func NewHTTP(svc user.Service, e *echo.Echo) {
h := HTTP{svc}
// swagger:route POST /signup users userCreate
// Creates new user account.
// responses:
// 200: userResp
// 400: errMsg
// 401: err
// 403: errMsg
// 500: err
e.POST("/signup", h.signup)
}
// authReq is the JSON payload expected by the signup endpoint; both fields
// are required by the validator tags.
type authReq struct {
	Email string `json:"email" validate:"required"`
	Password string `json:"password" validate:"required"`
}
// signup binds the JSON auth payload, delegates account creation to the
// user service, and echoes the created user back as JSON.
func (h *HTTP) signup(c echo.Context) (err error) {
	r := new(authReq)
	if err := c.Bind(r); err != nil {
		return err
	}
	// NOTE(review): `blog.User` is referenced but the imports only show the
	// ".../blog/model" path — presumably that package is named `blog`;
	// confirm this compiles against the real package name.
	u, err := h.svc.Signup(c, blog.User{
		Email: r.Email,
		Password: r.Password,
	})
	if err != nil {
		return err
	}
	return c.JSON(http.StatusOK, u)
}
|
// Copyright (c) 2020 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package starlark
import (
"os"
"strings"
"testing"
"github.com/vmware-tanzu/crash-diagnostics/ssh"
"go.starlark.net/starlarkstruct"
)
// testCrashdConfigNew verifies that a freshly constructed executor carries
// a non-nil starlark thread.
func testCrashdConfigNew(t *testing.T) {
	exe := New()
	if exe.thread == nil {
		t.Error("thread is nil")
	}
}
// testCrashdConfigFunc exercises the crashd_config() starlark builtin:
// its thread-local persistence, its return value, the implicit default
// config, and the ssh-agent side effect of use_ssh_agent=True.
func testCrashdConfigFunc(t *testing.T) {
	tests := []struct {
		name string
		script string
		eval func(t *testing.T, script string)
	}{
		{
			// The config struct must be stored under the crashdCfg key.
			name: "crash_config saved in thread",
			script: `crashd_config(workdir="fooval", default_shell="barval")`,
			eval: func(t *testing.T, script string) {
				// Remove the workdir the script creates.
				defer os.RemoveAll("fooval")
				exe := New()
				if err := exe.Exec("test.star", strings.NewReader(script)); err != nil {
					t.Fatal(err)
				}
				data := exe.thread.Local(identifiers.crashdCfg)
				if data == nil {
					t.Fatal("crashd_config not saved in thread local")
				}
				cfg, ok := data.(*starlarkstruct.Struct)
				if !ok {
					t.Fatalf("unexpected type for thread local key configs.crashd: %T", data)
				}
				if len(cfg.AttrNames()) != 5 {
					t.Fatalf("unexpected item count in configs.crashd: %d", len(cfg.AttrNames()))
				}
			},
		},
		{
			// The builtin's result must be assignable in the script.
			name: "crash_config returned value",
			script: `cfg = crashd_config(uid="fooval", gid="barval")`,
			eval: func(t *testing.T, script string) {
				exe := New()
				if err := exe.Exec("test.star", strings.NewReader(script)); err != nil {
					t.Fatal(err)
				}
				data := exe.result["cfg"]
				if data == nil {
					t.Fatal("crashd_config function not returning value")
				}
			},
		},
		{
			// Even a script that never calls crashd_config gets a default
			// config whose uid matches the current process user.
			name: "crash_config default",
			script: `one = 1`,
			eval: func(t *testing.T, script string) {
				exe := New()
				if err := exe.Exec("test.star", strings.NewReader(script)); err != nil {
					t.Fatal(err)
				}
				data := exe.thread.Local(identifiers.crashdCfg)
				if data == nil {
					t.Fatal("default crashd_config not saved in thread local")
				}
				cfg, ok := data.(*starlarkstruct.Struct)
				if !ok {
					t.Fatalf("unexpected type for thread local key crashd_config: %T", data)
				}
				if len(cfg.AttrNames()) != 5 {
					t.Fatalf("unexpected item count in configs.crashd: %d", len(cfg.AttrNames()))
				}
				val, err := cfg.Attr("uid")
				if err != nil {
					t.Fatalf("key 'foo' not found in configs.crashd: %s", err)
				}
				if trimQuotes(val.String()) != getUid() {
					t.Fatalf("unexpected value for key %s in configs.crashd", val.String())
				}
			},
		},
		{
			// use_ssh_agent=True must start an agent and store it.
			name: "crash_config with use-ssh-agent",
			script: `crashd_config(workdir="fooval", default_shell="barval", use_ssh_agent=True)`,
			eval: func(t *testing.T, script string) {
				defer os.RemoveAll("fooval")
				exe := New()
				if err := exe.Exec("test.star", strings.NewReader(script)); err != nil {
					t.Fatal(err)
				}
				data := exe.thread.Local(identifiers.sshAgent)
				if data == nil {
					t.Fatal("use_ssh_agent identifier not saved in thread local")
				}
				agent, ok := data.(ssh.Agent)
				if !ok || agent == nil {
					t.Fatal("ssh agent should have been started")
				}
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.eval(t, test.script)
		})
	}
}
// TestCrashdCfgAll runs the crashd_config sub-tests, removing the default
// workdir after each one.
func TestCrashdCfgAll(t *testing.T) {
	cases := []struct {
		name string
		test func(*testing.T)
	}{
		{name: "testCrashdConfigNew", test: testCrashdConfigNew},
		{name: "testCrashdConfigFunc", test: testCrashdConfigFunc},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			defer os.RemoveAll(defaults.workdir)
			tc.test(t)
		})
	}
}
|
package gallery
import (
"testing"
. "github.com/bborbe/assert"
)
// TestCreateImage checks that CreateImage stores both the id and the
// content it was given.
func TestCreateImage(t *testing.T) {
	imageId := "imageId123"
	imageContent := "imageContent123"
	image := CreateImage(imageId, imageContent)
	if err := AssertThat(image, NotNilValue()); err != nil {
		t.Fatal(err)
	}
	if err := AssertThat(image.GetId(), Is(imageId)); err != nil {
		t.Fatal(err)
	}
	if err := AssertThat(image.GetContent(), Is(imageContent)); err != nil {
		t.Fatal(err)
	}
}
|
package user
import (
"errors"
"github.com/google/uuid"
)
type Id struct {
	/*
		- Wrapping the identifier in an Id type hides the technical detail
		  (here: that UUIDs are used to generate ids).
		- When the business requirement is "create a system-wide unique user
		  object", *how* the id is generated is not the main concern.
		- If this UUID library ever had a fatal bug and needed replacing, the
		  change would be confined to this file.
		- Likewise, if ids were generated differently (e.g. by the database),
		  the Id type keeps that swap possible.
	*/
	value string
}
// NewId generates a new unique Id backed by a UUID.
func NewId() (*Id, error) {
	id, err := uuid.NewUUID()
	if err != nil {
		// Include the underlying cause instead of masking it behind a bare
		// "assertion error" (the previous version discarded err entirely).
		return nil, errors.New("assertion error: " + err.Error())
	}
	return &Id{
		value: id.String(),
	}, nil
}
// Value returns the string form of the identifier.
func (id *Id) Value() string {
	return id.value
}
|
// Copyright (c) 2020 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package starlark
import (
"fmt"
"strings"
"testing"
"go.starlark.net/starlark"
"go.starlark.net/starlarkstruct"
)
// TestKubeGet drives the KubeGetFn starlark builtin directly with kwargs
// tuples and checks the returned struct's "error" and "objs" attributes
// for services, nodes, and the "all" category.
func TestKubeGet(t *testing.T) {
	tests := []struct {
		name string
		kwargs func(t *testing.T) []starlark.Tuple
		eval func(t *testing.T, kwargs []starlark.Tuple)
	}{
		{
			name: "list of services as starlark objects",
			kwargs: func(t *testing.T) []starlark.Tuple {
				return []starlark.Tuple{
					[]starlark.Value{starlark.String("groups"), starlark.NewList([]starlark.Value{starlark.String("core")})},
					[]starlark.Value{starlark.String("kinds"), starlark.NewList([]starlark.Value{starlark.String("services")})},
					[]starlark.Value{starlark.String("namespaces"), starlark.NewList([]starlark.Value{starlark.String("default"), starlark.String("kube-system")})},
				}
			},
			eval: func(t *testing.T, kwargs []starlark.Tuple) {
				val, err := KubeGetFn(newTestThreadLocal(t), nil, nil, kwargs)
				if err != nil {
					t.Fatalf("failed to execute: %s", err)
				}
				resultStruct, ok := val.(*starlarkstruct.Struct)
				if !ok {
					t.Fatalf("expecting type *starlarkstruct.Struct, got %T", val)
				}
				// The builtin reports failures via an "error" attribute
				// rather than a Go error.
				errVal, err := resultStruct.Attr("error")
				if err != nil {
					t.Error(err)
				}
				resultErr := errVal.(starlark.String).GoString()
				if resultErr != "" {
					t.Fatalf("starlark func failed: %s", resultErr)
				}
				objVal, err := resultStruct.Attr("objs")
				if err != nil {
					t.Error(err)
				}
				objList, ok := objVal.(*starlark.List)
				if !ok {
					t.Fatalf("unexpected type for starlark value")
				}
				// One result per requested namespace.
				if objList.Len() != 2 {
					t.Errorf("unexpected object list returned: %d", objList.Len())
				}
			},
		},
		{
			name: "list of nodes as starlark objects",
			kwargs: func(t *testing.T) []starlark.Tuple {
				return []starlark.Tuple{
					[]starlark.Value{starlark.String("groups"), starlark.NewList([]starlark.Value{starlark.String("core")})},
					[]starlark.Value{starlark.String("kinds"), starlark.NewList([]starlark.Value{starlark.String("nodes")})},
				}
			},
			eval: func(t *testing.T, kwargs []starlark.Tuple) {
				val, err := KubeGetFn(newTestThreadLocal(t), nil, nil, kwargs)
				if err != nil {
					t.Fatalf("failed to execute: %s", err)
				}
				resultStruct, ok := val.(*starlarkstruct.Struct)
				if !ok {
					t.Fatalf("expecting type *starlarkstruct.Struct, got %T", val)
				}
				errVal, err := resultStruct.Attr("error")
				if err != nil {
					t.Error(err)
				}
				resultErr := errVal.(starlark.String).GoString()
				if resultErr != "" {
					t.Fatalf("starlark func failed: %s", resultErr)
				}
				objVal, err := resultStruct.Attr("objs")
				if err != nil {
					t.Error(err)
				}
				objList, ok := objVal.(*starlark.List)
				if !ok {
					t.Fatalf("unexpected type for starlark value")
				}
				// Nodes are non-namespaced, so a single result is expected.
				if objList.Len() != 1 {
					t.Errorf("unexpected object list returned: %d", objList.Len())
				}
			},
		},
		{
			name: "different categories of objects as starlark objects",
			kwargs: func(t *testing.T) []starlark.Tuple {
				return []starlark.Tuple{
					[]starlark.Value{starlark.String("categories"), starlark.NewList([]starlark.Value{starlark.String("all")})},
				}
			},
			eval: func(t *testing.T, kwargs []starlark.Tuple) {
				val, err := KubeGetFn(newTestThreadLocal(t), nil, nil, kwargs)
				if err != nil {
					t.Fatalf("failed to execute: %s", err)
				}
				resultStruct, ok := val.(*starlarkstruct.Struct)
				if !ok {
					t.Fatalf("expecting type *starlarkstruct.Struct, got %T", val)
				}
				errVal, err := resultStruct.Attr("error")
				if err != nil {
					t.Error(err)
				}
				resultErr := errVal.(starlark.String).GoString()
				if resultErr != "" {
					t.Fatalf("starlark func failed: %s", resultErr)
				}
				objVal, err := resultStruct.Attr("objs")
				if err != nil {
					t.Error(err)
				}
				objList, ok := objVal.(*starlark.List)
				if !ok {
					t.Fatalf("unexpected type for starlark value")
				}
				// "all" should span multiple object kinds.
				if objList.Len() <= 1 {
					t.Errorf("unexpected object list returned: %d", objList.Len())
				}
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.eval(t, test.kwargs(t))
		})
	}
}
// TestKubeGetScript runs kube_get() through full starlark scripts against
// the kind test cluster, verifying the "kube_data" result struct for
// namespaced objects, non-namespaced objects, categories, and containers.
func TestKubeGetScript(t *testing.T) {
	k8sconfig := testSupport.KindKubeConfigFile()
	clusterName := testSupport.KindClusterContextName()
	// execute runs a script and returns the struct bound to kube_data.
	execute := func(t *testing.T, script string) *starlarkstruct.Struct {
		executor := New()
		if err := executor.Exec("test.kube.capture", strings.NewReader(script)); err != nil {
			t.Fatalf("failed to exec: %s", err)
		}
		if !executor.result.Has("kube_data") {
			t.Fatalf("script result must be assigned to a value")
		}
		data, ok := executor.result["kube_data"].(*starlarkstruct.Struct)
		if !ok {
			t.Fatal("script result is not a struct")
		}
		return data
	}
	tests := []struct {
		name string
		script string
		eval func(t *testing.T, script string)
	}{
		{
			name: "namespaced objects as starlark objects with context",
			script: fmt.Sprintf(`
set_defaults(kube_config(path="%s", cluster_context="%s"))
kube_data = kube_get(groups=["core"], kinds=["services"], namespaces=["default", "kube-system"])
`, k8sconfig, clusterName),
			eval: func(t *testing.T, script string) {
				data := execute(t, script)
				errVal, err := data.Attr("error")
				if err != nil {
					t.Error(err)
				}
				resultErr := errVal.(starlark.String).GoString()
				if resultErr != "" {
					t.Fatalf("starlark func failed: %s", resultErr)
				}
				objVal, err := data.Attr("objs")
				if err != nil {
					t.Error(err)
				}
				objList, ok := objVal.(*starlark.List)
				if !ok {
					t.Fatalf("unexpected type for starlark value")
				}
				// One entry per requested namespace.
				if objList.Len() != 2 {
					t.Errorf("unexpected object list returned: %d", objList.Len())
				}
			},
		},
		{
			name: "non-namespaced objects as starlark objects",
			script: fmt.Sprintf(`
set_defaults(kube_config(path="%s"))
kube_data = kube_get(groups=["core"], kinds=["nodes"])
`, k8sconfig),
			eval: func(t *testing.T, script string) {
				data := execute(t, script)
				errVal, err := data.Attr("error")
				if err != nil {
					t.Error(err)
				}
				resultErr := errVal.(starlark.String).GoString()
				if resultErr != "" {
					t.Fatalf("starlark func failed: %s", resultErr)
				}
				objVal, err := data.Attr("objs")
				if err != nil {
					t.Error(err)
				}
				objList, ok := objVal.(*starlark.List)
				if !ok {
					t.Fatalf("unexpected type for starlark value")
				}
				if objList.Len() != 1 {
					t.Errorf("unexpected object list returned: %d", objList.Len())
				}
			},
		},
		{
			name: "different categories of objects as starlark objects with context",
			script: fmt.Sprintf(`
set_defaults(kube_config(path="%s", cluster_context="%s"))
kube_data = kube_get(categories=["all"])
`, k8sconfig, clusterName),
			eval: func(t *testing.T, script string) {
				data := execute(t, script)
				errVal, err := data.Attr("error")
				if err != nil {
					t.Error(err)
				}
				resultErr := errVal.(starlark.String).GoString()
				if resultErr != "" {
					t.Fatalf("starlark func failed: %s", resultErr)
				}
				objVal, err := data.Attr("objs")
				if err != nil {
					t.Error(err)
				}
				objList, ok := objVal.(*starlark.List)
				if !ok {
					t.Fatalf("unexpected type for starlark value")
				}
				if objList.Len() < 3 {
					t.Errorf("unexpected object list returned: %d", objList.Len())
				}
			},
		},
		{
			name: "retrieve containers as starlark objects",
			script: fmt.Sprintf(`
set_defaults(kube_config(path="%s"))
kube_data = kube_get(kinds=["pods"], namespaces=["kube-system"], containers=["etcd"])
`, k8sconfig),
			eval: func(t *testing.T, script string) {
				data := execute(t, script)
				errVal, err := data.Attr("error")
				if err != nil {
					t.Error(err)
				}
				resultErr := errVal.(starlark.String).GoString()
				if resultErr != "" {
					t.Fatalf("starlark func failed: %s", resultErr)
				}
				objVal, err := data.Attr("objs")
				if err != nil {
					t.Error(err)
				}
				objList, ok := objVal.(*starlark.List)
				if !ok {
					t.Fatalf("unexpected type for starlark value")
				}
				if objList.Len() < 1 {
					t.Errorf("unexpected object list returned: %d", objList.Len())
				}
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.eval(t, test.script)
		})
	}
}
|
package nests
import "fmt"
// trainSoluce trains the ant's network with nb back-propagation passes per
// input, cycling through the outNb directions on consecutive passes.
func (a *Ant) trainSoluce(ns *Nests, nb int) {
	a.trained = true
	//a.Life = 100000000
	inputs := []int{0, 1, 2, 3}
	if a.AntType == 1 {
		// Ant type 1 is only trained on inputs 0 and 3.
		inputs = []int{0, 3}
	}
	for _, in := range inputs {
		direct := 0
		for pass := 0; pass < nb; pass++ {
			a.setEntriesSoluce(in, direct)
			a.setOutsSoluce(in, direct)
			a.network.Propagate(a.entries, false)
			a.network.BackPropagate(a.outs)
			direct = (direct + 1) % outNb
		}
	}
}
// setEntriesSoluce writes a one-hot input vector: only the entry selected
// by (in, direct) is set to 1, every other entry is cleared.
func (a *Ant) setEntriesSoluce(in int, direct int) {
	hot := in*visionNb + direct
	for idx := range a.entries {
		if idx == hot {
			a.entries[idx] = 1
		} else {
			a.entries[idx] = 0
		}
	}
}
// setOutsSoluce writes the one-hot expected-output vector for (in, direct).
// For input 0 (and, for ant type 0, also input 3) the target direction is
// offset by half of outNb, wrapping around.
func (a *Ant) setOutsSoluce(in int, direct int) {
	if in == 0 || (a.AntType == 0 && in == 3) {
		direct += outNb / 2
		if direct >= outNb {
			direct -= outNb
		}
	}
	for idx := range a.outs {
		if idx == direct {
			a.outs[idx] = 1
		} else {
			a.outs[idx] = 0
		}
	}
}
// test propagates every (input, direction) pair through the trained
// network and returns a human-readable report, including how many distinct
// output directions were produced per input.
func (a *Ant) test(ns *Nests) []string {
	lines := make([]string, 1, 1)
	lines[0] = fmt.Sprintf("Test selected network: %v\n", a.network.Getdef())
	ins := []int{0, 1, 2, 3}
	if a.AntType == 1 {
		ins = []int{0, 3}
	}
	for _, in := range ins {
		// dirMap collects the distinct argmax outputs seen for this input.
		dirMap := make(map[int]int)
		for direct := 0; direct < 8; direct++ {
			a.setEntriesSoluce(in, direct)
			outs := a.network.Propagate(a.entries, true)
			max := getMax(outs)
			dirMap[max] = 1
			lines = append(lines, fmt.Sprintf("in: %s out=%s max=%d\n", a.displayList(ns, a.entries, "%.0f"), a.displayList(ns, outs, "%.3f"), max))
		}
		lines = append(lines, fmt.Sprintf("Test for entries=%d distinct=%d\n", in, len(dirMap)))
	}
	return lines
}
// getMax returns the index of the largest value in list that exceeds 0.0;
// it returns 0 when the list is empty or no value is positive.
func getMax(list []float64) int {
	bestIndex := 0
	bestValue := 0.0
	for idx, value := range list {
		if value > bestValue {
			bestValue = value
			bestIndex = idx
		}
	}
	return bestIndex
}
|
package main
import (
"fmt"
"sort"
)
// Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
// main runs threeSum on two sample inputs, printing the expected answer
// next to the computed one.
func main() {
	// Test cases
	first := []int{-1, 0, 1, 2, -1, -4}
	fmt.Println("[[-1 0 1] [-1 -1 2]] =", threeSum(first))
	second := []int{0, 3, -3, 1, 2, -2}
	fmt.Println("[[-3 0 3] [-2 0 2] [-3 1 2]] =", threeSum(second))
}
// threeSum returns every unique triplet of values from nums that sums to
// zero. Each returned triplet is sorted ascending; a set of sorted
// triplets guarantees permutations are reported only once.
func threeSum(nums []int) [][]int {
	var result [][]int
	seen := make(map[[3]int]bool)
	n := len(nums)
	for i := 0; i < n-2; i++ {
		for j := i + 1; j < n-1; j++ {
			for k := j + 1; k < n; k++ {
				if nums[i]+nums[j]+nums[k] != 0 {
					continue
				}
				triplet := []int{nums[i], nums[j], nums[k]}
				sort.Ints(triplet)
				key := [3]int{triplet[0], triplet[1], triplet[2]}
				if seen[key] {
					continue
				}
				seen[key] = true
				result = append(result, triplet)
			}
		}
	}
	return result
}
|
package MessageChannels
import (
"bytes"
"encoding/json"
"main/Structs"
"net/http"
"os"
)
// slackMessage is the minimal JSON payload a Slack incoming webhook accepts.
type slackMessage struct {
	Text string `json:"text"`
}
// SlackChannel notifies a Slack webhook about the monitored link L.
type SlackChannel struct {
	L Structs.LinkStruct
}
// SendMessage posts a "link is down" notification to the Slack webhook
// configured in the SLACK_HOOK_URL environment variable. It returns false
// when the hook is unset or any step of the request fails.
func (sc *SlackChannel) SendMessage() bool {
	hookURL := os.Getenv("SLACK_HOOK_URL")
	if hookURL == "" {
		return false
	}
	message := slackMessage{Text: "*" + sc.L.Description + "* Is Down!"}
	// The marshal and request-construction errors were previously ignored.
	slackMessageJson, err := json.Marshal(message)
	if err != nil {
		return false
	}
	request, err := http.NewRequest("POST",
		hookURL,
		bytes.NewBuffer(slackMessageJson))
	if err != nil {
		return false
	}
	request.Header.Set("Content-Type", "application/json")
	response, err := http.DefaultClient.Do(request)
	if err != nil {
		return false
	}
	// Close the response body so the connection can be reused (it was
	// previously leaked).
	defer response.Body.Close()
	return true
}
|
package main
import "fmt"
// main prints FizzBuzz for 1..100, using decider to append a word for
// every matching divisor.
func main() {
	fizzDivisors := []int{3}
	buzzDivisors := []int{5}
	for i := 1; i <= 100; i++ {
		line := ""
		decider(&fizzDivisors, &i, &line, "Fizz")
		decider(&buzzDivisors, &i, &line, "Buzz")
		if line == "" {
			line = fmt.Sprintf("%d", i)
		}
		fmt.Println(line)
	}
}
// decider appends word to *output once for each divisor in *list that
// evenly divides *i.
func decider(list *[]int, i *int, output *string, word string) {
	for _, divisor := range *list {
		if *i%divisor != 0 {
			continue
		}
		*output += word
	}
}
|
package backends
import (
"encoding/json"
"fmt"
"github.com/schachmat/wego/iface"
"io/ioutil"
"log"
"net/http"
"regexp"
"strings"
"time"
)
// smhiConfig is the (state-less) SMHI backend registered with wego.
type smhiConfig struct {
}

// smhiDataPoint is one named forecast parameter inside a time step.
type smhiDataPoint struct {
	Level int `json:"level"`
	LevelType string `json:"levelType"`
	Name string `json:"name"`
	Unit string `json:"unit"`
	Values []interface{} `json:"values"`
}

// smhiTimeSeries is one forecast time step and its parameters.
type smhiTimeSeries struct {
	ValidTime string `json:"validTime"`
	Parameters []*smhiDataPoint `json:"parameters"`
}

// smhiGeometry carries the forecast point's [lon, lat] coordinates.
type smhiGeometry struct {
	Coordinates [][]float32 `json:"coordinates"`
}

// smhiResponse mirrors the top level of SMHI's point-forecast JSON.
type smhiResponse struct {
	ApprovedTime string `json:"approvedTime"`
	ReferenceTime string `json:"referenceTime"`
	Geometry smhiGeometry `json:"geometry"`
	TimeSeries []*smhiTimeSeries `json:"timeSeries"`
}

// smhiCondition pairs a wego weather code with a display description.
type smhiCondition struct {
	WeatherCode iface.WeatherCode
	Description string
}
const (
	// see http://opendata.smhi.se/apidocs/metfcst/index.html
	// Note the lon-before-lat parameter order in the URL template.
	smhiWuri = "https://opendata-download-metfcst.smhi.se/api/category/pmp3g/version/2/geotype/point/lon/%s/lat/%s/data.json"
)

var (
	// weatherConditions maps SMHI's "Wsymb2" weather-symbol codes (1-27)
	// to wego weather codes and descriptions.
	weatherConditions = map[int]smhiCondition{
		1: {iface.CodeSunny, "Clear Sky"},
		2: {iface.CodeSunny, "Nearly Clear Sky"},
		3: {iface.CodePartlyCloudy, "Variable cloudiness"},
		4: {iface.CodePartlyCloudy, "Halfclear sky"},
		5: {iface.CodeCloudy, "Cloudy sky"},
		6: {iface.CodeVeryCloudy, "Overcast"},
		7: {iface.CodeFog, "Fog"},
		8: {iface.CodeLightShowers, "Light rain showers"},
		9: {iface.CodeLightShowers, "Moderate rain showers"},
		10: {iface.CodeHeavyShowers, "Heavy rain showers"},
		11: {iface.CodeThunderyShowers, "Thunderstorm"},
		12: {iface.CodeLightSleetShowers, "Light sleet showers"},
		13: {iface.CodeLightSleetShowers, "Moderate sleet showers"},
		14: {iface.CodeHeavySnowShowers, "Heavy sleet showers"},
		15: {iface.CodeLightSnowShowers, "Light snow showers"},
		16: {iface.CodeLightSnowShowers, "Moderate snow showers"},
		17: {iface.CodeHeavySnowShowers, "Heavy snow showers"},
		18: {iface.CodeLightRain, "Light rain"},
		19: {iface.CodeLightRain, "Moderate rain"},
		20: {iface.CodeHeavyRain, "Heavy rain"},
		21: {iface.CodeThunderyHeavyRain, "Thunder"},
		22: {iface.CodeLightSleet, "Light sleet"},
		23: {iface.CodeLightSleet, "Moderate sleet"},
		24: {iface.CodeHeavySnow, "Heavy sleet"},
		25: {iface.CodeLightSnow, "Light snowfall"},
		26: {iface.CodeLightSnow, "Moderate snowfall"},
		27: {iface.CodeHeavySnow, "Heavy snowfall"},
	}
)
// Setup implements the backend interface; SMHI needs no configuration.
func (c *smhiConfig) Setup() {
}
// fetch downloads a SMHI point-forecast JSON document from url and decodes
// it into a smhiResponse.
func (c *smhiConfig) fetch(url string) (*smhiResponse, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, fmt.Errorf("Unable to get (%s): %v", url, err)
	}
	// Close the body on every path; the non-200 branch previously returned
	// before the deferred Close was registered, leaking the connection.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		body, _ := ioutil.ReadAll(resp.Body)
		quip := ""
		if string(body) == "Requested point is out of bounds" {
			quip = "\nPlease note that SMHI only service the nordic countries."
		}
		return nil, fmt.Errorf("Unable to get (%s): http status %d, %s%s", url, resp.StatusCode, body, quip)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("Unable to read response body (%s): %v", url, err)
	}
	var response smhiResponse
	err = json.Unmarshal(body, &response)
	if err != nil {
		return nil, fmt.Errorf("Unable to parse response (%s): %v", url, err)
	}
	return &response, nil
}
// Fetch implements the wego backend interface. location must be a
// "latitude,longitude" pair; the SMHI point forecast is downloaded and
// turned into current conditions plus a numDays forecast.
func (c *smhiConfig) Fetch(location string, numDays int) (ret iface.Data) {
	if matched, err := regexp.MatchString(`^-?[0-9]*(\.[0-9]+)?,-?[0-9]*(\.[0-9]+)?$`, location); !matched || err != nil {
		log.Fatalf("Error: The smhi backend only supports latitude,longitude pairs as location.\nInstead of `%s` try `59.329,18.068` for example to get a forecast for Stockholm.", location)
	}
	s := strings.Split(location, ",")
	// The SMHI URL takes longitude first, then latitude — hence the
	// swapped indices.
	requestUrl := fmt.Sprintf(smhiWuri, s[1], s[0])
	resp, err := c.fetch(requestUrl)
	if err != nil {
		log.Fatalf("Failed to fetch weather data: %v\n", err)
	}
	ret.Current = c.parseCurrent(resp)
	ret.Forecast = c.parseForecast(resp, numDays)
	// Geometry coordinates are [lon, lat].
	coordinates := resp.Geometry.Coordinates
	ret.GeoLoc = &iface.LatLon{Latitude: coordinates[0][1], Longitude: coordinates[0][0]}
	ret.Location = location + " (Forecast provided by SMHI)"
	return ret
}
// parseForecast groups the hourly time series into per-day iface.Day
// values, returning at most numDays (capped at 10) days.
func (c *smhiConfig) parseForecast(response *smhiResponse, numDays int) (days []iface.Day) {
	if numDays > 10 {
		numDays = 10
	}
	var currentTime time.Time = time.Now()
	var dayCount = 0
	var day iface.Day
	day.Date = time.Now()
	for _, prediction := range response.TimeSeries {
		if dayCount == numDays {
			break
		}
		ts, err := time.Parse(time.RFC3339, prediction.ValidTime)
		if err != nil {
			log.Fatalf("Failed to parse timestamp: %v\n", err)
		}
		// A new calendar day started: flush the accumulated day and begin
		// a fresh one dated at this time step.
		if ts.Day() != currentTime.Day() {
			dayCount += 1
			currentTime = ts
			days = append(days, day)
			day = iface.Day{Date: ts}
		}
		day.Slots = append(day.Slots, c.parsePrediction(prediction))
	}
	// NOTE(review): if the series runs out before numDays boundaries are
	// crossed, the partially filled trailing `day` is never appended —
	// confirm whether that is intended.
	return days
}
// parseCurrent returns the conditions of the most recent time step that is
// not in the future (falling back to the first step when all are future).
func (c *smhiConfig) parseCurrent(forecast *smhiResponse) (cnd iface.Cond) {
	// The previous check used `< 0`, which can never be true for a length.
	if len(forecast.TimeSeries) == 0 {
		log.Fatalln("Failed to fetch weather data: No Forecast in response")
	}
	var currentPrediction *smhiTimeSeries = forecast.TimeSeries[0]
	var currentTime time.Time = time.Now().UTC()
	for _, prediction := range forecast.TimeSeries {
		ts, err := time.Parse(time.RFC3339, prediction.ValidTime)
		if err != nil {
			log.Fatalf("Failed to parse timestamp: %v\n", err)
		}
		if ts.After(currentTime) {
			break
		}
		// Track the latest prediction at or before now (the previous
		// version never updated currentPrediction inside the loop).
		currentPrediction = prediction
	}
	return c.parsePrediction(currentPrediction)
}
// parsePrediction converts one SMHI time step into an iface.Cond, mapping
// each named parameter it recognizes and converting units to wego's
// conventions. Unknown parameters are skipped.
func (c *smhiConfig) parsePrediction(prediction *smhiTimeSeries) (cnd iface.Cond) {
	ts, err := time.Parse(time.RFC3339, prediction.ValidTime)
	if err != nil {
		log.Fatalf("Failed to parse timestamp: %v\n", err)
	}
	cnd.Time = ts
	for _, param := range prediction.Parameters {
		switch param.Name {
		case "pmean":
			precip := float32(param.Values[0].(float64) / 1000) // Convert mm/h to m/h
			cnd.PrecipM = &precip
		case "vis":
			vis := float32(param.Values[0].(float64) * 1000) // Convert km to m
			cnd.VisibleDistM = &vis
		case "t":
			temp := float32(param.Values[0].(float64))
			cnd.TempC = &temp
		case "Wsymb2":
			// Weather symbol code; mapped via the weatherConditions table.
			condition := weatherConditions[int(param.Values[0].(float64))]
			cnd.Code = condition.WeatherCode
			cnd.Desc = condition.Description
		case "ws":
			windSpeed := float32(param.Values[0].(float64) * 3.6) // convert m/s to km/h
			cnd.WindspeedKmph = &windSpeed
		case "gust":
			gustSpeed := float32(param.Values[0].(float64) * 3.6) // convert m/s to km/h
			cnd.WindGustKmph = &gustSpeed
		case "wd":
			val := int(param.Values[0].(float64))
			cnd.WinddirDegree = &val
		case "r":
			val := int(param.Values[0].(float64))
			cnd.Humidity = &val
		default:
			continue
		}
	}
	return cnd
}
// init registers the SMHI backend under the "smhi" key.
func init() {
	iface.AllBackends["smhi"] = &smhiConfig{}
}
|
package main
import "fmt"
// main demonstrates that copy transfers only min(len(dst), len(src))
// elements, so the destination must already have the right *length*
// (capacity alone is not enough).
func main(){
	//copying a slice
	var patientZero = []int{101100, 111001, 100100} //has len = 3, cap = 3
	//THE WRONG WAY: the destination has length 0, so nothing is copied
	buf := make([]int, 0)
	fmt.Println(len(buf), cap(buf))
	copied := copy(buf, patientZero) //copy stops at the shorter slice: 0 elements
	fmt.Println(copied) //buf is still empty
	//THE RIGHT WAY: size the destination to the source's length first
	newBuf := make([]int, len(patientZero), cap(patientZero))
	copy(newBuf, patientZero)
	fmt.Printf("actual copy of patientZero: %#v\n", newBuf)
	//one can also copy into a sub-range of an existing slice
	ints := make([]int, 4)
	copy(ints[1:3], []int{66, 99}) //ints = [0, 66, 99, 0]
	fmt.Println(ints)
}
|
package port
import (
"github.com/mirzaakhena/danarisan/domain/repository"
"github.com/mirzaakhena/danarisan/domain/service"
)
// UndangPesertaOutport ...
// UndangPesertaOutport declares every repository and service dependency
// (outbound port) required by the UndangPeserta use case; one gateway
// implementation is expected to satisfy all of them.
type UndangPesertaOutport interface {
	repository.FindOneArisanRepo
	repository.FindOneArisanByAdminIDRepo
	repository.SavePesertaRepo
	repository.SaveListOfPesertaRepo
	repository.FindPesertaByIDsRepo
	service.TransactionDB
}
|
package main
import "fmt"
// is_prime reports whether n is prime, using 6k±1 trial division.
func is_prime(n int64) bool {
	switch {
	case n <= 1:
		return false
	case n <= 3:
		return true
	case n%2 == 0 || n%3 == 0:
		return false
	}
	// Every prime > 3 has the form 6k±1, so only those candidates are tried.
	for i := int64(5); i*i <= n; i += 6 {
		if n%i == 0 || n%(i+2) == 0 {
			return false
		}
	}
	return true
}
// 10001st prime
func main() {
n := 2
p := int64(3)
pc := p
for n < 10001 {
pc += 2
if is_prime(pc) {
n++
p = pc
}
}
fmt.Println(p)
}
|
package main
import (
"bufio"
"io"
"io/ioutil"
"os"
"testing"
)
func ScanLines(r io.Reader) []string {
s := bufio.NewScanner(r)
var lines []string
for s.Scan() {
lines = append(lines, s.Text())
}
return lines
}
func setupTests(prefix string) (*os.File, func(), error) {
createdFile, err := ioutil.TempFile(os.TempDir(), prefix)
teardown := func() {
os.Remove(os.TempDir() + createdFile.Name())
}
createdFile.WriteString("=====\n")
createdFile.Sync()
return createdFile, teardown, err
}
// TestFileManipulator checks that FileManipulator can append a line to the
// end of the seeded temp file and prepend a line to its beginning.
func TestFileManipulator(t *testing.T) {
	tempFile, teardown, err := setupTests("fm")
	defer teardown()
	if err != nil {
		t.Error("Unable to open temp file.")
	}
	fm := FileManipulator{f: tempFile}
	t.Run("Append", func(t *testing.T) {
		s := "xxxxx\n"
		fm.Append(s)
		// NOTE(review): ScanLines reads from the file's current offset —
		// presumably Append/Prepend rewind the handle; confirm.
		lines := ScanLines(tempFile)
		if lines[len(lines)-1] != "xxxxx" {
			t.Logf("Lines:%#v\n", lines)
			t.Fail()
		}
	})
	t.Run("Prepend", func(t *testing.T) {
		s := "yyyyy\n"
		fm.Prepend(s)
		lines := ScanLines(tempFile)
		if lines[0] != "yyyyy" {
			t.Logf("Lines:%#v\n", lines)
			t.Fail()
		}
	})
}
|
package mhfpacket
import (
"errors"
"github.com/Andoryuuta/Erupe/network"
"github.com/Andoryuuta/Erupe/network/clientctx"
"github.com/Andoryuuta/byteframe"
)
// OperateGuildMemberAction enumerates the guild member-management actions.
type OperateGuildMemberAction uint8

const (
	_ = iota // skip 0 so the action values start at 1
	OPERATE_GUILD_MEMBER_ACTION_ACCEPT
	OPERATE_GUILD_MEMBER_ACTION_REJECT
	OPERATE_GUILD_MEMBER_ACTION_KICK
)
// MsgMhfOperateGuildMember represents the MSG_MHF_OPERATE_GUILD_MEMBER
// packet: a request to apply Action to member CharID of guild GuildID.
type MsgMhfOperateGuildMember struct {
	AckHandle uint32 // handle echoed back in the acknowledgement
	GuildID uint32 // target guild
	CharID uint32 // target character
	Action uint8 // presumably one of OPERATE_GUILD_MEMBER_ACTION_* — TODO confirm
}
// Opcode returns the ID associated with this packet type.
func (m *MsgMhfOperateGuildMember) Opcode() network.PacketID {
return network.MSG_MHF_OPERATE_GUILD_MEMBER
}
// Parse parses the packet from binary
func (m *MsgMhfOperateGuildMember) Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
m.AckHandle = bf.ReadUint32()
m.GuildID = bf.ReadUint32()
m.CharID = bf.ReadUint32()
m.Action = bf.ReadUint8()
return nil
}
// Build builds a binary packet from the current data.
func (m *MsgMhfOperateGuildMember) Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
return errors.New("Not implemented")
}
|
package main
import "fmt"
// factorial returns num! (the product 1*2*...*num); factorial(0) is 1.
func factorial(num int) int {
	// The previous version started the accumulator at its zero value, so
	// every product collapsed to 0, and the loop stopped at num-1, which
	// would have computed (num-1)! anyway.
	result := 1
	for i := 2; i <= num; i++ {
		result *= i
	}
	return result
}
// main prints the factorial of a sample number.
func main() {
	num := 2
	result := factorial(num)
	fmt.Printf("The factorial of %d is %d\n", num, result)
}
|
package inet_test
import (
"fmt"
"math/rand"
"testing"
"github.com/gaissmai/go-inet/inet"
"github.com/gaissmai/go-inet/internal"
)
// BenchmarkSortIP measures inet.SortIP on shuffled mixed v4/v6 address
// sets of increasing size.
func BenchmarkSortIP(b *testing.B) {
	bench := []int{10000, 100000, 1000000}
	for _, n := range bench {
		ips := internal.GenMixed(n)
		// Shuffle once per size so the sort starts from random order.
		rand.Shuffle(len(ips), func(i, j int) { ips[i], ips[j] = ips[j], ips[i] })
		b.Run(fmt.Sprintf("%7d", n), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				inet.SortIP(ips)
			}
		})
	}
}
// BenchmarkSortBlock measures inet.SortBlock on shuffled mixed block sets
// of increasing size.
func BenchmarkSortBlock(b *testing.B) {
	bench := []int{10000, 100000, 1000000}
	for _, n := range bench {
		rs := internal.GenBlockMixed(n)
		rand.Shuffle(len(rs), func(i, j int) { rs[i], rs[j] = rs[j], rs[i] })
		b.Run(fmt.Sprintf("%7d", n), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				inet.SortBlock(rs)
			}
		})
	}
}
|
package main
import "fmt"
// student is a small demo struct for the struct-literal examples in main.
type student struct{
	rollNo int // positional first field
	name string // positional second field
}
// main demonstrates struct literals (positional and named), field access,
// pointers to a struct and to its fields, and field mutation.
func main(){
	fmt.Println(student{1, "sushil"})
	s := student{2, "arati"}
	fmt.Println(s)
	fmt.Println(s.name)
	fmt.Println(s.rollNo)
	fmt.Println(&s)
	fmt.Println(&s.rollNo)
	fmt.Println(&s.name)
	named := student{rollNo: 3, name: "bharati"}
	fmt.Println(named)
	named.name = "sanjay"
	fmt.Println(named)
}
|
package main
import (
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/gabriel-vasile/mimetype"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
"golang.design/x/clipboard"
)
// Page is one canvas of a Project: it owns its cards, grid, selection,
// drawables, and the camera pan/zoom last used to view it.
type Page struct {
	ID uint64 // unique id assigned from globalPageID at construction
	Project *Project
	UpwardPage *Page // parent page when this page is a subpage
	Grid *Grid
	Cards []*Card
	ToDelete []*Card // cards queued for removal during Draw
	ToRestore []*Card // cards queued for restoration during Draw
	Selection *Selection
	UpdateStacks bool // when true, card stacks are re-resolved next Update
	Drawables []*Drawable
	ToRaise []*Card
	IgnoreWritePan bool // when true, Update does not record camera pan/zoom
	Pan Point
	Zoom float32
	Arrowing *Card // The card that we're in the process of linking from one to another
	DeserializationLinks []string
	PointingSubpageCard *Card
}

// globalPageID is the monotonically increasing source of Page IDs.
var globalPageID = uint64(0)
// NewPage constructs an empty Page for project, assigns it the next global
// page ID, wires up its grid and selection, and registers it with the
// global hierarchy.
func NewPage(project *Project) *Page {
	page := &Page{
		ID:        globalPageID,
		Project:   project,
		Cards:     []*Card{},
		Drawables: []*Drawable{},
		ToRaise:   []*Card{},
		Zoom:      1,
	}
	globalPageID++
	page.Grid = NewGrid(page)
	page.Selection = NewSelection(page)
	globals.Hierarchy.AddPage(page)
	return page
}
// Update advances every card (in reverse order, so cards later in the
// slice see input first), records the camera pan/zoom for the current
// page, and re-resolves card stacks when flagged.
func (page *Page) Update() {
	// Make a reversed copy of Cards; the comparator works on indices, so
	// this simply flips the slice order.
	reversed := append([]*Card{}, page.Cards...)
	sort.SliceStable(reversed, func(i, j int) bool {
		return j < i
	})
	// We update links out here so they take priority in clicking over the cards themselves. TODO: Optimize this, as this doesn't really need to be done every frame
	if page.IsCurrent() {
		for _, card := range reversed {
			for _, link := range card.Links {
				link.Update()
			}
		}
	}
	for _, card := range reversed {
		card.Update()
	}
	if page.IsCurrent() {
		// We only want to set the pan and zoom of a page if it's not loading the project (as it sets the page to be current to take screenshots for subpages).
		if !page.Project.Loading && !page.IgnoreWritePan {
			page.Pan = page.Project.Camera.Position
			page.Zoom = page.Project.Camera.Zoom
		}
		if page.UpdateStacks {
			// In this loop, the Stacks are subject to change.
			for _, card := range page.Cards {
				card.Stack.Update()
			}
			// From this point, the Stacks should be accurate and usable again.
			for _, card := range page.Cards {
				card.Stack.PostUpdate()
			}
			page.SendMessage(NewMessage(MessageStacksUpdated, nil, nil))
			page.UpdateStacks = false
		}
	}
}
// IsCurrent reports whether this page is the project's currently
// displayed page.
func (page *Page) IsCurrent() bool {
	return page.Project.CurrentPage == page
}
// Draw renders the page for one frame in layered passes (shadows, card
// bodies, undo handling, links, auxiliary drawables, selection), then
// applies the queued deletion / restoration / raise lists.
func (page *Page) Draw() {
	// NOTE(review): sorted shares page.Cards' backing array, so this sort
	// reorders page.Cards itself by ascending Depth — confirm the in-place
	// reordering is intended.
	sorted := page.Cards[:]
	sort.SliceStable(sorted, func(i, j int) bool {
		return page.Cards[i].Depth < page.Cards[j].Depth
	})
	// Layered passes: every shadow under every body, etc.
	for _, card := range sorted {
		card.DrawShadow()
	}
	for _, card := range sorted {
		card.DrawCard()
	}
	for _, card := range sorted {
		// Undo state creation / capturing can't be handled at the end of Card.DrawContents() like it used to be because that doesn't happen
		// if the Card is offscreen. Now undo updating happens in its own function here.
		// We handle undos separately so that if drawing the contents of a card changes its properties / triggers an undo update,
		// that's reflected here.
		card.HandleUndos()
	}
	for _, card := range sorted {
		card.DrawLinks()
	}
	for _, draw := range page.Drawables {
		if draw.Draw != nil {
			draw.Draw()
		}
	}
	// This needs to be later than Update() so mouse buttons can be consumed in a Card's Draw() loop, for example, before the Selection detects the mouse button press
	page.Selection.Update()
	page.Selection.Draw()
	// Apply queued deletions: drop from selection and card list, mark invalid.
	for _, toDelete := range page.ToDelete {
		page.Selection.Remove(toDelete)
		for index, card := range page.Cards {
			if card == toDelete {
				card.Valid = false
				page.Cards[index] = nil
				page.Cards = append(page.Cards[:index], page.Cards[index+1:]...)
				break
			}
		}
	}
	// Apply queued restorations: re-append and mark valid.
	for _, toRestore := range page.ToRestore {
		// page.Selection.Add(toRestore)
		page.Cards = append(page.Cards, toRestore)
		toRestore.Valid = true
	}
	// Apply queued raises: move the card to the end of the list (topmost).
	for _, toRaise := range page.ToRaise {
		for index, other := range page.Cards {
			if other == toRaise {
				page.Cards = append(page.Cards[:index], append(page.Cards[index+1:], toRaise)...)
				break
			}
		}
	}
	page.ToDelete = []*Card{}
	page.ToRestore = []*Card{}
	page.ToRaise = []*Card{}
	page.UpdateLinks()
}
// Destroy tears the page down: destroys every card, then releases all of
// the page's references so they can be collected.
func (page *Page) Destroy() {
	for i := range page.Cards {
		page.Cards[i].Destroy()
	}
	page.Cards = nil
	page.Drawables = nil
	page.Grid = nil
	page.PointingSubpageCard = nil
	page.Selection = nil
	page.ToDelete = nil
	page.ToRaise = nil
	page.ToRestore = nil
	page.UpwardPage = nil
}
// Valid reports whether this page is reachable from the project's root
// page through a chain of valid sub-page cards.
func (page *Page) Valid() bool {
	switch {
	case page == page.Project.Pages[0]:
		// The root page is always valid.
		return true
	case page.PointingSubpageCard == nil, !page.PointingSubpageCard.Valid:
		// Deleted page or orphan.
		return false
	default:
		// Recurse upward to confirm a valid path to the root.
		return page.PointingSubpageCard.Page.Valid()
	}
}
// Name returns the page's display name: the description of the sub-page
// card pointing at it, or "Root" for the root page.
func (page *Page) Name() string {
	if page.PointingSubpageCard == nil {
		return "Root"
	}
	return page.PointingSubpageCard.Properties.Get("description").AsString()
}
// Serialize returns the page — id, pan, zoom, and all cards — as a JSON
// string.
func (page *Page) Serialize() string {
	data := "{}"
	data, _ = sjson.Set(data, "id", page.ID)
	data, _ = sjson.Set(data, "pan", page.Pan)
	data, _ = sjson.Set(data, "zoom", page.Zoom)
	// Sort the cards by their position so the serialization is more stable. (Otherwise, clicking on
	// a Card adjusts the sort order, and therefore the order in which Cards are serialized.)
	ordered := append([]*Card{}, page.Cards...)
	sort.Slice(ordered, func(i, j int) bool {
		a, b := ordered[i], ordered[j]
		if a.Rect.Y != b.Rect.Y {
			return a.Rect.Y < b.Rect.Y
		}
		return a.Rect.X < b.Rect.X
	})
	for _, card := range ordered {
		data, _ = sjson.SetRaw(data, "cards.-1", card.Serialize())
	}
	return data
}
// DeserializePageData restores the page's id, pan, and zoom from
// serialized page data, and keeps the global page ID counter ahead of any
// loaded ID so future pages stay unique.
func (page *Page) DeserializePageData(data string) {
	if id := gjson.Get(data, "id"); id.Exists() {
		page.ID = id.Uint()
	}
	log.Println("Deserializing page ", page.ID)
	lp := gjson.Get(data, "pan").Map()
	page.Pan.X = float32(lp["X"].Float())
	page.Pan.Y = float32(lp["Y"].Float())
	page.Zoom = float32(gjson.Get(data, "zoom").Float())
	// A missing or zero zoom decodes as 0; fall back to 1:1.
	if page.Zoom == 0 {
		page.Zoom = 1
	}
	// Advance the counter past this ID so NewPage never reuses it.
	if globalPageID < page.ID {
		globalPageID = page.ID + 1
	}
}
// DeserializeCards recreates the page's cards from the "cards" array in
// the given serialized page data.
func (page *Page) DeserializeCards(data string) {
	for _, cardData := range gjson.Get(data, "cards").Array() {
		log.Println("Deserializing card ", cardData.Get("id").Int())
		card := page.CreateNewCard(ContentTypeCheckbox)
		card.Deserialize(cardData.Raw)
	}
}
// AddDrawable registers a drawable to be rendered each frame in Draw.
func (page *Page) AddDrawable(drawable *Drawable) {
	page.Drawables = append(page.Drawables, drawable)
}
// RemoveDrawable removes the first occurrence of drawable from the page's
// drawable list, if present.
func (page *Page) RemoveDrawable(drawable *Drawable) {
	for i := range page.Drawables {
		if page.Drawables[i] != drawable {
			continue
		}
		// Clear the slot before re-slicing, then shift the tail left.
		page.Drawables[i] = nil
		page.Drawables = append(page.Drawables[:i], page.Drawables[i+1:]...)
		return
	}
}
// UpdateLinks re-establishes the card-to-card links recorded in
// DeserializationLinks, then clears the pending list.
func (page *Page) UpdateLinks() {
	for _, linkString := range page.DeserializationLinks {
		var start, end *Card
		// While the project is loading, link endpoints reference the IDs the
		// cards had when saved (LoadedID); afterwards they reference live IDs.
		if page.Project.Loading {
			start = page.CardByLoadedID(gjson.Get(linkString, "start").Int())
			end = page.CardByLoadedID(gjson.Get(linkString, "end").Int())
		} else {
			start = page.CardByID(gjson.Get(linkString, "start").Int())
			end = page.CardByID(gjson.Get(linkString, "end").Int())
		}
		if start != nil && end != nil {
			link, fresh := start.Link(end)
			joints := gjson.Get(linkString, "joints").Array()
			// If the link wasn't freshly created, then the joints should have been set already
			if link != nil && fresh {
				link.Joints = []*LinkJoint{}
				for _, joint := range joints {
					jm := joint.Map()
					link.Joints = append(link.Joints, NewLinkJoint(float32(jm["X"].Float()), float32(jm["Y"].Float())))
				}
			}
		}
	}
	page.DeserializationLinks = []string{}
}
// CreateNewCard creates a Card of the given content type centered on the
// mouse's world position, appends it to the page, captures an undo state,
// and logs the creation.
func (page *Page) CreateNewCard(contentType string) *Card {
	// Remember the last type created interactively (not during project load).
	if !page.Project.Loading {
		page.Project.LastCardType = contentType
	}
	card := NewCard(page, contentType)
	card.Rect.X = globals.Mouse.WorldPosition().X - (card.Rect.W / 2)
	card.Rect.Y = globals.Mouse.WorldPosition().Y - (card.Rect.H / 2)
	card.LockPosition()
	page.Cards = append(page.Cards, card)
	card.Valid = true
	page.Project.UndoHistory.Capture(NewUndoState(card))
	globals.EventLog.Log("Created new Card.", false)
	return card
}
// CardByID returns the card on this page with the given ID, or nil if no
// such card exists.
func (page *Page) CardByID(id int64) *Card {
	for i := range page.Cards {
		if page.Cards[i].ID == id {
			return page.Cards[i]
		}
	}
	return nil
}
// CardByLoadedID returns the card whose saved-file ID (LoadedID) matches
// id, or nil if no such card exists.
func (page *Page) CardByLoadedID(id int64) *Card {
	for i := range page.Cards {
		if page.Cards[i].LoadedID == id {
			return page.Cards[i]
		}
	}
	return nil
}
// DeleteCards marks the given cards invalid, notifies each of its
// deletion, and queues them for removal on the next Draw.
func (page *Page) DeleteCards(cards ...*Card) {
	// No need to log "Deleted 0 cards".
	if len(cards) == 0 {
		return
	}
	globals.EventLog.Log("Deleted %d Cards.", false, len(cards))
	msg := NewMessage(MessageCardDeleted, nil, nil)
	for _, card := range cards {
		card.Valid = false
		card.ReceiveMessage(msg)
	}
	page.ToDelete = append(page.ToDelete, cards...)
}
// RestoreCards marks the given cards valid again, notifies each of its
// restoration, and queues them for re-insertion on the next Draw.
func (page *Page) RestoreCards(cards ...*Card) {
	msg := NewMessage(MessageCardRestored, nil, nil)
	for _, card := range cards {
		card.Valid = true
		card.ReceiveMessage(msg)
	}
	page.ToRestore = append(page.ToRestore, cards...)
}
// CopySelectedCards replaces the copy buffer's contents with the page's
// currently selected cards and logs the copy (or cut).
func (page *Page) CopySelectedCards() {
	globals.CopyBuffer.Clear()
	for card := range page.Selection.Cards {
		globals.CopyBuffer.Copy(card)
	}
	count := len(globals.CopyBuffer.Cards)
	if count == 0 {
		return
	}
	verb := "Copied"
	if globals.CopyBuffer.CutMode {
		verb = "Cut"
	}
	globals.EventLog.Log(verb+" %d Cards.", false, count)
}
// PasteCards instantiates copies of the cards in the global copy buffer
// onto this page, remapping link endpoints to the new cards, offsetting
// the pasted group (optionally centering it on the mouse), capturing undo
// states, and — in cut mode — deleting the originals.
func (page *Page) PasteCards(offset Point, adhereToMousePosition bool) []*Card {
	// Suppress per-card event logging; a single summary is emitted at the end.
	prevEventLog := globals.EventLog.On
	globals.EventLog.On = false
	newCards := []*Card{}
	oldToNew := map[*Card]*Card{}
	page.Selection.Clear()
	invalidCut := false
	// First pass: create a blank card for each copied card. Sub-page cards
	// cannot be cut, so they are skipped (and excluded from oldToNew).
	for i := 0; i < len(globals.CopyBuffer.Cards); i++ {
		card := globals.CopyBuffer.Cards[i]
		if card.ContentType == ContentTypeSubpage && globals.CopyBuffer.CutMode {
			invalidCut = true
			globals.EventLog.Log("Cannot cut a sub-page card.", true)
			continue
		}
		newCard := page.CreateNewCard(ContentTypeCheckbox)
		newCards = append(newCards, newCard)
		oldToNew[globals.CopyBuffer.Cards[i]] = newCard
	}
	// Second pass: deserialize each copy, rewriting link start/end IDs to
	// point at the freshly created cards.
	for _, card := range globals.CopyBuffer.Cards {
		// If we try pasting sub-page cards and these were denied, skip them
		if _, exists := oldToNew[card]; !exists {
			continue
		}
		serialized := globals.CopyBuffer.CardsToSerialized[card]
		serialized, _ = sjson.Set(serialized, "id", oldToNew[card].ID)
		if links := gjson.Get(serialized, "links"); links.Exists() {
			for linkIndex, link := range links.Array() {
				for old, new := range oldToNew {
					if old.ID == link.Get("start").Int() {
						serialized, _ = sjson.Set(serialized, "links."+strconv.Itoa(linkIndex)+".start", new.ID)
					}
					if old.ID == link.Get("end").Int() {
						serialized, _ = sjson.Set(serialized, "links."+strconv.Itoa(linkIndex)+".end", new.ID)
					}
				}
			}
		}
		// BUG FIX: the original indexed newCards with the copy-buffer index
		// (newCard := newCards[i]); after a skipped sub-page card the indices
		// misalign and can go out of range. Look the new card up in the map
		// instead, which is always correct.
		newCard := oldToNew[card]
		newCard.Deserialize(serialized)
		page.Selection.Add(newCard)
	}
	// We do this because otherwise when creating an undo state below, the links wouldn't be included
	page.UpdateLinks()
	if adhereToMousePosition {
		// Center the pasted group on the mouse: average the card centers and
		// offset by the vector from that average to the mouse.
		// NOTE(review): with zero pasted cards this divides by zero — confirm
		// callers never pass adhereToMousePosition with an empty buffer.
		for _, card := range newCards {
			offset = offset.Add(Point{card.Rect.X + (card.Rect.W / 2), card.Rect.Y + (card.Rect.H / 2)})
		}
		offset = offset.Div(float32(len(newCards)))
		offset = globals.Mouse.WorldPosition().Sub(offset)
	}
	for _, card := range newCards {
		card.Rect.X += offset.X
		card.Rect.Y += offset.Y
		card.DisplayRect.X = card.Rect.X
		card.DisplayRect.Y = card.Rect.Y
		card.DisplayRect.W = card.Rect.W
		card.DisplayRect.H = card.Rect.H
		card.LockPosition()
	}
	for _, card := range newCards {
		page.Project.UndoHistory.Capture(NewUndoState(card))
	}
	if globals.CopyBuffer.CutMode {
		for _, card := range globals.CopyBuffer.Cards {
			// If we try pasting sub-page cards and these were denied, skip them
			if _, exists := oldToNew[card]; !exists {
				continue
			}
			card.Page.DeleteCards(card)
		}
		globals.CopyBuffer.CutMode = false
	}
	globals.EventLog.On = prevEventLog
	if len(globals.CopyBuffer.Cards) > 0 {
		globals.EventLog.Log("Pasted %d Cards.", false, len(globals.CopyBuffer.Cards))
	}
	if invalidCut {
		globals.CopyBuffer.Clear()
	}
	return newCards
}
// Raise queues the given card to be moved to the top of the draw order on
// the next Draw. A page with zero or one cards needs no raising.
func (page *Page) Raise(card *Card) {
	if len(page.Cards) > 1 {
		page.ToRaise = append(page.ToRaise, card)
	}
}
// HandleDroppedFiles creates a card appropriate to the file dropped onto
// the window: an Image card for images (or .tga), a Sound card for audio,
// a load-confirmation dialog for .plan project files, and a Note card for
// text. The created card is centered on the camera.
func (page *Page) HandleDroppedFiles(filePath string) {
	mime, _ := mimetype.DetectFile(filePath)
	mimeType := mime.String()
	var card *Card
	// We check for tga specifically because the mimetype doesn't seem to detect this properly.
	if strings.Contains(mimeType, "image") || filepath.Ext(filePath) == ".tga" {
		card = page.CreateNewCard(ContentTypeImage)
		card.Contents.(*ImageContents).LoadFileFrom(filePath)
	} else if strings.Contains(mimeType, "audio") {
		card = page.CreateNewCard(ContentTypeSound)
		card.Contents.(*SoundContents).LoadFileFrom(filePath)
	} else if strings.Contains(mimeType, "json") && strings.Contains(filepath.Ext(filePath), ".plan") {
		// A dropped project file: ask for confirmation before loading it.
		globals.Project.LoadConfirmationTo = filePath
		loadConfirm := globals.MenuSystem.Get("confirm load")
		loadConfirm.Center()
		loadConfirm.Open()
	} else if strings.Contains(mimeType, "text") {
		text, err := os.ReadFile(filePath)
		if err != nil {
			globals.EventLog.Log(err.Error(), false)
		} else {
			// Start as a checkbox, size the card to the text, then switch the
			// contents to a Note.
			card = page.CreateNewCard(ContentTypeCheckbox)
			card.Properties.Get("description").Set(string(text))
			size := globals.TextRenderer.MeasureText([]rune(string(text)), 1)
			card.Recreate(size.X, size.Y)
			card.SetContents(ContentTypeNote)
		}
	} else {
		globals.EventLog.Log("Dropped file [%s] is not a recognized image, audio, or text file format.", true, filePath)
	}
	// Center whichever card was created on the camera.
	if card != nil {
		card.Rect.X = page.Project.Camera.Position.X - (card.Rect.W / 2)
		card.Rect.Y = page.Project.Camera.Position.Y - (card.Rect.H / 2)
		card.LockPosition()
	}
}
// HandleExternalPaste creates cards from the OS clipboard: an Image card
// for clipboard images, Image/Sound cards for pasted paths or links of a
// known type, and Checkbox/Numbered/Note cards for pasted text (text whose
// first non-blank line begins with "[" is parsed as a to-do list).
func (page *Page) HandleExternalPaste() {
	if clipboardImg := clipboard.Read(clipboard.FmtImage); clipboardImg != nil {
		// Clipboard image: write it to a temp file and load it into an Image
		// card flagged to save the image with the project.
		if filePath, err := WriteImageToTemp(clipboardImg); err != nil {
			globals.EventLog.Log(err.Error(), false)
		} else {
			globals.Resources.Get(filePath).TempFile = true
			globals.Resources.Get(filePath).SaveFile = true
			card := page.CreateNewCard(ContentTypeImage)
			contents := card.Contents.(*ImageContents)
			contents.LoadFileFrom(filePath)
			card.Properties.Get("saveimage").Set(true)
		}
	} else if txt := clipboard.Read(clipboard.FmtText); txt != nil {
		text := string(txt)
		if res := globals.Resources.Get(text); res != nil && res.MimeType != "" {
			// The pasted text resolves to a known resource (file path / link).
			if strings.Contains(res.MimeType, "image") || res.Extension == ".tga" || res.Extension == ".svg" {
				card := page.CreateNewCard(ContentTypeImage)
				card.Contents.(*ImageContents).LoadFileFrom(text)
			} else if strings.Contains(res.MimeType, "audio") {
				card := page.CreateNewCard(ContentTypeSound)
				card.Contents.(*SoundContents).LoadFileFrom(text)
			} else {
				globals.EventLog.Log("WARNING: Unsure of type of file at pasted link:\n%s\nNo card was created for this link.", true, text)
			}
		} else {
			// Plain text: normalize line endings and drop blank lines.
			text = strings.ReplaceAll(text, "\r\n", "\n")
			textLines := strings.Split(text, "\n")
			// Get rid of empty starting and ending
			tl := []string{}
			for _, t := range textLines {
				if len(strings.TrimSpace(t)) > 0 {
					tl = append(tl, t)
				}
			}
			// for strings.TrimSpace(textLines[0]) == "" && len(textLines) > 0 {
			// textLines = textLines[1:]
			// }
			// for strings.TrimSpace(textLines[len(textLines)-1]) == "" && len(textLines) > 0 {
			// textLines = textLines[:len(textLines)-1]
			// }
			if len(tl) == 0 {
				return
			}
			todoList := strings.HasPrefix(tl[0], "[")
			if todoList {
				// Merge continuation lines (not starting with "[") into the
				// preceding task line.
				linesOut := []string{}
				for _, clipLine := range tl {
					if len(clipLine) == 0 {
						continue
					}
					if clipLine[0] != '[' {
						linesOut[len(linesOut)-1] += "\n" + clipLine
					} else {
						linesOut = append(linesOut, clipLine)
					}
				}
				globals.EventLog.On = false
				pos := globals.Mouse.WorldPosition().LockToGrid()
				for _, taskLine := range linesOut {
					var card *Card
					// NOTE(review): taskLine[1] assumes every task line has at
					// least two characters; a bare "[" line would panic here —
					// confirm inputs or add a guard.
					if taskLine[1] == 'x' || taskLine[1] == 'o' || taskLine[1] == ' ' {
						// "[x]" / "[o]" / "[ ]" prefix → checkbox card.
						card = page.CreateNewCard(ContentTypeCheckbox)
						card.Rect.X = pos.X
						card.Rect.Y = pos.Y
						card.LockPosition()
						completed := taskLine[:3] != "[ ]"
						taskLine = taskLine[3:]
						taskLine = strings.TrimSpace(taskLine)
						textMeasure := globals.TextRenderer.MeasureText([]rune(taskLine), 1)
						card.Recreate(textMeasure.X+(globals.GridSize*2), textMeasure.Y+(card.Contents.DefaultSize().Y-globals.GridSize))
						card.Properties.Get("description").Set(taskLine)
						if completed {
							card.Properties.Get("checked").Set(true)
						}
					} else {
						// "[current/max]" prefix → numbered (progress) card.
						card = page.CreateNewCard(ContentTypeNumbered)
						card.Rect.X = pos.X
						card.Rect.Y = pos.Y
						card.LockPosition()
						endingBracket := strings.Index(taskLine, "]")
						taskLineText := taskLine[endingBracket+1:]
						taskLineText = strings.TrimSpace(taskLineText)
						slashIndex := strings.IndexAny(taskLine, `/\`)
						if slashIndex > 0 {
							current, _ := strconv.ParseFloat(taskLine[1:slashIndex], 64)
							max, _ := strconv.ParseFloat(taskLine[slashIndex+1:endingBracket], 64)
							card.Properties.Get("current").Set(current)
							card.Properties.Get("maximum").Set(max)
						}
						textMeasure := globals.TextRenderer.MeasureText([]rune(taskLineText), 1)
						card.Recreate(textMeasure.X+(globals.GridSize*2), textMeasure.Y+(card.Contents.DefaultSize().Y-globals.GridSize))
						card.Properties.Get("description").Set(taskLineText)
					}
					// Stack the created cards vertically.
					pos.Y += card.Rect.H
				}
				globals.EventLog.On = true
				globals.EventLog.Log("Pasted %d new Checkbox Tasks from clipboard content.", false, len(linesOut))
			} else {
				// Free-form text → a Note card, word-wrapped at maxWidth.
				maxWidth := float32(512)
				card := page.CreateNewCard(ContentTypeNote)
				card.Properties.Get("description").Set(text)
				size := globals.TextRenderer.MeasureText([]rune(text), 1)
				note := card.Contents.(*NoteContents)
				note.Label.SetText([]rune(text))
				if size.X > maxWidth {
					newSize := globals.TextRenderer.MeasureTextAutowrap(maxWidth, text)
					card.Recreate(newSize.X+(globals.GridSize*4), newSize.Y)
				} else {
					width := size.X + (globals.GridSize * 2)
					height := size.Y
					card.Recreate(width, height)
				}
			}
		}
	} else {
		globals.EventLog.Log("No data found in clipboard.", true)
	}
	page.UpdateStacks = true
}
// SelectNextCard moves the selection to the next (or, with the "previous"
// keybinding held, the previous) card in top-to-bottom, left-to-right
// order, optionally focusing the camera on it. Returns the newly selected
// card, or nil if the page has no cards.
func (page *Page) SelectNextCard() *Card {
	var nextCard *Card
	kb := globals.Keybindings
	cardList := append([]*Card{}, page.Cards...)
	if len(cardList) > 0 {
		// Sort by Y, then X, so "next" reads like lines of text.
		sort.SliceStable(cardList, func(i, j int) bool {
			if cardList[i].Rect.Y == cardList[j].Rect.Y {
				return cardList[i].Rect.X < cardList[j].Rect.X
			}
			return cardList[i].Rect.Y < cardList[j].Rect.Y
		})
		selectionIndex := 0
		prev := false
		if kb.Pressed(KBSelectCardPrev) {
			prev = true
		}
		// Step from the first currently selected card.
		for i, c := range cardList {
			if c.selected {
				if prev {
					selectionIndex = i - 1
				} else {
					selectionIndex = i + 1
				}
				break
			}
		}
		// Clamp to the list bounds (no wrap-around).
		if selectionIndex < 0 {
			selectionIndex = 0
		}
		if selectionIndex >= len(cardList)-1 {
			selectionIndex = len(cardList) - 1
		}
		if selectionIndex < len(cardList) {
			nextCard = cardList[selectionIndex]
			page.Selection.Clear()
			page.Selection.Add(nextCard)
			if globals.Settings.Get(SettingsFocusOnSelectingWithKeys).AsBool() {
				page.Project.Camera.FocusOn(false, page.Selection.AsSlice()...)
			}
			// NOTE(review): only the "next" shortcut's keys are consumed even
			// when the "previous" binding triggered this move — confirm intended.
			kb.Shortcuts[KBSelectCardNext].ConsumeKeys()
		}
	}
	return nextCard
}
// SendMessage delivers msg to every card on the page.
func (page *Page) SendMessage(msg *Message) {
	for i := range page.Cards {
		page.Cards[i].ReceiveMessage(msg)
	}
}
|
//如下对int封装为另一个类型,并提供一个Increase方法
package main
import "fmt"
// TZ wraps int so that methods (behavior) can be attached to it.
type TZ int

// A is an empty struct, unused in this example.
type A struct {
}

func main() {
	var counter TZ
	counter.Increase(100)
	fmt.Println(counter)
}

// Increase adds num to the receiver in place. The int argument must be
// converted explicitly: += requires both operands to have the same type,
// and TZ is a distinct type even though its underlying type is int.
func (tz *TZ) Increase(num int) {
	*tz += TZ(num)
}
|
package function
import (
"fmt"
"sort"
)
// ExampleSort demonstrates sorting ints, float64s, strings, and structs
// (the latter via a custom less function on the cost field).
func ExampleSort() {
	// Integers.
	ints := []int{-3, 2, 0, 8, -5, 1}
	sort.Ints(ints)
	fmt.Println(ints)
	// Floats.
	floats := []float64{-3.5, 2.5, 0.5, 8.5, -5.5, 1.5}
	sort.Float64s(floats)
	fmt.Println(floats)
	// Strings (lexicographic).
	words := []string{"orange", "lemon", "banana", "apple"}
	sort.Strings(words)
	fmt.Println(words)
	// Structs, ordered by cost.
	edges := []struct {
		to int
		cost int
	}{
		{3, 1},
		{2, 10},
		{4, 4},
	}
	sort.Slice(edges, func(i, j int) bool {
		// define Less function
		return edges[i].cost < edges[j].cost
	})
	fmt.Println(edges)
	// Output:
	// [-5 -3 0 1 2 8]
	// [-5.5 -3.5 0.5 1.5 2.5 8.5]
	// [apple banana lemon orange]
	// [{3 1} {4 4} {2 10}]
}
|
package requests
import (
"encoding/json"
"testing"
"github.com/mitchellh/mapstructure"
"github.com/stretchr/testify/assert"
)
// TestDecodeWalletRepresentativeSetRequest verifies JSON decoding of a
// wallet_representative_set request into its typed struct.
func TestDecodeWalletRepresentativeSetRequest(t *testing.T) {
	encoded := `{"action":"wallet_representative_set","wallet":"1234","representative":"nano_2"}`
	var decoded WalletRepresentativeSetRequest
	// Fail fast on malformed fixture JSON instead of silently asserting
	// against a zero-value struct (the original ignored this error).
	if err := json.Unmarshal([]byte(encoded), &decoded); err != nil {
		t.Fatalf("unmarshal request: %v", err)
	}
	assert.Equal(t, "wallet_representative_set", decoded.Action)
	assert.Equal(t, "1234", decoded.Wallet)
	assert.Equal(t, "nano_2", decoded.Representative)
	assert.Nil(t, decoded.BpowKey)
}
// TestMapStructureDecodeWalletRepresentativeSetRequest verifies decoding a
// generic map into the typed request struct via mapstructure.
func TestMapStructureDecodeWalletRepresentativeSetRequest(t *testing.T) {
	request := map[string]interface{}{
		"action":         "wallet_representative_set",
		"wallet":         "1234",
		"representative": "nano_2",
	}
	var decoded WalletRepresentativeSetRequest
	// Fail fast on a decode error instead of asserting against a
	// zero-value struct (the original ignored this error).
	if err := mapstructure.Decode(request, &decoded); err != nil {
		t.Fatalf("decode request: %v", err)
	}
	assert.Equal(t, "wallet_representative_set", decoded.Action)
	assert.Equal(t, "1234", decoded.Wallet)
	assert.Equal(t, "nano_2", decoded.Representative)
	assert.Nil(t, decoded.BpowKey)
}
|
package el
import (
"fmt"
"strings"
"unicode"
)
type EL struct {
alphabetLength int
runeAlphabet map[rune]int
intAlphabet map[int]rune
decodeMap map[int]string
encodeMap map[string]string
}
// Init initializes the variables for the given EL variable
// Init initializes the lookup tables for the given EL variable:
// decodeMap/encodeMap translate between Greek letters and two-digit codes
// (tens digit = row 1-5, units digit = column 1-5), while
// intAlphabet/runeAlphabet map between letters and their 0-based
// positions in the 24-letter Greek alphabet.
func (el *EL) Init() {
	el.decodeMap = map[int]string{
		11: "Α", 12: "Β", 13: "Γ", 14: "Δ", 15: "Ε",
		21: "Ζ", 22: "Η", 23: "Θ", 24: "Ι", 25: "Κ",
		31: "Λ", 32: "Μ", 33: "Ν", 34: "Ξ", 35: "Ο",
		41: "Π", 42: "Ρ", 43: "Σ", 44: "Τ", 45: "Υ",
		51: "Φ", 52: "Χ", 53: "Ψ", 54: "Ω"}
	el.encodeMap = map[string]string{
		"Α": "11", "Β": "12", "Γ": "13", "Δ": "14", "Ε": "15",
		"Ζ": "21", "Η": "22", "Θ": "23", "Ι": "24", "Κ": "25",
		"Λ": "31", "Μ": "32", "Ν": "33", "Ξ": "34", "Ο": "35",
		"Π": "41", "Ρ": "42", "Σ": "43", "Τ": "44", "Υ": "45",
		"Φ": "51", "Χ": "52", "Ψ": "53", "Ω": "54"}
	el.intAlphabet = map[int]rune{
		0: 'Α', 1: 'Β', 2: 'Γ', 3: 'Δ', 4: 'Ε',
		5: 'Ζ', 6: 'Η', 7: 'Θ', 8: 'Ι', 9: 'Κ',
		10: 'Λ', 11: 'Μ', 12: 'Ν', 13: 'Ξ', 14: 'Ο',
		15: 'Π', 16: 'Ρ', 17: 'Σ', 18: 'Τ', 19: 'Υ',
		20: 'Φ', 21: 'Χ', 22: 'Ψ', 23: 'Ω'}
	el.runeAlphabet = map[rune]int{
		'Α': 0, 'Β': 1, 'Γ': 2, 'Δ': 3, 'Ε': 4,
		'Ζ': 5, 'Η': 6, 'Θ': 7, 'Ι': 8, 'Κ': 9,
		'Λ': 10, 'Μ': 11, 'Ν': 12, 'Ξ': 13, 'Ο': 14,
		'Π': 15, 'Ρ': 16, 'Σ': 17, 'Τ': 18, 'Υ': 19,
		'Φ': 20, 'Χ': 21, 'Ψ': 22, 'Ω': 23}
	el.alphabetLength = len(el.runeAlphabet)
}
// stripSpace removes all whitespace and control characters from the given
// string, returning the remaining runes in order.
func stripSpace(s string) string {
	// Use strings.Builder rather than += concatenation in a loop, which is
	// quadratic (each += copies the whole accumulated string).
	var b strings.Builder
	b.Grow(len(s))
	for _, c := range s {
		if unicode.IsSpace(c) || unicode.IsControl(c) {
			continue
		}
		b.WriteRune(c)
	}
	return b.String()
}
// toUpper converts the given string to all uppercase characters.
func toUpper(s string) string {
	return strings.ToUpper(s)
}
// isValidString reports whether every letter in s belongs to the Greek
// alphabet known to el. Whitespace, symbols, and punctuation are ignored.
// On the first rune outside the alphabet it returns false and an error
// naming the offending string.
func (el *EL) isValidString(s string) (bool, error) {
	for _, c := range []rune(s) {
		if unicode.IsSpace(c) || unicode.IsSymbol(c) || unicode.IsPunct(c) {
			continue
		}
		_, result := el.runeAlphabet[c]
		if !result {
			// Go convention: error strings are lowercase and unpunctuated
			// (the original began with a capitalized "The").
			err := fmt.Errorf("the given string is not valid for the language: %s", s)
			return false, err
		}
	}
	return true, nil
}
|
package sgs
import (
"testing"
"time"
)
// TestScript3 runs three scripted players against a test game server for
// five seconds of real time and checks the resulting command log.
// NOTE(review): timing-based — the fixed 5s run and 50ms drain make this
// test sensitive to scheduler delays on loaded machines.
func TestScript3(t *testing.T) {
	// Server requires two clients to start an app session; ticks every 10ms.
	srv, _ := makeSSrv(SSrvParam{
		Profile: "test",
		DefaultClients: 2,
		MinimalClients: 2,
		OptimalWS: 30,
		BaseTickMs: 10,
		ABF: buildMockApp,
	})
	rl := makeresLogger()
	p1 := makePlayer("regn", 22, rl, srv)
	p2 := makePlayer("yaya", 33, rl, srv)
	p3 := makePlayer("maotao", 44, rl, srv)
	tr := true
	fls := false
	// Scripted actions: (delay ms, action, pointer to expected outcome —
	// presumably join/quit session queue; confirm against script type).
	p1.s = script{
		{1000, scriptedJSQ, &tr},
		{2000, scriptedJSQ, &tr},
		{4000, scriptedQSQ, &fls},
	}
	p2.s = script{
		{3000, scriptedJSQ, &tr},
	}
	p3.s = script{
		{1500, scriptedQSQ, &fls},
	}
	go rl.run()
	go p1.run(t)
	go p2.run(t)
	go p3.run(t)
	// Let the scenario play out, stop the logger, and give it a moment to drain.
	<-time.After(time.Duration(5) * time.Second)
	rl.mch <- "quit"
	<-time.After(time.Duration(50) * time.Millisecond)
	// Expect players 22 and 33 to have an app initialized and run.
	if !rl.cl.conformTo(commandLog{
		commandLE{3000, _CMD_INIT_APP, 22},
		commandLE{3000, _CMD_INIT_APP, 33},
		commandLE{3000, CMD_APP_RUN, 22},
		commandLE{3000, CMD_APP_RUN, 33},
	}) {
		t.Errorf("Command Log does not conform to expectation")
	}
}
|
// Copyright 2021 BoCloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package about
import "fmt"
var (
	// Placeholder build metadata. NOTE(review): these look like targets for
	// -ldflags "-X" injection at build time — confirm before renaming.
	version = "0.0.0" // semantic version X.Y.Z
	gitCommit = "00000000" // sha1 from git
	buildTime = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
)

// DisplayVersion prints the version, build time, and git commit to stdout.
func DisplayVersion() {
	fmt.Printf("Version: %s\nBuildTime: %s\nGitCommit: %s\n", version, buildTime, gitCommit)
}
|
package models
// Job completion status codes carried in JobResult.Status.
const (
	JobOk uint8 = 0
	JobFailed uint8 = 1
)

// Job describes a single unit of work identified by a token, URL, and app.
type Job struct {
	Token string `json:"token"` // correlation/auth token — TODO confirm semantics
	Url string `json:"url"` // target URL to process
	AppID string `json:"app_id"` // owning application ID
}

// JobResult is a Job plus the outcome of processing it.
type JobResult struct {
	Job
	HTML string // resulting HTML payload — assumption from field name; confirm
	Status uint8 // JobOk or JobFailed
}
|
package main
import (
"context"
"fmt"
"github.com/coreos/etcd/clientv3"
"time"
)
// main demonstrates a distributed lock built on etcd: it grants a lease,
// keeps it alive in the background, then uses a transaction to atomically
// claim a lock key, does some "work", and releases everything on exit.
func main() {
	// Start etcd on the server first, e.g.:
	// nohup ./etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls 'http://0.0.0.0:2379' &
	config := clientv3.Config{
		Endpoints: []string{"127.0.0.1:2379"}, // cluster endpoint list
		DialTimeout: 5 * time.Second,
	}
	// Create a client.
	client, err := clientv3.New(config)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("链接成功")
	// The lock auto-expires via a lease; ops + txn (if/then/else) do the
	// atomic claim.
	// Step 1: acquire — grant a lease, keep it renewed, and try to claim a
	// key under it.
	lease := clientv3.NewLease(client)
	// A 5-second lease.
	leaseGrantResp, err := lease.Grant(context.TODO(), 5)
	if err != nil {
		fmt.Println(err)
		return
	}
	leaseId := leaseGrantResp.ID
	// Context used to cancel the automatic renewal.
	ctx, cancelFunc := context.WithCancel(context.TODO())
	defer cancelFunc() // ensure renewal stops when the function exits
	defer lease.Revoke(context.TODO(), leaseId) // revoke the lease
	// Automatically renew the lease.
	keepAliveChan, err := lease.KeepAlive(ctx, leaseId)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Goroutine that consumes keep-alive responses.
	go func() {
		for {
			select {
			case keepResp := <-keepAliveChan:
				// BUG FIX: the original tested `keepAliveChan == nil`, which is
				// never true inside this case. When the lease expires or
				// KeepAlive stops, the channel is closed and the received
				// value is nil — so test keepResp instead.
				if keepResp == nil {
					fmt.Println("租约已经失效了")
					goto END
				}
				// One response arrives per renewal (roughly every second).
				fmt.Println("收到自动续租应答", keepResp.ID)
			}
		}
	END:
		fmt.Println("结束接收应答的协程")
	}()
	// If the key does not exist, claim it; otherwise the lock is taken.
	kv := clientv3.NewKV(client)
	// Create the transaction.
	txn := kv.Txn(context.TODO())
	// Define it: if the key has never been created (CreateRevision == 0),
	// put it under our lease; otherwise read the current holder.
	txn.If(clientv3.Compare(clientv3.CreateRevision("/cron/lock/job9"), "=", 0)).
		Then(clientv3.OpPut("/cron/lock/job9", "xxx", clientv3.WithLease(leaseId))).
		Else(clientv3.OpGet("/cron/lock/job9")) // lock acquisition failed
	txnResp, err := txn.Commit()
	if err != nil {
		fmt.Println(err)
		return // commit failed
	}
	// Did we get the lock?
	if !txnResp.Succeeded {
		fmt.Println("锁被占用:", string(txnResp.Responses[0].GetResponseRange().Kvs[0].Value))
		return
	}
	// Step 2: do the work — safe while we hold the lock.
	fmt.Println("处理任务")
	time.Sleep(5 * time.Second)
	// Step 3: release — the deferred cancel + revoke free the lock.
}
|
package model
import (
"context"
"github.com/gorhill/cronexpr"
"time"
)
// JobLog is data carrier for the log for job
type JobLog struct {
	JobName string `json:"jobName" bson:"jobName"` // job name
	Command string `json:"command" bson:"command"` // job shell command
	Err string `json:"err" bson:"err"` // err string
	Output string `json:"output" bson:"output"` // job output
	PlanTime int64 `json:"planTime" bson:"planTime"` // job planned start time
	ScheduleTime int64 `json:"scheduleTime" bson:"scheduleTime"` // actual scheduled time
	StartTime int64 `json:"startTime" bson:"startTime"` // actual start time of the job
	EndTime int64 `json:"endTime" bson:"endTime"` // actual job end time
}
// LogBatch groups multiple log entries so they can be handled together.
type LogBatch struct {
	Logs []interface{} // multiple logs
}
// Job is a cron job
type Job struct {
	Name string `json:"name"` // job name
	Command string `json:"command"` // shell command
	CronExpr string `json:"cronExpr"` // cron expression
}
// JobExecuteInfo wraps around job and has its meta information
type JobExecuteInfo struct {
	Job *Job // actual job
	PlanTime time.Time // planned exec time in theory
	RealTime time.Time // actual exec time
	CancelCtx context.Context // context
	CancelFunc context.CancelFunc // used to cancel the exec
}
// JobExecuteResult carries the outcome of one job execution.
type JobExecuteResult struct {
	ExecuteInfo *JobExecuteInfo
	Output []byte // job output
	Err error // err
	StartTime time.Time // start time
	EndTime time.Time // end time
}
// JobSchedulePlan pairs a job with its parsed cron expression and the
// next time it is due to run.
type JobSchedulePlan struct {
	Job *Job
	Expr *cronexpr.Expression
	NextTime time.Time
}
// JobEvent describes a change to a job (see the JobEvent* constants).
type JobEvent struct {
	EventType int // SAVE, DELETE
	Job *Job
}
const (
	// Job event types carried in JobEvent.EventType.
	JobEventSave = 1
	JobEventDelete = 2
	JobEventKill = 3
	// etcd key prefixes used by the scheduler.
	JobSaveDir = "/croncord/jobs/"
	JobKillerDir = "/croncord/killer/"
	JobLockDir = "/croncord/lock/"
	JobWorkerDir = "/croncord/workers/"
)
|
package main
import (
bouncer "github.com/Karagar/final_project/bouncer"
)
// main constructs the bouncer service and starts it.
func main() {
	svc := &bouncer.Service{}
	svc.InitService()
}
|
package boshio
import (
"fmt"
"net"
"net/http"
"net/url"
"os"
"time"
)
func NewHTTPClient(host string, wait time.Duration) HTTPClient {
return HTTPClient{
Host: host,
Wait: wait,
Client: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
// The OS determines the number of failed keepalive probes before the connection is closed.
// The default is 9 retries on Linux.
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 60 * time.Second,
DisableKeepAlives: true, // don't re-use TCP connections between requests
},
},
}
}
type HTTPClient struct {
Host string
Wait time.Duration
Client *http.Client
}
// Do sends req via the wrapped client, filling in the configured Host and
// scheme when the request URL is relative, and retrying after h.Wait on
// temporary network errors.
//
// NOTE(review): net.Error.Temporary is deprecated, and this loop retries
// without bound while the error stays temporary — consider a retry cap.
func (h HTTPClient) Do(req *http.Request) (*http.Response, error) {
	root, err := url.Parse(h.Host)
	if err != nil {
		return &http.Response{}, fmt.Errorf("failed to parse URL: %s", err)
	}
	// Relative request: direct it at the configured host.
	if req.URL.Host == "" {
		req.URL.Host = root.Host
		req.URL.Scheme = root.Scheme
	}
	var resp *http.Response
	for {
		resp, err = h.Client.Do(req)
		if netErr, ok := err.(net.Error); ok {
			if netErr.Temporary() {
				fmt.Fprintf(os.Stderr, "Retrying on temporary error: %s", netErr.Error())
				time.Sleep(h.Wait)
				continue
			}
		}
		break
	}
	return resp, err
}
|
package util
import (
"time"
"gopkg.in/pg.v3"
"fmt"
)
var Db *pg.DB
// Init connects the package-level Db handle to the single database
// described by the server config (first configured host).
func Init() {
	config := InitConfig()
	Db = singleConnect(config)
}
// singleConnect opens one pg connection to the first configured host.
func singleConnect(config *ServerConfig) *pg.DB {
	return pg.Connect(pgOptions(config.Host[0], config.Port, config.User, config.Password, config.Database))
}
// Index is the number of database shards used by the production sharding
// scheme (see CaculateDbAndTableIndex).
var Index = 32

//var Dbs [Index] *pg.DB

// Dbs holds one connection per shard; filled by InitDbs.
var Dbs []*pg.DB
// InitDbs opens one connection per shard into the package-level Dbs slice.
func InitDbs() {
	config := InitConfig()
	// Use the shared Index variable instead of a hard-coded 32 so the
	// slice length stays in sync with the shard count.
	Dbs = make([]*pg.DB, Index)
	connect(config, Dbs)
}
// connect fills dbs with one connection per configured host, indexed by
// the host's position in the config.
// NOTE(review): if len(config.Host) > len(dbs) this panics on the index —
// confirm the config guarantees at most len(dbs) hosts.
func connect(config *ServerConfig, dbs []*pg.DB) {
	for i, host := range config.Host{
		dbs[i] = pg.Connect(pgOptions(host, config.Port, config.User, config.Password, config.Database))
	}
}
// pgOptions builds the pg connection options for a single host, applying
// the pool sizing and timeout policy shared by every connection.
func pgOptions(host string, port string, user string, password string, database string) *pg.Options {
	return &pg.Options{
		Host: host,
		Port: port,
		User: user,
		Password: password,
		Database: database,
		DialTimeout: 30 * time.Second,
		ReadTimeout: 10 * time.Second,
		WriteTimeout: 10 * time.Second,
		PoolSize: 10,
		PoolTimeout: 30 * time.Second,
		IdleTimeout: 10 * time.Second,
		IdleCheckFrequency: 100 * time.Millisecond,
	}
}
// CaculateDbAndTableIndex maps a user ID to a (database shard, table)
// index pair, sharding on the last four digits of the ID.
// (Name keeps the existing "Caculate" spelling for API compatibility.)
func CaculateDbAndTableIndex(userId int) (int, int) {
	shardKey := userId % 10000
	// Production sharding (Index databases, Index tables each):
	//   dbIndex := shardKey % Index
	//   tableIndex := shardKey / Index % Index
	// Local test environment: one database, two tables.
	return shardKey % 1, shardKey / 1 % 2
}
// CaculateDbAndTable returns the shard connection and relation table name
// ("relation_<index>") for the given user.
func CaculateDbAndTable(userId int) (*pg.DB, string) {
	dbIndex, tableIndex := CaculateDbAndTableIndex(userId)
	return Dbs[dbIndex], fmt.Sprintf("%s_%d", "relation", tableIndex)
}
|
package main
import (
"fmt"
"time"
)
// Sender is a send-only view of a string channel.
type Sender chan<- string
// Receiver is a receive-only view of a string channel.
type Receiver <-chan string
// main demonstrates buffered vs. unbuffered channels and send-only /
// receive-only channel types. Sleeps order the printed output; the demo
// depends on that timing.
func main() {
	// Buffered channel: the goroutine's send completes immediately even
	// though the receive happens two seconds later.
	ch1 := make(chan string, 3)
	go func() {
		fmt.Println("start send to ch1")
		ch1 <- "hello"
		fmt.Println("end send to ch1")
	}()
	time.Sleep(time.Duration(2)*time.Second)
	var value string = "receive from ch1: " + <- ch1
	fmt.Println(value)
	close(ch1)
	// Unbuffered channel accessed through direction-restricted views:
	// sender can only send, receiver can only receive.
	ch2 := make(chan string, 0)
	var sender Sender = ch2
	var receiver Receiver = ch2
	go func() {
		fmt.Println("start send to ch2")
		sender <- "hello"
		fmt.Println("end send to ch2")
	}()
	time.Sleep(time.Duration(2)*time.Second)
	var value2 string = "receive from ch2: " + <- receiver
	fmt.Println(value2)
	close(ch2)
}
|
package role
// Available roles.
// Available roles.
//
// NOTE(review): values appear ordered from most to least privileged
// (SYSTEM > ADMIN > CREATE > WRITE > READ) — confirm against the
// authorization checks that consume them.
const (
	System = "SYSTEM"
	Admin = "ADMIN"
	Create = "CREATE"
	Write = "WRITE"
	Read = "READ"
)
|
package main
import (
"crypto/rand"
"encoding/hex"
"fmt"
"flag"
)
var (
	// size controls how many random bytes are read; the hex output has
	// twice this many characters.
	size = flag.Int("bytes", 4, "How many bytes should be read. The number of characters in the output will be twice this value.")
)

// main reads *size cryptographically secure random bytes and prints them
// hex-encoded.
func main() {
	flag.Parse()
	buf := make([]byte, *size)
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", hex.EncodeToString(buf))
}
|
package main
import (
"encoding/json"
"fmt"
"time"
)
// OnlineInit runs infinit loop to send online users count to all clients
func onlineInit(hub *Hub) {
message := make([]interface{}, 2)
ticker := time.NewTicker(10 * time.Second)
quit := make(chan struct{})
for {
select {
case <-ticker.C:
message[0] = "online"
users := getStats(hub).Users
message[1] = users
stackPutKeyValues("ws.online", users)
msg, err := json.Marshal(message)
if err != nil {
fmt.Println("Error on json encode online stats", err)
} else {
hub.broadcast <- msg
}
case <-quit:
ticker.Stop()
return
}
}
}
|
package jsonschema
import (
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)
// ExtV1CRDOpenAPIV3Schema defines the schema for
// ExtV1CRDOpenAPIV3Schema returns an object schema requiring apiVersion
// and kind string properties, with the embedded-resource marker set.
func ExtV1CRDOpenAPIV3Schema() extv1.JSONSchemaProps {
	return extv1.JSONSchemaProps{
		Type: Object,
		Properties: map[string]extv1.JSONSchemaProps{
			"apiVersion": StringProp,
			"kind":       StringProp,
		},
		Required: []string{"apiVersion", "kind"},
		// trigger saving the raw payload as a special column
		XEmbeddedResource: true,
	}
}
|
package main
import (
"bufio"
"bytes"
"io"
)
// Scanner represents a lexical scanner
type Scanner struct {
	r *bufio.Reader // buffered source the scanner reads runes from
}
// NewScanner returns a new instance of Scanner reading from r.
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{r: bufio.NewReader(r)}
}
// read reads the next rune from the buffered reader.
// Returns the eof sentinel (rune(0)) if an error occurs (or io.EOF is returned).
func (s *Scanner) read() rune {
	r, _, err := s.r.ReadRune()
	if err != nil {
		return eof
	}
	return r
}
// unread pushes the most recently read rune back onto the reader. The error
// is ignored: UnreadRune only fails when nothing was read beforehand.
func (s *Scanner) unread() { _ = s.r.UnreadRune() }
// Scan returns the next token and literal value.
//
// Dispatch order: whitespace, identifiers and numbers go to the scanXxx
// helpers; single- and two-rune operators go to the rune switch; identifiers
// that are keywords ("max"/"min") fall through to the final keyword switch,
// which reclassifies them as Operator tokens.
func (s *Scanner) Scan() (t Token, lit string) {
	// read the next rune.
	r := s.read()
	sr := string(r)
	// if we see whitespace then consume all contiguous whitespace.
	// if we see a letter then consume as an identifier keyword word.
	if isWhitespace(r) {
		s.unread()
		return s.scanWhitespace()
	} else if isLetter(r) {
		s.unread()
		t, lit = s.scanIdentifier()
		if !isKeyword(lit) {
			// plain identifier: return it unchanged.
			return
		}
		// keyword: keep going so the keyword switch below can classify it
		// (no rune case matches a letter, so control reaches that switch).
		sr = lit
	} else if isDigit(r) {
		s.unread()
		return s.scanDigit()
	}
	// rune cases
	switch r {
	case eof:
		return EOF, ""
	case '+':
		// check +\ and +/ cases; otherwise push the lookahead rune back and
		// emit a plain '+'.
		r1 := s.read()
		if r1 == '\\' {
			return Operator, "+\\"
		} else if r1 == '/' {
			return Operator, "+/"
		}
		s.unread()
		return Operator, string(r)
	case '-':
		return Operator, string(r)
	case '/':
		return Operator, string(r)
	case '*':
		r1 := s.read()
		// todo(santiaago): is there a more intelligent way of doing this?
		if r1 == '*' {
			return Operator, "**"
		} else if r1 == '\\' {
			return Operator, "*\\"
		} else if r1 == '/' {
			return Operator, "*/"
		}
		s.unread()
		return Operator, string(r)
	case '=':
		return Assign, string(r)
	}
	// keyword cases
	switch sr {
	case "max":
		return Operator, sr
	case "min":
		return Operator, sr
	}
	return Error, string(r)
}
// scanWhitespace consumes the current rune and every following contiguous
// whitespace rune, returning them as a single Space token. A non-whitespace
// rune is pushed back; EOF simply ends the run.
func (s *Scanner) scanWhitespace() (t Token, lit string) {
	var out bytes.Buffer
	out.WriteRune(s.read())
	for {
		r := s.read()
		if r == eof {
			break
		}
		if !isWhitespace(r) {
			s.unread()
			break
		}
		out.WriteRune(r)
	}
	return Space, out.String()
}
// scanIdentifier consumes the current rune and every following letter, digit
// or underscore, returning them as an Identifier token. Any other rune is
// pushed back; EOF ends the run.
func (s *Scanner) scanIdentifier() (t Token, lit string) {
	var out bytes.Buffer
	out.WriteRune(s.read())
	for {
		r := s.read()
		if r == eof {
			break
		}
		if !isLetter(r) && !isDigit(r) && r != '_' {
			s.unread()
			break
		}
		out.WriteRune(r)
	}
	return Identifier, out.String()
}
// scanDigit consumes the current rune and every following contiguous digit
// rune, returning them as a Number token. A non-digit rune is pushed back;
// EOF ends the run.
func (s *Scanner) scanDigit() (tok Token, lit string) {
	var out bytes.Buffer
	out.WriteRune(s.read())
	for {
		r := s.read()
		if r == eof {
			break
		}
		if !isDigit(r) {
			s.unread()
			break
		}
		out.WriteRune(r)
	}
	return Number, out.String()
}
// eof is a sentinel rune (NUL) so end-of-input can flow through the scanner
// like any other character.
var eof = rune(0)
// isWhitespace reports whether r is one of the whitespace runes the scanner
// recognizes: space, tab or newline.
func isWhitespace(r rune) bool {
	switch r {
	case ' ', '\t', '\n':
		return true
	}
	return false
}
// isLetter reports whether r is an ASCII letter (a-z or A-Z).
func isLetter(r rune) bool {
	switch {
	case 'a' <= r && r <= 'z':
		return true
	case 'A' <= r && r <= 'Z':
		return true
	}
	return false
}
// isDigit reports whether r is an ASCII digit (0-9).
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}
// isKeyword reports whether s is one of the scanner's keywords ("max"/"min").
func isKeyword(s string) bool {
	switch s {
	case "max", "min":
		return true
	}
	return false
}
// isUnary reports whether s is one of the two-rune unary operators.
func isUnary(s string) bool {
	switch s {
	case "+\\", "+/", "*\\", "*/":
		return true
	}
	return false
}
|
package tag
// List is the Qiita tag list: the 100 tags in order of frequency of
// appearance, most frequent first.
var List = [100]string{
	"Python", "JavaScript", "Ruby", "Rails", "PHP", "AWS", "iOS", "Java",
	"Docker", "Swift", "Android", "Linux", "初心者", "Node.js", "Python3",
	"Git", "C#", "Unity", "Mac", "Go", "CSS", "MySQL", "Vue.js", "機械学習",
	"C++", "HTML", "Laravel", "React", "Windows", "GitHub", "Xcode",
	"Ubuntu", "RaspberryPi", "TypeScript", "CentOS", "DeepLearning",
	"jQuery", "Bash", "MacOSX", "Vagrant", "Vim", "Kotlin", "Objective-C",
	"WordPress", "kubernetes", "VSCode", "Azure", "#migrated", "Firebase",
	"SQL", "R", "Django", "Heroku", "PostgreSQL", "Slack", "lambda",
	"HTML5", "C", "TensorFlow", "nginx", "Windows10", "Scala", "Angular",
	"centos7", "IoT", "ShellScript", "EC2", "docker-compose", "Excel",
	"GoogleAppsScript", "gcp", "Arduino", "SSH", "MachineLearning",
	"OpenCV", "Ansible", "Apache", "api", "PowerShell", "nuxt.js",
	"AtCoder", "ポエム", "Chrome", "新人プログラマ応援", "AndroidStudio",
	"VirtualBox", "Rust", "競技プログラミング", "Flutter", "JSON", "oracle",
	"Qiita", "iPhone", "pandas", "数学", "Emacs", "S3", "Elixir", "npm",
	"VBA",
}
|
package main
import (
"fmt"
student ".."
)
// main runs the student package's Raid1b/Raid1e routines on a handful of
// sample inputs, separating each run's output with a blank line.
func main() {
	student.Raid1b(5, 3)
	fmt.Println()
	// NOTE(review): the next two calls are identical — confirm whether one
	// of them was meant to use different arguments.
	student.Raid1e(0, 1)
	fmt.Println()
	student.Raid1e(0, 1)
	fmt.Println()
	student.Raid1e(6, 0)
	fmt.Println()
	student.Raid1e(0, -6)
}
|
package main
import . "leetcode"
// main is intentionally empty; this file only hosts the isEvenOddTree
// solution below.
func main() {
}
/**
 * Definition for a binary tree node.
 * type TreeNode struct {
 *     Val int
 *     Left *TreeNode
 *     Right *TreeNode
 * }
 */
// isEvenOddTree reports whether the tree is an "even-odd" tree: nodes on even
// levels must hold odd values that strictly increase left-to-right, nodes on
// odd levels must hold even values that strictly decrease left-to-right.
// ans keeps only the most recently visited value per level, which suffices
// because the DFS visits each level's nodes in left-to-right order.
func isEvenOddTree(root *TreeNode) bool {
	var ans []int
	var travel func(root *TreeNode, level int) bool
	travel = func(root *TreeNode, level int) bool {
		if root == nil {
			return true
		}
		if level%2 == 0 {
			if root.Val%2 != 1 {
				// parity mismatch: even level requires odd values
				return false
			}
			if len(ans) <= level {
				ans = append(ans, root.Val) // first node on this level: just record it
			} else {
				if root.Val <= ans[level] {
					return false // strict increase violated
				}
				ans[level] = root.Val // only one value per level is needed
			}
		} else {
			if root.Val%2 != 0 {
				// parity mismatch: odd level requires even values
				return false
			}
			if len(ans) <= level {
				ans = append(ans, root.Val)
			} else {
				if root.Val >= ans[level] {
					return false // strict decrease violated
				}
				ans[level] = root.Val // only one value per level is needed
			}
		}
		return travel(root.Left, level+1) && travel(root.Right, level+1) // both subtrees must pass
	}
	return travel(root, 0)
}
|
package modules
import (
"context"
"fmt"
"regexp"
"time"
"github.com/astaxie/beego"
beegoContext "github.com/astaxie/beego/context"
"github.com/sirupsen/logrus"
elastic "gopkg.in/olivere/elastic.v5"
elogrus "gopkg.in/sohlich/elogrus.v2"
)
const (
	// HTTP_LOG_NAME is the index-name suffix for HTTP access logs (see genIndex).
	HTTP_LOG_NAME = "http"
)

var (
	// esClient is the shared Elasticsearch client; nil when ES logging is
	// disabled (see initLogs).
	esClient *elastic.Client
	// esIndexPrefix is prepended to every index name built by genIndex.
	esIndexPrefix string
)

// SetEsIndexPrefix sets the package-wide prefix used when building
// Elasticsearch index names.
func SetEsIndexPrefix(setEsIndexPrefix string) {
	esIndexPrefix = setEsIndexPrefix
}

// AccessLogRecord struct for holding access log data.
type AccessLogRecord struct {
	RemoteAddr     string    `json:"remote_addr"`    // client IP
	RequestTime    time.Time `json:"request_time"`   // time the request started
	RequestMethod  string    `json:"request_method"` // HTTP verb
	Request        string    `json:"request"`        // "METHOD URI PROTO" line
	ServerProtocol string    `json:"server_protocol"`
	Host           string    `json:"host"`
	Status         int       `json:"status"`
	BodyBytesSent  int64     `json:"body_bytes_sent"`
	ElapsedTime    float64   `json:"elapsed_time"` // seconds
	HTTPReferrer   string    `json:"http_referrer"`
	HTTPUserAgent  string    `json:"http_user_agent"`
	RemoteUser     string    `json:"remote_user"`
}

// InitLogsToES configures logrus at logLevel and mirrors logs (and optionally
// HTTP access records) to Elasticsearch at esURL. excludURLs are regexp
// patterns for request URIs that should not be access-logged.
func InitLogsToES(logLevel logrus.Level, esURL string, httpLog bool, excludURLs ...string) {
	initLogs(logLevel, esURL, httpLog, excludURLs...)
}

// InitLogs configures logrus at logLevel with Elasticsearch output disabled.
func InitLogs(logLevel logrus.Level) {
	initLogs(logLevel, "", false)
}
// initLogs is the shared implementation behind InitLogs/InitLogsToES: it sets
// the global logrus level, optionally connects the package-level esClient
// (panicking if the connection fails) and starts the background flush loop,
// and optionally installs the beego HTTP access-log filters.
func initLogs(logLevel logrus.Level, esURL string, httpLog bool, excludURLs ...string) {
	logrus.SetLevel(logLevel)
	if esURL != "" {
		var err error
		// Sniffing is disabled so the client only talks to the given URL.
		if esClient, err = elastic.NewClient(elastic.SetURL(esURL), elastic.SetSniff(false)); err != nil {
			panic(err)
		}
		go runESFlush()
	}
	if httpLog {
		initHTTPLog(excludURLs...)
	}
}
// NewLCircleESLogger builds a dedicated logrus.Logger. When both esURL and
// esIndex are non-empty it attaches an Elasticsearch hook (tagged with the
// local hostname) so entries at logLevel and above are shipped to that index;
// otherwise a plain logger is returned.
func NewLCircleESLogger(logLevel logrus.Level, esURL string, esIndex string) (*logrus.Logger, error) {
	logger := logrus.New()
	if esURL != "" && esIndex != "" {
		hostname := GetHostname()
		client, err := elastic.NewClient(elastic.SetURL(esURL), elastic.SetSniff(false))
		if err != nil {
			return nil, err
		}
		hook, err := elogrus.NewElasticHook(client, hostname, logLevel, esIndex)
		if err != nil {
			return nil, err
		}
		logger.AddHook(hook)
	}
	return logger, nil
}
func genIndex(typeName string) string {
return fmt.Sprintf("%s-%s", esIndexPrefix, typeName)
}
// initHTTPLog installs two beego filters: the first stamps each request with
// its start time; the second, after routing finishes, assembles an
// AccessLogRecord and ships it to the "http" index via put. Requests whose
// URI matches /system/status or any excludURLs regexp are skipped.
func initHTTPLog(excludURLs ...string) {
	beego.InsertFilter("*", beego.BeforeRouter, func(context *beegoContext.Context) {
		context.Input.SetData("startTime", time.Now())
	}, false)
	beego.InsertFilter("*", beego.FinishRouter, func(context *beegoContext.Context) {
		r := context.Request
		if matched, _ := regexp.MatchString("^/system/status$", r.RequestURI); matched {
			return
		}
		// NOTE(review): the loop variable shadows the excludURLs slice — it
		// works, but renaming the element variable would read better.
		for _, excludURLs := range excludURLs {
			if matched, _ := regexp.MatchString(excludURLs, r.RequestURI); matched {
				return
			}
		}
		startTime := context.Input.GetData("startTime").(time.Time)
		statusCode := context.ResponseWriter.Status
		// A Status of 0 means WriteHeader was never called, i.e. implicit 200.
		if statusCode == 0 {
			statusCode = 200
		}
		timeDur := time.Since(startTime)
		put(genIndex(HTTP_LOG_NAME), "accesslog", &AccessLogRecord{
			RemoteAddr:     context.Input.IP(),
			RequestTime:    startTime,
			RequestMethod:  r.Method,
			Request:        fmt.Sprintf("%s %s %s", r.Method, r.RequestURI, r.Proto),
			ServerProtocol: r.Proto,
			Host:           r.Host,
			Status:         statusCode,
			ElapsedTime:    timeDur.Seconds(),
			HTTPReferrer:   r.Header.Get("Referer"),
			HTTPUserAgent:  r.Header.Get("User-Agent"),
			RemoteUser:     r.Header.Get("Remote-User"),
			BodyBytesSent:  int64(len(context.Input.RequestBody)),
		})
	}, false)
}
// put indexes data into Elasticsearch under the given index and document
// type. It is a silent no-op returning nil when ES logging is disabled
// (esClient == nil).
func put(index string, esType string, data interface{}) error {
	if esClient != nil {
		logrus.
			WithField("index", index).
			WithField("esType", esType).
			Debug("Send event log to elasticsearch")
		_, err := esClient.Index().Index(index).Type(esType).BodyJson(data).Do(context.Background())
		return err
	}
	return nil
}
// runESFlush flushes the Elasticsearch client every 5 seconds, forever.
// Flush errors are logged and the loop keeps running.
func runESFlush() {
	t := time.NewTicker(5 * time.Second)
	defer t.Stop()
	// The original wrapped this in a single-case select; ranging over the
	// ticker channel is equivalent.
	for range t.C {
		if _, err := esClient.Flush().Do(context.Background()); err != nil {
			logrus.WithError(err).Error("runESFlush")
		}
	}
}
|
//go:build js
package checkbox
import (
"net/url"
"strings"
"github.com/gopherjs/gopherjs/js"
"github.com/shurcooL/go/gopherjs_http/jsutil"
"honnef.co/go/js/dom"
)
// init exposes CheckboxOnChange to the browser as the global JavaScript
// function "CheckboxOnChange".
func init() {
	js.Global.Set("CheckboxOnChange", jsutil.Wrap(CheckboxOnChange))
}
// CheckboxOnChange keeps a URL query parameter in sync with a checkbox: when
// the checkbox state equals defaultValue the parameter is removed from the
// query string, otherwise it is set (with an empty value). Assigning to
// Location().Search navigates to the updated URL. The event parameter is
// unused but required by the JS callback signature.
func CheckboxOnChange(event dom.Event, object dom.HTMLElement, defaultValue bool, queryParameter string) {
	rawQuery := strings.TrimPrefix(dom.GetWindow().Location().Search, "?")
	query, _ := url.ParseQuery(rawQuery)
	inputElement := object.(*dom.HTMLInputElement)
	selected := inputElement.Checked
	if selected == defaultValue {
		query.Del(queryParameter)
	} else {
		query.Set(queryParameter, "")
	}
	dom.GetWindow().Location().Search = "?" + query.Encode()
}
|
package main
import (
"fmt"
"sort"
"github.com/jnewmano/advent2020/input"
"github.com/jnewmano/advent2020/output"
)
// main computes and prints the part-B answer.
func main() {
	answer := partb()
	fmt.Println(answer)
}
// partb counts the distinct valid adapter orderings (Advent of Code 2020 day
// 10, part 2) by dynamic programming: paths[k] is the number of ways to reach
// joltage k, accumulated from every earlier adapter at most 3 jolts below.
func partb() interface{} {
	// input.SetRaw((raw2))
	var things = input.LoadSliceInt("")
	things = append(things, 0) // add the starting node
	sort.Ints(things)
	paths := map[int]int{
		0: 1, // seed the starting node with 1
	}
	// NOTE(review): the nested loop is O(n^2); since things is sorted only a
	// few successors of v ever match, but correctness is unaffected.
	for _, v := range things {
		for _, k := range things {
			if k <= v {
				continue
			}
			if k-v > 3 { // if it's too far to the next one, don't add it as a valid path
				continue
			}
			// add number of existing paths to get to the neighbor node
			paths[k] += paths[v]
		}
	}
	// return the number of paths to the last adapter
	last := output.High(things)
	return paths[last]
}
// Keep the output package referenced even when the code above is commented
// out during experimentation.
var _ = output.High(nil)

// raw1 is a small fixture input (one joltage per line).
var raw1 = `16
10
15
5
1
11
7
19
6
12
4
`

// raw is a larger fixture input (one joltage per line).
var raw = `28
33
18
42
31
14
46
20
48
47
24
23
49
45
19
38
39
11
1
32
25
35
8
17
7
9
4
2
34
10
3`
|
package diffiehellman
import (
"math/big"
"math/rand"
"time"
)
// testVersion identifies the version of the exercise test suite this
// implementation targets.
const testVersion = 1
func PrivateKey(p *big.Int) *big.Int {
source := rand.New(rand.NewSource(time.Now().UnixNano()))
return new(big.Int).Add(big.NewInt(2), new(big.Int).Rand(source, new(big.Int).Sub(p, big.NewInt(2))))
}
func PublicKey(private, p *big.Int, g int64) *big.Int {
gBigInt := big.NewInt(g)
return new(big.Int).Exp(gBigInt, private, p)
}
// NewPair generates a (private, public) key pair for modulus p and base g.
func NewPair(p *big.Int, g int64) (*big.Int, *big.Int) {
	priv := PrivateKey(p)
	pub := PublicKey(priv, p, g)
	return priv, pub
}
func SecretKey(private1, public2, p *big.Int) *big.Int {
return new(big.Int).Exp(public2, private1, p)
}
|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "as IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package table
import (
"errors"
"fmt"
"bscp.io/pkg/criteria/enumor"
"bscp.io/pkg/criteria/validator"
"bscp.io/pkg/runtime/selector"
)
// GroupColumns defines Group's columns
var GroupColumns = mergeColumns(GroupColumnDescriptor)

// GroupColumnDescriptor is Group's column descriptors: the id column plus the
// spec/attachment/revision sub-descriptors merged under their name prefixes.
var GroupColumnDescriptor = mergeColumnDescriptors("",
	ColumnDescriptors{{Column: "id", NamedC: "id", Type: enumor.Numeric}},
	mergeColumnDescriptors("spec", GroupSpecColumnDescriptor),
	mergeColumnDescriptors("attachment", GroupAttachmentColumnDescriptor),
	mergeColumnDescriptors("revision", RevisionColumnDescriptor))
// Group defines a group for an app to publish.
// it contains the selector to define the scope of the matched instances.
type Group struct {
	// ID is an auto-increased value, which is a unique identity of a group.
	ID uint32 `db:"id" json:"id" gorm:"primaryKey"`
	// Spec holds the user-settable fields; see GroupSpec.
	Spec *GroupSpec `db:"spec" json:"spec" gorm:"embedded"`
	// Attachment links the group to its business (BizID).
	Attachment *GroupAttachment `db:"attachment" json:"attachment" gorm:"embedded"`
	Revision   *Revision        `db:"revision" json:"revision" gorm:"embedded"`
}
// TableName is the group's database table name.
func (g Group) TableName() string {
	return "groups"
}

// AppID implements the AuditRes interface; it always returns 0 here —
// presumably because a group is not tied to a single app, but confirm.
func (g Group) AppID() uint32 {
	return 0
}

// ResID implements the AuditRes interface, returning the group's ID.
func (g Group) ResID() uint32 {
	return g.ID
}

// ResType implements the AuditRes interface.
// NOTE(review): this returns "app" rather than "group" — confirm whether
// that is intentional or a copy-paste slip.
func (g Group) ResType() string {
	return "app"
}
// ValidateCreate validate group is valid or not when create it.
// The ID must be unset; spec, attachment and revision must all be present
// and individually valid, checked in that order.
func (g Group) ValidateCreate() error {
	switch {
	case g.ID > 0:
		return errors.New("id should not be set")
	case g.Spec == nil:
		return errors.New("spec not set")
	}
	if err := g.Spec.ValidateCreate(); err != nil {
		return err
	}
	if g.Attachment == nil {
		return errors.New("attachment not set")
	}
	if err := g.Attachment.Validate(); err != nil {
		return err
	}
	if g.Revision == nil {
		return errors.New("revision not set")
	}
	return g.Revision.ValidateCreate()
}
// ValidateUpdate validate group is valid or not when update it.
// ID and attachment (with a biz id) are mandatory; at least one updatable
// field (currently only spec) must be supplied, and revision must be valid.
func (g Group) ValidateUpdate() error {
	if g.ID <= 0 {
		return errors.New("id should be set")
	}
	changed := false
	if g.Spec != nil {
		changed = true
		if err := g.Spec.ValidateUpdate(); err != nil {
			return err
		}
	}
	if g.Attachment == nil {
		return errors.New("attachment should be set")
	}
	if g.Attachment.BizID <= 0 {
		return errors.New("biz id should be set")
	}
	if !changed {
		// Fixed the grammar of this user-facing message ("to be change").
		return errors.New("nothing is found to be changed")
	}
	if g.Revision == nil {
		return errors.New("revision not set")
	}
	if err := g.Revision.ValidateUpdate(); err != nil {
		return err
	}
	return nil
}
// ValidateDelete validate the group's info when delete it.
// Both the group id and the owning biz id must be set.
func (g Group) ValidateDelete() error {
	if g.ID <= 0 {
		return errors.New("group id should be set")
	}
	// Fix: guard against a nil attachment before dereferencing it. The
	// original panicked here when Attachment was unset (ValidateUpdate
	// performs this check; ValidateDelete did not).
	if g.Attachment == nil {
		return errors.New("attachment should be set")
	}
	if g.Attachment.BizID <= 0 {
		return errors.New("biz id should be set")
	}
	return nil
}
// GroupSpecColumns defines GroupSpec's columns
var GroupSpecColumns = mergeColumns(GroupSpecColumnDescriptor)

// GroupSpecColumnDescriptor is GroupSpec's column descriptors.
var GroupSpecColumnDescriptor = ColumnDescriptors{
	{Column: "name", NamedC: "name", Type: enumor.String},
	{Column: "public", NamedC: "public", Type: enumor.Boolean},
	{Column: "mode", NamedC: "mode", Type: enumor.String},
	{Column: "selector", NamedC: "selector", Type: enumor.String},
	{Column: "uid", NamedC: "uid", Type: enumor.String},
}

// GroupSpec defines all the specifics for group set by user.
type GroupSpec struct {
	Name string `db:"name" json:"name"`
	// Public defines whether group can be used by all apps.
	// It can not be updated once it is created.
	Public bool `db:"public" json:"public" gorm:"column:public"`
	// Mode is the group's working mode; immutable after creation
	// (see ValidateUpdate).
	Mode GroupMode `db:"mode" json:"mode" gorm:"column:mode"`
	// Selector scopes the matched instances; required in Custom mode
	// (see ValidateCreate).
	Selector *selector.Selector `db:"selector" json:"selector" gorm:"column:selector;type:json"`
	// UID pins a specific instance; required in Debug mode.
	UID string `db:"uid" json:"uid" gorm:"column:uid"`
}
const (
	// Custom means this is a user-customized group; its selector is defined
	// by the user.
	Custom GroupMode = "custom"
	// Debug means that this group can only set UID,
	// in other words it can only select a specific instance.
	Debug GroupMode = "debug"
	// Default will select instances that won't be selected by any other
	// released groups.
	Default GroupMode = "default"
	// BuiltIn defines a bscp built-in group, e.g. ClusterID, Namespace,
	// CMDBModuleID...
	BuiltIn GroupMode = "builtin"
)
// GroupMode is the mode a group works in; see the Custom/Debug/Default/
// BuiltIn constants above.
type GroupMode string

// String returns the string value of GroupMode.
func (g GroupMode) String() string {
	return string(g)
}
// Validate strategy set type.
// Accepts Custom, Debug and Default; anything else is rejected.
// NOTE(review): BuiltIn is a declared mode but is rejected here — confirm
// whether built-in groups are validated elsewhere or this case is missing.
func (g GroupMode) Validate() error {
	switch g {
	case Custom:
	case Debug:
	case Default:
	default:
		return fmt.Errorf("unsupported group working mode: %s", g)
	}
	return nil
}
// ValidateCreate validate group spec when it is created.
// The name must be valid, the mode must pass GroupMode.Validate, and the
// mode-specific requirement must hold: Custom needs a non-empty selector,
// Debug needs a uid.
// NOTE(review): Default passes Mode.Validate but hits the default branch
// below, so a default-mode group can never be created here — confirm whether
// that is intended.
func (g GroupSpec) ValidateCreate() error {
	if err := validator.ValidateName(g.Name); err != nil {
		return err
	}
	if err := g.Mode.Validate(); err != nil {
		return err
	}
	switch g.Mode {
	case Custom:
		if g.Selector == nil || g.Selector.IsEmpty() {
			return errors.New("group works in custom mode, selector should be set")
		}
	case Debug:
		if g.UID == "" {
			return errors.New("group works in debug mode, uid should be set")
		}
	default:
		return fmt.Errorf("unsupported group working mode: %s", g.Mode.String())
	}
	return nil
}
// ValidateUpdate validate group spec when it is updated.
// The name must be valid, and since the mode is immutable any non-empty mode
// in an update request is rejected.
func (g GroupSpec) ValidateUpdate() error {
	if err := validator.ValidateName(g.Name); err != nil {
		return err
	}
	if g.Mode == "" {
		return nil
	}
	return errors.New("group's mode can not be updated")
}
// GroupAttachmentColumns defines GroupAttachment's columns
var GroupAttachmentColumns = mergeColumns(GroupAttachmentColumnDescriptor)

// GroupAttachmentColumnDescriptor is GroupAttachment's column descriptors.
var GroupAttachmentColumnDescriptor = ColumnDescriptors{
	{Column: "biz_id", NamedC: "biz_id", Type: enumor.Numeric}}

// GroupAttachment defines the group attachments.
type GroupAttachment struct {
	// BizID identifies the business the group belongs to; 0 means unset.
	BizID uint32 `db:"biz_id" json:"biz_id" gorm:"column:biz_id"`
}

// IsEmpty test whether group attachment is empty or not.
func (g GroupAttachment) IsEmpty() bool {
	return g.BizID == 0
}
// Validate whether group attachment is valid or not: the biz id must be set.
func (g GroupAttachment) Validate() error {
	// BizID is unsigned, so it can never be negative; compare against zero
	// explicitly instead of the misleading "<= 0" (flagged by staticcheck).
	if g.BizID == 0 {
		return errors.New("invalid attachment biz id")
	}
	return nil
}
|
// RecentCounter counts pings received within the last 3000 milliseconds.
type RecentCounter struct {
	queue []int // timestamps still inside the window, in ascending order
}

// Constructor returns an empty RecentCounter (LeetCode-mandated name).
func Constructor() RecentCounter {
	return RecentCounter{}
}

// Ping records a ping at time t (milliseconds) and returns how many pings
// fall in the inclusive window [t-3000, t]. Timestamps arrive strictly
// increasing, so stale entries can be dropped from the front of the queue.
func (rc *RecentCounter) Ping(t int) int {
	rc.queue = append(rc.queue, t)
	// Fix: the original copied the entire struct to a temporary and back on
	// every call; mutating through the pointer receiver directly is
	// equivalent and avoids the wasted copies.
	for rc.queue[0] < t-3000 {
		rc.queue = rc.queue[1:]
	}
	return len(rc.queue)
}
/**
* Your RecentCounter object will be instantiated and called as such:
* obj := Constructor();
* param_1 := obj.Ping(t);
*/
|
package condiments
// Whip is a condiment decorator: it wraps a beverage's description and cost
// closures, appending its own label and adding its own price.
type Whip struct {
	description string
	cost        int
}

// NewWhip returns a Whip condiment labeled "Whip" costing 30.
func NewWhip() Whip {
	return Whip{description: "Whip", cost: 30}
}

// GetDescription decorates beverageDescription so the returned closure yields
// the beverage description followed by " Whip".
func (wp Whip) GetDescription(beverageDescription func() string) func() string {
	return func() string {
		return beverageDescription() + " " + wp.description
	}
}

// Cost decorates beverageCost so the returned closure yields the beverage
// cost plus the whip's cost.
func (wp Whip) Cost(beverageCost func() int) func() int {
	return func() int {
		return beverageCost() + wp.cost
	}
}
|
package utils
import "encoding/xml"
// Copy deep-copies src into des by round-tripping through encoding/xml, so
// both sides must be XML-(un)marshalable structs and des must be a pointer.
// It reports whether both the marshal and unmarshal steps succeeded.
// (Original comment, translated: for structs parsed via xml; des must be a
// reference.)
func Copy(src, des interface{}) bool {
	data, err := xml.Marshal(src)
	if err != nil {
		return false
	}
	return xml.Unmarshal(data, des) == nil
}
|
package tasks
import (
"reflect"
"testing"
)
// TestNewTask checks that NewTask returns a value of type tasks.Task whose
// Name field is set to the requested name.
func TestNewTask(t *testing.T) {
	task_name := "TestTask"
	tsk := NewTask(task_name)
	if reflect.TypeOf(tsk).String() != "tasks.Task" {
		t.Fatalf(`NewTask("%s") did not return a "Task" type.`, task_name)
	}
	if tsk.Name != task_name {
		t.Fatalf(`NewTask("%s") set task name as "%s" instead of "%s".`,
			task_name,
			tsk.Name,
			task_name)
	}
}
|
package main
import (
"encoding/csv"
"log"
"os"
"strconv"
"io"
//"fmt"
)
// saveToFile writes the city -> Location map to the "geodb" CSV file, one
// "city,lat,lng" row per entry (map iteration order, i.e. unordered).
// Any I/O error is fatal.
func saveToFile(locOfCities map[string]Location) {
	// Fix: the original ignored the Create error (risking a nil-pointer use
	// below) and never closed the file handle.
	f, err := os.Create("geodb")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	w := csv.NewWriter(f)
	for key, value := range locOfCities {
		csvRecord := []string{
			key,
			strconv.FormatFloat(value.Lat, 'f', -1, 64),
			strconv.FormatFloat(value.Lng, 'f', -1, 64),
		}
		if err := w.Write(csvRecord); err != nil {
			log.Fatalln("error writing record to csv:", err)
		}
	}
	// Write any buffered data to the underlying writer.
	w.Flush()
	if err := w.Error(); err != nil {
		log.Fatal(err)
	}
}
// readFromFile loads the "geodb" CSV file written by saveToFile and returns
// a map from city name to Location. Rows are "city,lat,lng"; any I/O error
// is fatal.
func readFromFile() map[string]Location {
	// Fix: the original ignored the Open error (risking a nil-pointer use
	// below) and never closed the file handle.
	f, err := os.Open("geodb")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	r := csv.NewReader(f)
	cityCoords := make(map[string]Location)
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		// Parse failures fall back to 0, preserving the original's
		// best-effort behavior.
		lat, _ := strconv.ParseFloat(record[1], 64)
		lng, _ := strconv.ParseFloat(record[2], 64)
		cityCoords[record[0]] = Location{lat, lng}
	}
	return cityCoords
}
|
package scsprotov1
// GetRunningProcesses returns the number of permits currently available in
// the concurrent-routines pool.
// NOTE(review): despite the name, this reads AvailablePermits (free slots),
// not the count of actively running processes — confirm caller expectations.
func (c *scsv1) GetRunningProcesses() int {
	return c.concurrentRoutinesPool.AvailablePermits()
}
|
package repositories_test
import (
"context"
"testing"
"github.com/syncromatics/kafmesh/internal/graph/model"
"gotest.tools/assert"
)
// Test_Component_Services verifies ServicesByComponents returns the expected
// flat list of services for components 1-4.
func Test_Component_Services(t *testing.T) {
	repo := repos.Component()
	r, err := repo.ServicesByComponents(context.Background(), []int{1, 2, 3, 4})
	assert.NilError(t, err)
	assert.DeepEqual(t, r, []*model.Service{
		{ID: 1, Name: "service1", Description: "service1 description"},
		{ID: 1, Name: "service1", Description: "service1 description"},
		{ID: 2, Name: "service2", Description: "service2 description"},
		{ID: 2, Name: "service2", Description: "service2 description"},
	})
}
// Test_Component_Processors verifies ProcessorsByComponents returns the
// expected per-component processor lists (empty for components 3 and 4).
func Test_Component_Processors(t *testing.T) {
	repo := repos.Component()
	r, err := repo.ProcessorsByComponents(context.Background(), []int{1, 2, 3, 4})
	assert.NilError(t, err)
	assert.DeepEqual(t, r, [][]*model.Processor{
		{
			{ID: 1, Name: "processor1", Description: "processor1 description"},
			{ID: 2, Name: "processor2", Description: "processor2 description"},
		},
		{
			{ID: 3, Name: "processor3", Description: "processor3 description"},
			{ID: 4, Name: "processor4", Description: "processor4 description"},
		},
		{},
		{},
	})
}
// Test_Component_Sinks verifies SinksByComponents returns the expected
// per-component sink lists (empty for components 3 and 4).
func Test_Component_Sinks(t *testing.T) {
	repo := repos.Component()
	r, err := repo.SinksByComponents(context.Background(), []int{1, 2, 3, 4})
	assert.NilError(t, err)
	assert.DeepEqual(t, r, [][]*model.Sink{
		{
			{ID: 1, Name: "sink1", Description: "sink1 description"},
			{ID: 2, Name: "sink2", Description: "sink2 description"},
		},
		{
			{ID: 3, Name: "sink3", Description: "sink3 description"},
			{ID: 4, Name: "sink4", Description: "sink4 description"},
		},
		{},
		{},
	})
}
// Test_Component_Sources verifies SourcesByComponents returns the expected
// per-component source lists (empty for components 3 and 4).
func Test_Component_Sources(t *testing.T) {
	repo := repos.Component()
	r, err := repo.SourcesByComponents(context.Background(), []int{1, 2, 3, 4})
	assert.NilError(t, err)
	assert.DeepEqual(t, r, [][]*model.Source{
		{{ID: 1}, {ID: 2}},
		{{ID: 3}, {ID: 4}},
		{},
		{},
	})
}
// Test_Component_ViewSinks verifies ViewSinksByComponents returns the
// expected per-component view-sink lists (empty for components 3 and 4).
func Test_Component_ViewSinks(t *testing.T) {
	repo := repos.Component()
	r, err := repo.ViewSinksByComponents(context.Background(), []int{1, 2, 3, 4})
	assert.NilError(t, err)
	assert.DeepEqual(t, r, [][]*model.ViewSink{
		{
			{ID: 1, Name: "viewSink1", Description: "viewSink1 description"},
			{ID: 2, Name: "viewSink2", Description: "viewSink2 description"},
		},
		{
			{ID: 3, Name: "viewSink3", Description: "viewSink3 description"},
			{ID: 4, Name: "viewSink4", Description: "viewSink4 description"},
		},
		{},
		{},
	})
}
// Test_Component_ViewSources verifies ViewSourcesByComponents returns the
// expected per-component view-source lists (empty for components 3 and 4).
func Test_Component_ViewSources(t *testing.T) {
	repo := repos.Component()
	r, err := repo.ViewSourcesByComponents(context.Background(), []int{1, 2, 3, 4})
	assert.NilError(t, err)
	assert.DeepEqual(t, r, [][]*model.ViewSource{
		{
			{ID: 1, Description: "viewSource1 description", Name: "viewSource1"},
			{ID: 2, Description: "viewSource2 description", Name: "viewSource2"},
		},
		{
			{ID: 3, Description: "viewSource3 description", Name: "viewSource3"},
			{ID: 4, Description: "viewSource4 description", Name: "viewSource4"},
		},
		{},
		{},
	})
}
// Test_Component_Views verifies ViewsByComponents returns the expected
// per-component view lists (one view for component 3, none for 4).
func Test_Component_Views(t *testing.T) {
	repo := repos.Component()
	r, err := repo.ViewsByComponents(context.Background(), []int{1, 2, 3, 4})
	assert.NilError(t, err)
	assert.DeepEqual(t, r, [][]*model.View{
		{{ID: 1}, {ID: 2}},
		{{ID: 3}, {ID: 4}},
		{{ID: 5}},
		{},
	})
}
// Test_Component_DependsOn verifies DependsOn returns each component's
// dependency list (component 4 has none).
func Test_Component_DependsOn(t *testing.T) {
	repo := repos.Component()
	r, err := repo.DependsOn(context.Background(), []int{1, 2, 3, 4})
	assert.NilError(t, err)
	assert.DeepEqual(t, r, [][]*model.Component{
		{{ID: 2, Name: "component2", Description: "component2 description"}},
		{{ID: 1, Name: "component1", Description: "component1 description"}},
		{
			{ID: 1, Name: "component1", Description: "component1 description"},
			{ID: 2, Name: "component2", Description: "component2 description"},
		},
		{},
	})
}
|
package main
import (
"fmt"
)
// Go includes the built-in error interface defined as
/*
type error interface {
Error () string
}
So, any value that satisfies this interface can be used wherever errors are used.
*/
//
// GreetingError reports a greeting attempt that failed.
type GreetingError struct {
	Who string // the addressee that could not be greeted
}

// Error implements the error interface.
func (ge *GreetingError) Error() string {
	// Fixed the typo in the user-facing message ("cant" -> "can't").
	return fmt.Sprintf("Tried to say hello to %v, but can't.", ge.Who)
}

// SayHello greets who. It succeeds only for "World"/"world"; any other
// addressee yields ok == false plus a *GreetingError describing the failure.
//
// NOTE(review): returning the concrete *GreetingError means a caller that
// assigns the result to a plain error variable gets a non-nil interface even
// when err is a nil pointer (typed-nil trap). The signature is kept for
// compatibility, but callers should compare the pointer, not the interface.
func SayHello(who string) (ok bool, err *GreetingError) {
	switch who {
	case "World", "world":
		ok = true
	default:
		err = &GreetingError{who}
	}
	return ok, err
}
// main demonstrates SayHello: a successful greeting reports the (nil) error,
// a failed one prints the error message.
func main() {
	ok, err := SayHello("Moon")
	if ok {
		fmt.Println("Said hello, error ", err)
		return
	}
	fmt.Println(err.Error())
}
|
package parser
import (
"fmt"
"strings"
"github.com/emptyland/akino/sql/ast"
"github.com/emptyland/akino/sql/token"
)
// ParseCommand parses cmd as a single SQL statement and returns its AST.
func ParseCommand(cmd string) (ast.Command, error) {
	p := new(Parser)
	return p.Init(cmd).NextStatement()
}
// ParseExpression parses expr as a single SQL expression and returns its AST.
func ParseExpression(expr string) (ast.Expr, error) {
	p := new(Parser)
	return p.Init(expr).NextExpr()
}
// Parser turns a SQL command string into AST nodes using one token of
// lookahead over a lexer.
type Parser struct {
	cmd string // source text being parsed
	// NOTE(review): "tokeniton" looks like a typo for a token type — confirm
	// it is actually declared elsewhere in this package.
	lah tokeniton // look a head
	lex *token.Lexer
}
// Init resets the parser to scan cmd and primes the one-token lookahead,
// returning the parser for chaining.
func (self *Parser) Init(cmd string) *Parser {
	self.cmd = cmd
	self.lex = token.NewLexer(cmd)
	self.skip() // load the first token into the lookahead
	return self
}
// NextStatement parses one statement and, unless the input ends right after
// it, requires a trailing semicolon.
func (self *Parser) NextStatement() (ast.Command, error) {
	cmd, err := self.Next()
	if err != nil {
		return nil, err
	}
	if self.peek() == token.EOF {
		return cmd, nil
	}
	if _, err = self.match(token.SEMI); err != nil {
		return nil, err
	}
	return cmd, nil
}
// Next dispatches on the lookahead token to the statement-specific parser;
// unrecognized leading tokens produce an error.
func (self *Parser) Next() (ast.Command, error) {
	switch self.peek() {
	case token.BEGIN, token.START, token.COMMIT, token.ROLLBACK, token.END:
		return self.parseTransaction(self.peek())
	case token.ILLEGAL:
		return nil, self.errorf("Token") // TODO
	case token.SHOW:
		return self.parseShow()
	case token.SELECT:
		return self.parseSelect()
	case token.CREATE:
		return self.parseCreate()
	case token.INSERT, token.REPLACE:
		return self.parseInsert()
	case token.UPDATE:
		return self.parseUpdate()
	case token.DELETE:
		return self.parseDelete()
	default:
		return nil, self.errorf(`Unknown command: "%s"`, self.peekLiteral())
	}
}
// parseTransaction parses a BEGIN/START/COMMIT/ROLLBACK/END statement. BEGIN
// and START may be followed by a transaction type; an optional trailing
// TRANSACTION keyword is consumed and otherwise ignored.
func (self *Parser) parseTransaction(op token.Token) (ast.Command, error) {
	cmd := &ast.Transaction{
		TransactionPos: self.peekPos(),
		Op:             op,
		Type:           token.ILLEGAL, // only BEGIN/START carry a type
	}
	self.skip()
	// Parse transaction type
	if cmd.Op == token.BEGIN || cmd.Op == token.START {
		cmd.Type = self.parseTransactionType()
	}
	// Parse transaction option
	switch self.peek() {
	case token.EOF:
		return cmd, nil
	case token.TRANSACTION:
		self.skip()
		return cmd, nil
	default:
		return cmd, nil
	}
}
// parseTransactionType consumes and returns DEFERRED, IMMEDIATE or
// EXCLUSIVE; any other token leaves the input untouched and defaults to
// DEFERRED.
func (self *Parser) parseTransactionType() token.Token {
	if t := self.peek(); t == token.DEFERRED || t == token.IMMEDIATE || t == token.EXCLUSIVE {
		self.skip()
		return t
	}
	return token.DEFERRED
}
// parseShow parses SHOW DATABASES / SHOW TABLES; any other target is an
// error.
func (self *Parser) parseShow() (ast.Command, error) {
	cmd := &ast.Show{
		ShowPos: self.peekPos(),
	}
	self.skip() // skip "SHOW"
	switch self.peek() {
	case token.DATABASES, token.TABLES:
		cmd.Dest = self.peek()
		self.skip()
		return cmd, nil
	default:
		return nil, self.errorf(`Bad show command, unexpected: "%s"`, self.peekLiteral())
	}
}
// parseCreate parses CREATE [TEMP] TABLE and CREATE [UNIQUE] INDEX,
// forwarding the TEMP/UNIQUE flag to the specific parser.
func (self *Parser) parseCreate() (ast.Command, error) {
	self.skip() // skip `CREATE'
	switch self.peek() {
	case token.TABLE:
		return self.parseCreateTable(false)
	case token.TEMP:
		self.skip()
		return self.parseCreateTable(true)
	case token.INDEX:
		return self.parseCreateIndex(false)
	case token.UNIQUE:
		self.skip()
		return self.parseCreateIndex(true)
	default:
		return nil, self.errorf(`Bad create statement, unexpected "%s"`, self.peek().String())
	}
}
//------------------------------------------------------------------------------
// Create Table Actions:
//------------------------------------------------------------------------------
//
// CreateTable ::= `CREATE' Temp `TABLE' IfNotExists NameRef CreateTableArgs
//
// Temp ::= `TEMP'
// |
//
// IfNotExists ::= `IF' `NOT' `EXISTS'
// |
//
//
// parseCreateTable parses the remainder of a CREATE [TEMP] TABLE statement;
// `CREATE' (and `TEMP', when temp is true) has already been consumed. After
// the optional IF NOT EXISTS and the table name, the body is either a
// parenthesized column scheme or `AS' followed by a SELECT.
func (self *Parser) parseCreateTable(temp bool) (*ast.CreateTable, error) {
	var err error
	cmd := &ast.CreateTable{
		CreatePos:   self.peekPos(),
		Temp:        temp,
		IfNotExists: false,
	}
	if _, err = self.match(token.TABLE); err != nil {
		return nil, err
	}
	if self.test(token.IF) {
		if err = self.batchMatch(token.NOT, token.EXISTS); err != nil {
			return nil, err
		}
		cmd.IfNotExists = true
	}
	if cmd.Table, err = self.parseNameRef(); err != nil {
		return nil, err
	}
	switch self.peek() {
	case token.LPAREN:
		// Explicit column scheme: ( col type opts, ... )
		self.skip()
		if cmd.Scheme, err = self.parseColumnScheme(cmd); err != nil {
			return nil, err
		}
		if _, err = self.match(token.RPAREN); err != nil {
			return nil, err
		}
	case token.AS:
		// CREATE TABLE ... AS SELECT ...
		self.skip()
		if cmd.Template, err = self.parseSelect(); err != nil {
			return nil, err
		}
	default:
		return nil, self.errorf(`No table scheme be specified, unexpected "%s"`, self.peek().String())
	}
	cmd.CreateEnd = self.peekPos()
	return cmd, nil
}
//
// ColumnScheme ::= ColumnScheme `,' ColumnDefine
//
// ColumnDefine ::= Identifier TypeDecl ColumnOptionList
//
// ColumnOptionList ::= ColumnOptionList ColumnOption
// | ColumnOption
// |
//
// parseColumnScheme parses the comma-separated column definitions inside the
// parentheses of CREATE TABLE. Each column is a name, a type, and zero or
// more column options; once a table-level option (parseColDefOption) appears
// after a comma, only table-level options may follow until the closing paren.
func (self *Parser) parseColumnScheme(cmd *ast.CreateTable) ([]ast.ColumnDefine, error) {
	scheme := make([]ast.ColumnDefine, 0)
	for {
		def := ast.ColumnDefine{
			NotNullOn:    token.DEFAULT,
			UniqueOn:     token.DEFAULT,
			PrimaryKeyOn: token.DEFAULT,
		}
		var err error
		// column name
		if def.Name, err = self.parseName(); err != nil {
			return scheme, err
		}
		// column type
		var decl *ast.Type
		if decl, err = self.parseType(); err != nil {
			return scheme, err
		} else {
			def.ColumnType = *decl
		}
		// zero or more column options (DEFAULT, NOT NULL, PRIMARY KEY, ...)
		var ok bool
		if ok, err = self.parseColumnOption(cmd, &def); err != nil {
			return scheme, err
		}
		for ok {
			if ok, err = self.parseColumnOption(cmd, &def); err != nil {
				return scheme, err
			}
		}
		scheme = append(scheme, def)
		if !self.test(token.COMMA) {
			break
		}
		// After a comma: either another column definition (ok == false) or
		// the start of the table-level option list (ok == true).
		if ok, err = self.parseColDefOption(cmd, scheme); err != nil {
			return scheme, err
		}
		if ok {
			for self.test(token.COMMA) {
				if ok, err = self.parseColDefOption(cmd, scheme); err != nil {
					return scheme, err
				}
				if !ok {
					return scheme, self.errorf(`Bad column define, unexpected "%s"`, self.peek().String())
				}
			}
			break
		}
	}
	return scheme, nil
}
//
// ColumnOption ::= `DEFAULT' Literal
// | `DEFAULT' `(' Expr `)'
// | `DEFAULT' Identifier
// | `NULL' OnConf
// | `NOT' `NULL' OnConf
// | `PRIMARY' `KEY' SortOrder OnConf AutoIncr
// | `UNIQUE' OnConf
// | `CHECK' `(' Expr `)'
// | `COLLATE' Identifier
// |
//
// AutoIncr ::= `AUTOINCR'
// |
//
// parseColumnOption parses at most one column option into def (CHECK
// constraints are attached to cmd instead). It returns false without
// consuming input when the look-ahead does not start an option.
//
// Fix: the CHECK and COLLATE branches used to return a nil error on
// failure, silently swallowing parse errors; they now propagate err.
func (self *Parser) parseColumnOption(cmd *ast.CreateTable, def *ast.ColumnDefine) (bool, error) {
	var err error
	switch self.peek() {
	case token.DEFAULT:
		self.skip()
		// Either a parenthesized expression or a bare literal/identifier.
		if self.peek() == token.LPAREN {
			self.skip()
			if def.Default, err = self.NextExpr(); err != nil {
				return false, err
			}
			if _, err = self.match(token.RPAREN); err != nil {
				return false, err
			}
		} else {
			if def.Default, err = self.NextExpr(); err != nil {
				return false, err
			}
		}
		return true, nil
	case token.NULL:
		// NOTE(review): the grammar allows `NULL OnConf' but no OnConf is
		// consumed or recorded here — confirm whether that is intended.
		self.skip()
		return true, nil
	case token.NOT:
		self.skip()
		if _, err = self.match(token.NULL); err != nil {
			return false, err
		}
		if def.NotNullOn, err = self.parseOnConf(); err != nil {
			return false, err
		}
		def.NotNull = true
		return true, nil
	case token.PRIMARY:
		self.skip()
		if _, err = self.match(token.KEY); err != nil {
			return false, err
		}
		// Optional sort order; ASC is the default.
		if self.test(token.ASC) {
			def.PrimaryKeyDesc = false
		} else if self.test(token.DESC) {
			def.PrimaryKeyDesc = true
		}
		if def.PrimaryKeyOn, err = self.parseOnConf(); err != nil {
			return false, err
		}
		if self.test(token.AUTOINCR) {
			def.AutoIncr = true
		}
		def.PrimaryKey = true
		return true, nil
	case token.UNIQUE:
		self.skip()
		if def.UniqueOn, err = self.parseOnConf(); err != nil {
			return false, err
		}
		def.Unique = true
		return true, nil
	case token.CHECK:
		self.skip()
		if _, err = self.match(token.LPAREN); err != nil {
			return false, err
		}
		var expr ast.Expr
		if expr, err = self.NextExpr(); err != nil {
			return false, err
		}
		if _, err = self.match(token.RPAREN); err != nil {
			return false, err
		}
		// CHECK constraints are table-wide, so they live on cmd.
		cmd.CheckConstraint = append(cmd.CheckConstraint, expr)
		return true, nil
	case token.COLLATE:
		self.skip()
		if def.Collate, err = self.parseName(); err != nil {
			return false, err
		}
		return true, nil
	default:
		return false, nil
	}
}
//
// ColDefOption ::= `PRIMARY' `KEY' `(' IdxDefList AutoIncr `)' OnConf
// | `UNIQUE' `(' IdxDefList `)' OnConf
// | `CHECK' `(' Expr `)'
//
// parseColDefOption parses one table-level option that follows the column
// definitions and applies it to the already-parsed columns in scheme.
// It returns false without consuming input when the look-ahead does not
// start a table-level option.
func (self *Parser) parseColDefOption(cmd *ast.CreateTable, scheme []ast.ColumnDefine) (bool, error) {
	var err error
	switch self.peek() {
	case token.PRIMARY:
		self.skip()
		if err = self.batchMatch(token.KEY, token.LPAREN); err != nil {
			return false, err
		}
		var list []ast.IndexDefine
		if list, err = self.parseIdxDefList(); err != nil {
			return false, err
		}
		// test already reports presence/absence of AUTOINCR; the original
		// if/else was redundant.
		autoincr := self.test(token.AUTOINCR)
		if _, err = self.match(token.RPAREN); err != nil {
			return false, err
		}
		var onconf token.Token
		if onconf, err = self.parseOnConf(); err != nil {
			return false, err
		}
		// Copy the primary-key attributes onto every named column.
		// NOTE(review): unlike the column-level option this does not set
		// def.PrimaryKey — confirm whether that is intended.
		travelColumnDefine(list, scheme, func(idx *ast.IndexDefine, def *ast.ColumnDefine) {
			def.AutoIncr = autoincr
			def.PrimaryKeyDesc = idx.Desc
			def.PrimaryKeyOn = onconf
			def.Collate = idx.Collate
		})
		return true, nil
	case token.UNIQUE:
		self.skip()
		if _, err = self.match(token.LPAREN); err != nil {
			return false, err
		}
		var list []ast.IndexDefine
		if list, err = self.parseIdxDefList(); err != nil {
			return false, err
		}
		if _, err = self.match(token.RPAREN); err != nil {
			return false, err
		}
		var onconf token.Token
		if onconf, err = self.parseOnConf(); err != nil {
			return false, err
		}
		travelColumnDefine(list, scheme, func(idx *ast.IndexDefine, def *ast.ColumnDefine) {
			def.Unique = true
			def.UniqueOn = onconf
		})
		return true, nil
	case token.CHECK:
		self.skip()
		if _, err = self.match(token.LPAREN); err != nil {
			return false, err
		}
		var expr ast.Expr
		if expr, err = self.NextExpr(); err != nil {
			return false, err
		}
		if _, err = self.match(token.RPAREN); err != nil {
			return false, err
		}
		cmd.CheckConstraint = append(cmd.CheckConstraint, expr)
		return true, nil
	default:
		return false, nil
	}
}
// travelColumnDefine invokes closure for every (index, column) pair whose
// names match, letting table-level options mutate the named columns in
// place.
func travelColumnDefine(idx []ast.IndexDefine, scheme []ast.ColumnDefine,
	closure func(idx *ast.IndexDefine, def *ast.ColumnDefine)) {
	for i := range idx {
		for j := range scheme {
			if scheme[j].Name != idx[i].Name {
				continue
			}
			closure(&idx[i], &scheme[j])
		}
	}
}
//
// OnConf ::= `ON' `CONFLICT' Resolve
// |
//
// Resolve ::= Raise
// | `IGNORE'
// | `DEFAULT'
// | `REPLACE'
//
// Raise ::= `ROLLBACK'
// | `ABORT'
// | `FAIL'
//
// parseOnConf parses an optional `ON CONFLICT <resolve>' clause and
// returns the resolve token, or token.DEFAULT when the clause is absent.
func (self *Parser) parseOnConf() (token.Token, error) {
	if !self.test(token.ON) {
		return token.DEFAULT, nil
	}
	if _, err := self.match(token.CONFLICT); err != nil {
		return token.ILLEGAL, err
	}
	resolve := self.peek()
	switch resolve {
	case token.IGNORE, token.DEFAULT, token.REPLACE, token.ROLLBACK, token.ABORT, token.FAIL:
		self.skip()
		return resolve, nil
	}
	return token.ILLEGAL, self.errorf(`Bad "ON CONFLICT" option`)
}
//------------------------------------------------------------------------------
// Create Index Actions:
//------------------------------------------------------------------------------
//
// CreateIndex ::= `CREATE' UniqueFlag `INDEX' IfNotExists NameRef `ON' `(' IdxDefList `)'
//
// UniqueFlag ::= `UNIQUE'
// |
//
// parseCreateIndex parses a CREATE [UNIQUE] INDEX statement. The caller
// has already consumed CREATE (and UNIQUE, reported via the unique flag).
func (self *Parser) parseCreateIndex(unique bool) (*ast.CreateIndex, error) {
	cmd := &ast.CreateIndex{
		CreatePos: self.peekPos(),
		Unique:    unique,
	}
	var err error
	if _, err = self.match(token.INDEX); err != nil {
		return nil, err
	}
	// Optional `IF NOT EXISTS'.
	if self.test(token.IF) {
		if err = self.batchMatch(token.NOT, token.EXISTS); err != nil {
			return nil, err
		}
		cmd.IfNotExists = true
	}
	if cmd.Name, err = self.parseNameRef(); err != nil {
		return nil, err
	}
	if _, err = self.match(token.ON); err != nil {
		return nil, err
	}
	if cmd.Table, err = self.parseName(); err != nil {
		return nil, err
	}
	if _, err = self.match(token.LPAREN); err != nil {
		return nil, err
	}
	if cmd.Index, err = self.parseIdxDefList(); err != nil {
		return nil, err
	}
	if _, err = self.match(token.RPAREN); err != nil {
		return nil, err
	}
	cmd.CreateEnd = self.peekPos()
	return cmd, nil
}
// parseIdxDefList parses one or more comma-separated index column
// definitions of the form `Name [COLLATE Name] [ASC|DESC]'.
func (self *Parser) parseIdxDefList() ([]ast.IndexDefine, error) {
	parseOne := func(p *Parser) (ast.IndexDefine, error) {
		var def ast.IndexDefine
		var err error
		if def.Name, err = p.parseName(); err != nil {
			return def, err
		}
		if p.test(token.COLLATE) {
			if def.Collate, err = p.parseName(); err != nil {
				return def, err
			}
		}
		if p.test(token.ASC) {
			def.Desc = false
		} else if p.test(token.DESC) {
			def.Desc = true
		}
		return def, nil
	}
	idx := make([]ast.IndexDefine, 0)
	for {
		elem, err := parseOne(self)
		if err != nil {
			return idx, err
		}
		idx = append(idx, elem)
		if !self.test(token.COMMA) {
			return idx, nil
		}
	}
}
//------------------------------------------------------------------------------
// Insert Actions:
//------------------------------------------------------------------------------
//
// Insert ::= InsertPrefix `VALUES' `(' ExprList `)'
// | InsertPrefix Select
// | InsertPrefix `DEFAULT' `VALUES'
//
// InsertPrefix ::= InsCmd `INTO' NameRef InsColList
//
// InsCmd ::= `INSERT' OrConf
// | `REPLACE'
//
// InsColList ::= `(' IdentifierList `)'
// |
//
// parseInsert parses an INSERT or REPLACE statement. The caller has
// already verified the look-ahead is INSERT or REPLACE.
func (self *Parser) parseInsert() (*ast.Insert, error) {
	cmd := &ast.Insert{
		InsertPos: self.peekPos(),
		Op:        token.DEFAULT,
		Column:    make([]ast.Identifier, 0),
		Item:      make([]ast.Expr, 0),
	}
	var err error
	switch self.peek() {
	case token.INSERT:
		self.skip()
		// Optional `OR <resolve>' conflict clause.
		if cmd.Op, err = self.parseOrConf(); err != nil {
			return nil, err
		}
	case token.REPLACE:
		self.skip()
		cmd.Op = token.REPLACE
	default:
		// Unreachable: the dispatcher only calls us on INSERT/REPLACE.
		panic("No reached!")
	}
	if _, err = self.match(token.INTO); err != nil {
		return nil, err
	}
	if cmd.Dest, err = self.parseNameRef(); err != nil {
		return nil, err
	}
	// Optional explicit column list.
	if self.test(token.LPAREN) {
		if cmd.Column, err = self.parseIdentifierList(); err != nil {
			return nil, err
		}
		if _, err = self.match(token.RPAREN); err != nil {
			return nil, err
		}
	}
	// Value source: a subselect, a VALUES tuple, or DEFAULT VALUES.
	switch self.peek() {
	case token.SELECT:
		if cmd.From, err = self.parseSelect(); err != nil {
			return nil, err
		}
	case token.VALUES:
		self.skip()
		if _, err = self.match(token.LPAREN); err != nil {
			return nil, err
		}
		if cmd.Item, err = self.parseExprList(); err != nil {
			return nil, err
		}
		if _, err = self.match(token.RPAREN); err != nil {
			return nil, err
		}
	case token.DEFAULT:
		self.skip()
		if _, err = self.match(token.VALUES); err != nil {
			return nil, err
		}
	default:
		return nil, self.errorf(`Insert statement need values, unexpected "%s"`, self.peek().String())
	}
	cmd.InsertEnd = self.peekPos()
	return cmd, nil
}
// parseOrConf parses the optional `OR <resolve>' clause of INSERT/UPDATE
// and returns the resolve token, or token.DEFAULT when the clause is
// absent.
func (self *Parser) parseOrConf() (token.Token, error) {
	if !self.test(token.OR) {
		return token.DEFAULT, nil
	}
	resolve := self.peek()
	switch resolve {
	case token.IGNORE, token.DEFAULT, token.REPLACE, token.ROLLBACK, token.ABORT, token.FAIL:
		self.skip()
		return resolve, nil
	}
	return token.ILLEGAL, self.errorf(`Bad "OR" option`)
}
//------------------------------------------------------------------------------
// Update Actions:
//------------------------------------------------------------------------------
//
// Update ::= `UPDATE' OrConf NameRef Indexed `SET' SetList Where OrderBy Limit
//
// SetList ::= SetList `,' SetDefine
// | SetDefine
//
// parseUpdate parses an UPDATE statement; the look-ahead is the UPDATE
// keyword itself. WHERE / ORDER BY / LIMIT clauses are optional.
func (self *Parser) parseUpdate() (*ast.Update, error) {
	cmd := &ast.Update{
		UpdatePos: self.peekPos(),
		Set:       make([]ast.SetDefine, 0),
	}
	self.skip() // skip `UPDATE'
	var err error
	if cmd.Op, err = self.parseOrConf(); err != nil {
		return nil, err
	}
	if cmd.Dest, err = self.parseNameRef(); err != nil {
		return nil, err
	}
	// Optional `INDEXED BY' / `NOT INDEXED'.
	if cmd.Indexed, err = self.parseIndexed(); err != nil {
		return nil, err
	}
	if _, err = self.match(token.SET); err != nil {
		return nil, err
	}
	// At least one `column = expr' assignment, comma separated.
	var def ast.SetDefine
	if def, err = self.parseSetDefine(); err != nil {
		return nil, err
	} else {
		cmd.Set = append(cmd.Set, def)
	}
	for self.test(token.COMMA) {
		if def, err = self.parseSetDefine(); err != nil {
			return nil, err
		} else {
			cmd.Set = append(cmd.Set, def)
		}
	}
	if self.test(token.WHERE) {
		if cmd.Where, err = self.NextExpr(); err != nil {
			return nil, err
		}
	}
	if self.test(token.ORDER) {
		if _, err = self.match(token.BY); err != nil {
			return nil, err
		}
		if cmd.OrderBy, err = self.parseOrderBy(); err != nil {
			return nil, err
		}
	}
	if self.test(token.LIMIT) {
		if cmd.Limit, cmd.Offset, err = self.parseLimitOffset(); err != nil {
			return nil, err
		}
	}
	cmd.UpdateEnd = self.peekPos()
	return cmd, nil
}
//
// SetDefine ::= Identifier `=' Expr
//
// parseSetDefine parses one `column = expr' assignment of an UPDATE SET
// list.
func (self *Parser) parseSetDefine() (def ast.SetDefine, err error) {
	if def.Column, err = self.parseName(); err != nil {
		return
	}
	if _, err = self.match(token.EQ); err != nil {
		return
	}
	def.Value, err = self.NextExpr()
	return
}
//------------------------------------------------------------------------------
// Delete Actions:
//------------------------------------------------------------------------------
//
// Delete ::= `DELETE' `FROM' NameRef Indexed Where OrderBy Limit
//
// parseDelete parses a DELETE statement; WHERE / ORDER BY / LIMIT are
// optional clauses.
func (self *Parser) parseDelete() (*ast.Delete, error) {
	cmd := &ast.Delete{
		DeletePos: self.peekPos(),
	}
	var err error
	if err = self.batchMatch(token.DELETE, token.FROM); err != nil {
		return nil, err
	}
	if cmd.Dest, err = self.parseNameRef(); err != nil {
		return nil, err
	}
	// Optional `INDEXED BY' / `NOT INDEXED'.
	if cmd.Indexed, err = self.parseIndexed(); err != nil {
		return nil, err
	}
	if self.test(token.WHERE) {
		if cmd.Where, err = self.NextExpr(); err != nil {
			return nil, err
		}
	}
	if self.test(token.ORDER) {
		if _, err = self.match(token.BY); err != nil {
			return nil, err
		}
		if cmd.OrderBy, err = self.parseOrderBy(); err != nil {
			return nil, err
		}
	}
	if self.test(token.LIMIT) {
		if cmd.Limit, cmd.Offset, err = self.parseLimitOffset(); err != nil {
			return nil, err
		}
	}
	cmd.DeleteEnd = self.peekPos()
	return cmd, nil
}
//------------------------------------------------------------------------------
// Select Statement Actions:
//------------------------------------------------------------------------------
//
// Select ::= Select SetOp SingleSelect
// | SingleSelect
//
// SetOp ::= `UNION'
// | `UNION' `ALL'
// | `EXCEPT'
// | `INTERSECT'
//
// SingleSelect ::= `SELECT' Distinct SelColList From Where GroupBy Having OrderBy Limit
//
// Distinct ::= `DISTINCT'
// | `ALL'
// |
//
// parseSelect parses one SELECT statement. Compound selects are chained
// right-recursively: the statement following a set operator is parsed
// into cmd.Prior.
func (self *Parser) parseSelect() (*ast.Select, error) {
	cmd := &ast.Select{
		SelectPos: self.peekPos(),
	}
	self.skip() // skip `SELECT'
	if self.peek() == token.DISTINCT {
		self.skip()
		cmd.Distinct = true
	} else if self.peek() == token.ALL {
		self.skip()
		cmd.Distinct = false
	}
	var err error
	if cmd.SelColList, err = self.parseSelColList(); err != nil {
		return nil, err
	}
	// All clauses below are optional; each is parsed only when its
	// introducing keyword is present.
	if self.test(token.FROM) {
		if cmd.From, err = self.parseSelTabList(); err != nil {
			return nil, err
		}
	}
	if self.test(token.WHERE) {
		if cmd.Where, err = self.NextExpr(); err != nil {
			return nil, err
		}
	}
	if self.test(token.GROUP) {
		if _, err = self.match(token.BY); err != nil {
			return nil, err
		}
		if cmd.GroupBy, err = self.parseExprList(); err != nil {
			return nil, err
		}
	}
	if self.test(token.HAVING) {
		if cmd.Having, err = self.NextExpr(); err != nil {
			return nil, err
		}
	}
	if self.test(token.ORDER) {
		if _, err = self.match(token.BY); err != nil {
			return nil, err
		}
		if cmd.OrderBy, err = self.parseOrderBy(); err != nil {
			return nil, err
		}
	}
	if self.test(token.LIMIT) {
		if cmd.Limit, cmd.Offset, err = self.parseLimitOffset(); err != nil {
			return nil, err
		}
	}
	// End of select statement
	cmd.SelectEnd = self.peekPos()
	// Optional set operator introduces a compound select.
	switch self.peek() {
	case token.UNION:
		self.skip()
		if self.test(token.ALL) {
			cmd.Op = token.UNION_ALL
		} else {
			cmd.Op = token.UNION
		}
	case token.EXCEPT:
		self.skip()
		cmd.Op = token.EXCEPT
	case token.INTERSECT:
		self.skip()
		cmd.Op = token.INTERSECT
	}
	// cmd.Op keeps the token zero value when no set operator follows.
	if cmd.Op != 0 {
		if cmd.Prior, err = self.parseSelect(); err != nil {
			return nil, err
		}
	}
	return cmd, nil
}
//
// SelColList ::= SelColList `,' SelectColumn
// | SelectColumn
//
// SelectColumn ::= Expr
// | Expr `AS' Identifer
// | `*'
//
// parseSelColList parses the select column list. A bare `*' is stored
// as a Literal node carrying the STAR token.
func (self *Parser) parseSelColList() ([]ast.SelectColumn, error) {
	column := make([]ast.SelectColumn, 0)
	for {
		var elem ast.SelectColumn
		if self.peek() == token.STAR {
			expr := &ast.Literal{
				ValuePos: self.peekPos(),
				Value:    self.peekLiteral(),
				Kind:     self.peek(),
			}
			self.skip()
			elem.SelectExpr = expr
			elem.Alias = ""
		} else {
			if expr, err := self.NextExpr(); err != nil {
				return column, err
			} else {
				elem.SelectExpr = expr
			}
			// parseAliasName consumes the AS peeked at here.
			if self.peek() == token.AS {
				var err error
				if elem.Alias, err = self.parseAliasName(); err != nil {
					return column, err
				}
			}
		}
		column = append(column, elem)
		if !self.test(token.COMMA) {
			break
		}
	}
	return column, nil
}
//
// AliasName ::= `AS' Identifer
//
// parseAliasName consumes the AS keyword and returns the identifier
// that follows it.
func (self *Parser) parseAliasName() (string, error) {
	_, err := self.match(token.AS)
	if err != nil {
		return "", err
	}
	return self.parseName()
}
// parseName matches one ID token and returns its literal with any
// surrounding backquotes stripped.
func (self *Parser) parseName() (string, error) {
	lah, err := self.match(token.ID)
	if err != nil {
		return "", err
	}
	return strings.Trim(lah.Literal, "`"), nil
}
//
// SelTabList ::= SelTabList JoinOp Source
// | Source
//
// JoinOP ::= `,'
// | `LEFT' `OUTER' `JOIN'
// | `LEFT' `JOIN'
// | `RIGHT' `OUTER' `JOIN'
// | `RIGHT' `JOIN'
// | `FULL' `OUTER' `JOIN'
// | `JOIN'
// | `INNER' `JOIN'
// | `CROSS' `JOIN'
// | `NATURAL' `JOIN'
//
// Source ::= Name Alias Indexed On Using
// | `(' Select `)'' Alias On Using
//
// parseSelTabList parses the FROM clause: a list of table references or
// parenthesized subqueries connected by join operators.
//
// Fix: a failure in parseNameRef used to return a nil error (silently
// producing a truncated source list); it now propagates err.
func (self *Parser) parseSelTabList() ([]ast.Source, error) {
	source := make([]ast.Source, 0)
	for {
		var err error
		var elem ast.Source
		elem.SourcePos = self.peekPos()
		if self.test(token.LPAREN) {
			// Parenthesized subquery source.
			if elem.Subquery, err = self.parseSelect(); err != nil {
				return source, err
			}
			if _, err = self.match(token.RPAREN); err != nil {
				return source, err
			}
		} else {
			var name ast.NameRef
			if name, err = self.parseNameRef(); err != nil {
				return source, err
			}
			elem.Table = &name
		}
		// Alias: either `AS name' or a bare trailing identifier.
		if self.test(token.AS) || self.peek() == token.ID {
			if elem.Alias, err = self.parseName(); err != nil {
				return source, err
			}
		}
		// `INDEXED BY' only applies to real tables, not subqueries.
		if elem.Table != nil {
			if elem.Indexed, err = self.parseIndexed(); err != nil {
				return source, err
			}
		}
		if self.test(token.USING) {
			if _, err = self.match(token.LPAREN); err != nil {
				return source, err
			}
			if elem.Using, err = self.parseIdentifierList(); err != nil {
				return source, err
			}
			if _, err = self.match(token.RPAREN); err != nil {
				return source, err
			}
		}
		elem.SourceEnd = self.peekPos()
		if elem.JoinType, err = self.parseJoinType(); err != nil {
			return source, err
		}
		// Parenthesized ON condition for the join just parsed.
		if self.test(token.ON) {
			if _, err = self.match(token.LPAREN); err != nil {
				return source, err
			}
			if elem.On, err = self.NextExpr(); err != nil {
				return source, err
			}
			if _, err = self.match(token.RPAREN); err != nil {
				return source, err
			}
		}
		source = append(source, elem)
		// JoinType 0 means no further join operator follows.
		if elem.JoinType == 0 {
			break
		}
	}
	return source, nil
}
// parseIndexed parses the optional `INDEXED BY name' / `NOT INDEXED'
// clause and returns the index name; the result is "" both when the
// clause is absent and for NOT INDEXED.
func (self *Parser) parseIndexed() (string, error) {
	if self.test(token.INDEXED) {
		if _, err := self.match(token.BY); err != nil {
			return "", err
		}
		return self.parseName()
	}
	if self.test(token.NOT) {
		if _, err := self.match(token.INDEXED); err != nil {
			return "", err
		}
	}
	return "", nil
}
// parseJoinType consumes a (possibly compound) join specification and
// returns the ast.JT_* flags or-ed together; 0 means no join follows.
// A bare `,' counts as an inner join.
func (self *Parser) parseJoinType() (int, error) {
	if self.test(token.COMMA) {
		return ast.JT_INNER, nil
	}
	jt := 0
	for {
		switch self.peek() {
		case token.INNER:
			self.skip()
			jt |= ast.JT_INNER
		case token.CROSS:
			self.skip()
			jt |= ast.JT_CROSS
		case token.NATURAL:
			self.skip()
			jt |= ast.JT_NATURAL
		case token.LEFT:
			self.skip()
			jt |= ast.JT_LEFT
		case token.RIGHT:
			self.skip()
			jt |= ast.JT_RIGHT
		case token.OUTER:
			self.skip()
			jt |= ast.JT_OUTER
		case token.JOIN:
			// Bare JOIN defaults to inner join.
			if jt == 0 {
				jt = ast.JT_INNER
			}
			self.skip()
			return jt, nil
		default:
			// NOTE(review): qualifiers accumulated without a trailing
			// JOIN are silently discarded here — confirm intended.
			return 0, nil
		}
	}
}
// parseOrderBy parses a comma-separated list of `expr [ASC|DESC]'
// items; the sort direction defaults to ascending.
func (self *Parser) parseOrderBy() ([]ast.OrderByItem, error) {
	item := make([]ast.OrderByItem, 0)
	for {
		expr, err := self.NextExpr()
		if err != nil {
			return item, err
		}
		elem := ast.OrderByItem{Item: expr}
		if self.test(token.ASC) {
			elem.Desc = false
		} else if self.test(token.DESC) {
			elem.Desc = true
		}
		item = append(item, elem)
		if !self.test(token.COMMA) {
			return item, nil
		}
	}
}
//
// LimitOffset ::= `LIMIT' IntLiteral
// | `LIMIT' IntLiteral `,' IntLiteral
// | `LIMIT' IntLiteral `OFFSET' IntLiteral
//
// parseLimitOffset parses the literals after LIMIT and returns
// (limit, offset); offset is nil when not given. In the comma form the
// first literal is the offset and the second the limit.
func (self *Parser) parseLimitOffset() (ast.Expr, ast.Expr, error) {
	first, err := self.parseIntLiteral()
	if err != nil {
		return nil, nil, err
	}
	if self.test(token.COMMA) {
		// LIMIT offset, limit
		limit, err := self.parseIntLiteral()
		return limit, first, err
	}
	if self.test(token.OFFSET) {
		// LIMIT limit OFFSET offset
		offset, err := self.parseIntLiteral()
		return first, offset, err
	}
	return first, nil, nil
}
//------------------------------------------------------------------------------
// Expression Actions:
//------------------------------------------------------------------------------
// NextExpr parses one full expression at the lowest binding power,
// discarding the look-ahead operator that parseExpr reports.
func (self *Parser) NextExpr() (ast.Expr, error) {
	_, expr, err := self.parseExpr(0)
	return expr, err
}
// parseExpr parses an expression whose binary operators all bind
// tighter than limit (precedence climbing). It returns the first
// operator it did not consume together with the parsed expression.
func (self *Parser) parseExpr(limit int) (token.Token, ast.Expr, error) {
	var expr ast.Expr
	var err error
	// A prefix operator or a simple operand starts the expression.
	if self.peek().Prefix() {
		unary := &ast.UnaryExpr{
			OpPos: self.peekPos(),
			Op:    self.peek(),
		}
		self.skip()
		if _, unary.Operand, err = self.parseExpr(kPrioPrefix); err != nil {
			return token.ILLEGAL, nil, err
		}
		expr = unary
	} else {
		if expr, err = self.parseSimple(); err != nil {
			return token.ILLEGAL, nil, err
		}
	}
next:
	op := self.peek()
	// Fold in binary operators while they bind tighter than limit.
	for op.Binary() && priority(op).Lhs > limit {
		binary := &ast.BinaryExpr{
			OpPos: self.peekPos(),
			Op:    op,
			Lhs:   expr,
		}
		self.skip()
		switch op {
		case token.IN:
			// IN takes a parenthesized value set or subquery, not a
			// plain expression.
			if binary.Rhs, err = self.parseWhereInSet(); err != nil {
				return token.ILLEGAL, nil, err
			}
			op = self.peek()
		case token.LIKE:
			// LIKE requires a string literal pattern on the right.
			if self.peek() != token.STRING_LITERAL {
				return token.ILLEGAL, nil, self.errorf("LIKE operator need string pattern")
			}
			binary.Rhs = &ast.Literal{
				ValuePos: self.peekPos(),
				Value:    self.peekLiteral(),
				Kind:     self.peek(),
			}
			self.skip()
			op = self.peek()
		default:
			if op, binary.Rhs, err = self.parseExpr(priority(op).Rhs); err != nil {
				return token.ILLEGAL, nil, err
			}
		}
		expr = binary
	}
	// A postfix operator (IS [NOT] NULL) may follow; after applying it,
	// further binary operators can still bind, hence the jump back.
	if op.Postfix() && kPrioPostfix > limit {
		if expr, err = self.parsePostfix(expr); err != nil {
			return token.ILLEGAL, nil, err
		}
		goto next
	}
	return op, expr, err
}
//
// WhereInSet ::= `(' SelectStatement `)'
// | `(' ExprList `)'
//
// parseWhereInSet parses the right operand of IN: a parenthesized
// subquery or a parenthesized expression list.
func (self *Parser) parseWhereInSet() (ast.Expr, error) {
	if _, err := self.match(token.LPAREN); err != nil {
		return nil, err
	}
	var expr ast.Expr
	var err error
	if self.peek() == token.SELECT {
		expr, err = self.parseSelect()
	} else {
		var list []ast.Expr
		if list, err = self.parseExprList(); err == nil {
			expr = ast.ExprList(list)
		}
	}
	if err != nil {
		return nil, err
	}
	if _, err = self.match(token.RPAREN); err != nil {
		return nil, err
	}
	return expr, nil
}
// parseSimple parses a non-operator expression: a literal, a CASE
// condition, a CAST, or a suffixed primary (identifier / call / dotted
// reference).
func (self *Parser) parseSimple() (ast.Expr, error) {
	switch tok := self.peek(); tok {
	case token.CASE:
		return self.parseCondition()
	case token.CAST:
		return self.parseCast()
	case token.NULL, token.INT_LITERAL, token.FLOAT_LITERAL, token.STRING_LITERAL:
		lit := &ast.Literal{
			ValuePos: self.peekPos(),
			Value:    self.peekLiteral(),
			Kind:     tok,
		}
		self.skip()
		return lit, nil
	}
	return self.parseSuffixed()
}
// parseCondition parses `CASE [expr] WHEN ... THEN ... [ELSE expr]'.
// At least one WHEN/THEN block is required.
func (self *Parser) parseCondition() (ast.Expr, error) {
	cond := &ast.Condition{
		OpPos:  self.peekPos(),
		Blocks: make([]ast.ConditionBlock, 0),
	}
	self.skip()
	var err error
	// Optional operand between CASE and the first WHEN.
	if self.peek() != token.WHEN {
		if cond.Case, err = self.NextExpr(); err != nil {
			return nil, err
		}
	}
	// block is reused across iterations; both fields are overwritten
	// before the value is appended.
	var block ast.ConditionBlock
	for self.test(token.WHEN) {
		if block.When, err = self.NextExpr(); err != nil {
			return nil, err
		}
		if _, err = self.match(token.THEN); err != nil {
			return nil, err
		}
		if block.Then, err = self.NextExpr(); err != nil {
			return nil, err
		}
		cond.Blocks = append(cond.Blocks, block)
	}
	if len(cond.Blocks) == 0 {
		return nil, self.errorf(`WHEN ... THEN ... block not found`)
	}
	if self.peek() == token.ELSE {
		self.skip()
		if cond.Else, err = self.NextExpr(); err != nil {
			return nil, err
		}
	}
	// NOTE(review): no END token is consumed here — confirm the grammar
	// really omits the usual CASE ... END terminator.
	return cond, nil
}
//
// CastExpr ::= `CAST' `(' Expr `AS' Type `)'
//
// parseCast parses a CAST expression; the look-ahead is the CAST
// keyword itself.
func (self *Parser) parseCast() (*ast.CastExpr, error) {
	cast := &ast.CastExpr{
		OpPos: self.peekPos(),
	}
	self.skip() // skip `CAST'
	var err error
	if _, err = self.match(token.LPAREN); err != nil {
		return nil, err
	}
	if cast.Operand, err = self.NextExpr(); err != nil {
		return nil, err
	}
	if _, err = self.match(token.AS); err != nil {
		return nil, err
	}
	var ty *ast.Type
	if ty, err = self.parseType(); err != nil {
		return nil, err
	}
	cast.To = *ty
	if _, err = self.match(token.RPAREN); err != nil {
		return nil, err
	}
	return cast, nil
}
//
// TypeDecl ::= Type Sign
// | Type `(' IntLiteral `)' Sign
// | Type `(' IntLiteral `,' IntLiteral `)' Sign
//
// Sign ::= `UNSIGNED'
// |
//
// Type ::= `TINYINT'
// | `SMALLINT'
// | `INT'
// | ...
//
// parseType parses a column/cast type: a keyword token optionally
// followed by (width[, decimal]) and an UNSIGNED flag.
func (self *Parser) parseType() (*ast.Type, error) {
	// Any keyword token is accepted as a type name here; validation of
	// the concrete type happens later.
	if self.peek().Kind() != token.TT_KEYWORD {
		return nil, self.errorf(`"%s" not type!`, self.peek().String())
	}
	decl := &ast.Type{
		TokenPos: self.peekPos(),
		Kind:     self.peek(),
		Unsigned: false,
	}
	self.skip()
	if self.peek() == token.LPAREN {
		self.skip()
		var err error
		if decl.Width, err = self.parseIntLiteral(); err != nil {
			return nil, err
		}
		// Optional decimal digits, e.g. DECIMAL(10, 2).
		if self.peek() == token.COMMA {
			self.skip()
			if decl.Decimal, err = self.parseIntLiteral(); err != nil {
				return nil, err
			}
		}
		if _, err = self.match(token.RPAREN); err != nil {
			return nil, err
		}
	}
	if self.peek() == token.UNSIGNED {
		self.skip()
		decl.Unsigned = true
	}
	return decl, nil
}
// parseIntLiteral matches one INT_LITERAL token and wraps it in an
// ast.Literal node.
func (self *Parser) parseIntLiteral() (*ast.Literal, error) {
	lah, err := self.match(token.INT_LITERAL)
	if err != nil {
		return nil, err
	}
	lit := &ast.Literal{
		ValuePos: lah.Pos,
		Value:    lah.Literal,
		Kind:     lah.Token,
	}
	return lit, nil
}
// parseSuffixed parses a primary expression and, when it is a bare
// identifier followed by `(', a function call. Dotted references
// (a.b) are never calls and are returned as-is.
func (self *Parser) parseSuffixed() (ast.Expr, error) {
	id, err := self.parsePrimary()
	if err != nil {
		return nil, err
	}
	if isDot(id) {
		return id, nil
	}
	if self.peek() == token.LPAREN {
		self.skip()
		// id must be an *ast.Identifier here: parsePrimary returns either
		// an identifier, a dotted pair (excluded above), or a
		// parenthesized expression (whose look-ahead RPAREN was consumed).
		call := &ast.CallExpr{
			Func:     *(id.(*ast.Identifier)),
			Args:     make([]ast.Expr, 0),
			Distinct: false,
		}
		// Special form COUNT(*): `*' becomes a STAR literal argument.
		if self.peek() == token.STAR {
			star := &ast.Literal{
				ValuePos: self.peekPos(),
				Value:    self.peekLiteral(),
				Kind:     self.peek(),
			}
			self.skip()
			call.Args = append(call.Args, star)
			if _, err = self.match(token.RPAREN); err != nil {
				return nil, err
			}
			return call, err
		}
		// Optional DISTINCT before the argument list, e.g. COUNT(DISTINCT x).
		if self.peek() == token.DISTINCT {
			self.skip()
			call.Distinct = true
		}
		if self.peek() != token.RPAREN {
			if call.Args, err = self.parseExprList(); err != nil {
				return nil, err
			}
		}
		if _, err = self.match(token.RPAREN); err != nil {
			return nil, err
		}
		return call, nil
	}
	return id, nil
}
// parsePrimary parses a parenthesized expression, an identifier, or a
// dotted pair `a.b' (returned as a BinaryExpr with Op DOT).
func (self *Parser) parsePrimary() (ast.Expr, error) {
	var expr ast.Expr
	var err error
	switch self.peek() {
	case token.LPAREN:
		self.skip()
		if expr, err = self.NextExpr(); err != nil {
			return nil, err
		} else if _, err = self.match(token.RPAREN); err != nil {
			return nil, err
		} else {
			return expr, nil
		}
	case token.ID:
		if expr, err = self.parseIdentifier(); err != nil {
			return nil, err
		}
		// Optional member access: table.column.
		if self.peek() == token.DOT {
			bin := &ast.BinaryExpr{
				OpPos: self.peekPos(),
				Op:    token.DOT,
				Lhs:   expr,
			}
			self.skip()
			if expr, err = self.parseIdentifier(); err != nil {
				return nil, err
			}
			bin.Rhs = expr
			expr = bin
		}
		return expr, nil
	default:
		return nil, self.errorf(`Unexpected expression, expected "%s"`, self.peekLiteral())
	}
}
// parseExprList parses one or more comma-separated expressions.
func (self *Parser) parseExprList() ([]ast.Expr, error) {
	list := make([]ast.Expr, 0)
	for {
		expr, err := self.NextExpr()
		if err != nil {
			return list, err
		}
		list = append(list, expr)
		if !self.test(token.COMMA) {
			return list, nil
		}
	}
}
// parseIdentifier matches one ID token and wraps it in an
// ast.Identifier node.
func (self *Parser) parseIdentifier() (*ast.Identifier, error) {
	tok, err := self.match(token.ID)
	if err != nil {
		return nil, err
	}
	return &ast.Identifier{
		NamePos: tok.Pos,
		Name:    tok.Literal,
	}, nil
}
// parsePostfix parses the postfix operators `IS NULL' / `IS NOT NULL'
// applied to expr; any other look-ahead leaves expr unchanged.
func (self *Parser) parsePostfix(expr ast.Expr) (ast.Expr, error) {
	if self.peek() != token.IS {
		return expr, nil
	}
	unary := &ast.UnaryExpr{
		OpPos:   self.peekPos(),
		Operand: expr,
	}
	self.skip()
	unary.Op = token.IS_NULL
	if self.peek() == token.NOT {
		self.skip()
		unary.Op = token.IS_NOT_NULL
	}
	if _, err := self.match(token.NULL); err != nil {
		return nil, err
	}
	return unary, nil
}
// parseIdentifierList parses one or more comma-separated identifiers.
func (self *Parser) parseIdentifierList() ([]ast.Identifier, error) {
	id := make([]ast.Identifier, 0)
	for {
		lah, err := self.match(token.ID)
		if err != nil {
			return id, err
		}
		id = append(id, ast.Identifier{NamePos: lah.Pos, Name: lah.Literal})
		if !self.test(token.COMMA) {
			return id, nil
		}
	}
}
// parseNameRef parses `name' or `name.name', stripping backquotes from
// each part.
func (self *Parser) parseNameRef() (ast.NameRef, error) {
	var name ast.NameRef
	lah, err := self.match(token.ID)
	if err != nil {
		return name, err
	}
	name.First = strings.Trim(lah.Literal, "`")
	if self.test(token.DOT) {
		if lah, err = self.match(token.ID); err != nil {
			return name, err
		}
		name.Second = strings.Trim(lah.Literal, "`")
	}
	return name, nil
}
// errorf builds a parse error at the current position. ILLEGAL and EOF
// look-aheads get dedicated messages; otherwise s is formatted with a
// and prefixed by the position.
func (self *Parser) errorf(s string, a ...interface{}) error {
	switch self.peek() {
	case token.ILLEGAL:
		return fmt.Errorf("[%d] Illegal token: %v", self.peekPos(), self.lex.Error())
	case token.EOF:
		return fmt.Errorf("Command already end")
	default:
		return fmt.Errorf(`[%d] %s`, self.peekPos(), fmt.Sprintf(s, a...))
	}
}
// peek returns the look-ahead token without consuming it.
func (self *Parser) peek() token.Token {
	return self.lah.Token
}

// peekPos returns the position of the look-ahead token.
func (self *Parser) peekPos() int {
	return self.lah.Pos
}

// peekLiteral returns the literal text of the look-ahead token.
func (self *Parser) peekLiteral() string {
	return self.lah.Literal
}
// test reports whether the look-ahead token is exp, consuming it when
// it is.
func (self *Parser) test(exp token.Token) bool {
	if self.peek() != exp {
		return false
	}
	self.skip()
	return true
}
// skip consumes the look-ahead token and pulls the next one from the
// lexer.
func (self *Parser) skip() {
	self.lah.Pos, self.lah.Token, self.lah.Literal = self.lex.Next()
}
// batchMatch matches every token of list in order, stopping at the
// first mismatch.
func (self *Parser) batchMatch(list ...token.Token) error {
	for _, exp := range list {
		if _, err := self.match(exp); err != nil {
			return err
		}
	}
	return nil
}
// match consumes and returns the look-ahead token when it equals exp;
// otherwise the input is left untouched and an error is returned.
func (self *Parser) match(exp token.Token) (tokeniton, error) {
	var prev tokeniton
	if self.peek() != exp {
		// Report the token actually found first, then the expectation.
		// The original had the two format arguments swapped, producing
		// messages like `Unexpected "ID", expected "foo"'.
		return prev, self.errorf(`Unexpected "%s", expected "%s"`, self.peekLiteral(), exp)
	}
	prev = self.lah
	self.skip()
	return prev, nil
}
// priority returns the binding powers of binary operator op. It panics
// on tokens absent from the prio table: that is a parser bug, not a
// user input error.
func priority(op token.Token) priorition {
	// Distinct local name so the package-level `prio' map is not
	// shadowed inside this function (the original shadowed it).
	pr, found := prio[op]
	if !found {
		panic(fmt.Sprintf("Op(%s) not found", op))
	}
	return pr
}
// isDot reports whether expr is a binary `a.b' member reference.
func isDot(expr ast.Expr) bool {
	if bin, ok := expr.(*ast.BinaryExpr); ok {
		return bin.Op == token.DOT
	}
	return false
}
// tokeniton is one unit of look-ahead: the token kind, its position in
// the input and its literal text.
type tokeniton struct {
	Token   token.Token
	Pos     int
	Literal string
}
// priorition holds the left and right binding powers of a binary
// operator, as consumed by parseExpr's precedence climbing.
type priorition struct {
	Lhs int
	Rhs int
}
// Binding powers of prefix and postfix operators; binary operator
// powers live in the prio table.
const (
	kPrioPrefix  = 9
	kPrioPostfix = 1
)
// prio maps each binary operator to its binding powers; larger binds
// tighter (LIKE tightest, OR loosest). Redundant element types removed
// per gofmt -s composite-literal simplification.
var prio = map[token.Token]priorition{
	token.LIKE:  {8, 8},
	token.STAR:  {7, 7},
	token.SLASH: {7, 7},
	token.PLUS:  {6, 6},
	token.MINUS: {6, 6},
	token.IN:    {5, 5},
	token.NE:    {4, 4},
	token.EQ:    {4, 4},
	token.LT:    {3, 3},
	token.LE:    {3, 3},
	token.GT:    {3, 3},
	token.GE:    {3, 3},
	token.AND:   {2, 2},
	token.OR:    {1, 1},
}
|
package test
// student demonstrates that a type whose name starts with a lowercase
// letter is unexported: it is only accessible within this package.
type student struct {
	id   int
	name string
}
// Student demonstrates that a type whose name starts with an uppercase
// letter is exported: it is accessible from other packages.
type Student struct {
	// id starts with a lowercase letter, so it is only accessible
	// within this package.
	id int
	// Name starts with an uppercase letter, so it is accessible from
	// other packages.
	Name string
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"strings"
"time"
"github.com/kataras/iris"
)
// Cluster-local base URLs of the metadata and extension adapter
// services.
const (
	adapterMetadata  = "http://adapter-metadata.default.svc.cluster.local"
	adapterExtension = "http://adapter-extension.default.svc.cluster.local"
)
// Timeseries : Timeseries structure with timeseriesId and metadataIds
// as returned by the metadata adapter.
type Timeseries struct {
	TimeseriesID   string `json:"timeseriesId"`
	ModuleID       string `json:"moduleId"`
	ValueType      string `json:"valueType"`
	ParameterID    string `json:"parameterId"`
	LocationID     string `json:"locationId"`
	TimeseriesType string `json:"timeseriesType"`
	TimeStepID     string `json:"timeStepId"`
}
// Extensions is the list of extension registrations returned by the
// extension adapter; each entry names the extension function to invoke
// and the variable/timeseries bindings it operates on.
type Extensions []struct {
	ExtensionID string `json:"extensionId"`
	Extension   string `json:"extension"`
	Function    string `json:"function"`
	Data        struct {
		InputVariables  []string `json:"inputVariables"`
		OutputVariables []string `json:"outputVariables"`
		Variables       []struct {
			Timeseries Timeseries `json:"timeseries"`
			VariableID string     `json:"variableId"`
		} `json:"variables"`
	} `json:"data"`
	// Options stays raw; each extension decodes its own option schema.
	Options json.RawMessage `json:"options"`
}
// TODO: Remove
// getTimeseries fetches the metadata of timeseriesID from the metadata
// adapter and decodes it into metadata.
//
// Fix: the original discarded the json.Unmarshal error (assigned it
// and then returned nil) and passed &metadata, a **Timeseries, where
// the *Timeseries itself suffices.
func getTimeseries(timeseriesID string, metadata *Timeseries) error {
	url := fmt.Sprint(adapterMetadata, "/timeseries/", timeseriesID)
	fmt.Println("URL:", url)
	response, err := netClient.Get(url)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return err
	}
	if response.StatusCode != 200 {
		return fmt.Errorf("Unable to find Timeseries: %q", timeseriesID)
	}
	// Propagate decode failures instead of silently swallowing them.
	return json.Unmarshal(body, metadata)
}
// getExtensions fetches the extensions registered for triggerType,
// optionally filtered by timeseriesID, from the extension adapter and
// decodes them into extensions.
//
// Fixes: the original discarded the json.Unmarshal error (returned nil
// unconditionally) and its not-found message wrongly said "Timeseries".
func getExtensions(triggerType string, timeseriesID string, extensions *Extensions) error {
	fmt.Println("GET Extensions:", fmt.Sprint(" triggerType:", triggerType, " timeseriesID:", timeseriesID))
	path := fmt.Sprint(adapterExtension, "/extension/trigger_type/", triggerType)
	if timeseriesID != "" {
		path = fmt.Sprint(path, "?timeseriesId=", timeseriesID)
	}
	response, err := netClient.Get(path)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return err
	}
	if response.StatusCode != 200 {
		return fmt.Errorf("Unable to find Extensions: %q", timeseriesID)
	}
	// Propagate decode failures instead of silently swallowing them.
	return json.Unmarshal(body, extensions)
}
var tr = &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
DisableCompression: true,
Dial: (&net.Dialer{
Timeout: 5 * time.Second,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
}
// netClient is the shared HTTP client for all adapter and extension
// calls; the 10s Timeout bounds the whole request including body read.
var netClient = &http.Client{
	Transport: tr,
	Timeout:   time.Second * 10,
}
// main wires up the HTTP endpoints: a health check and an /onchange
// hook that fans the change event out to every registered extension
// service.
func main() {
	app := iris.Default()
	app.Post("/onchange/{timeseriesID:string}", func(ctx iris.Context) {
		timeseriesID := ctx.Params().Get("timeseriesID")
		fmt.Println("timeseriesID:", timeseriesID)
		var extensions Extensions
		err := getExtensions("OnChange", timeseriesID, &extensions)
		if err != nil {
			ctx.JSON(iris.Map{"response": err.Error()})
			return
		}
		/** PERFORMANCE: Issolation of extension work from import/export
		 * Need put the requests in a queue and process later. Otherwise need to increase the timeout at import & export
		 * Or use pubsub which is also like queue for async processing
		 **/
		q := ctx.Request().URL.RawQuery
		// requestID := ctx.URLParam("requestId")
		// start := ctx.URLParamDefault("start", "")
		// Trigger each matching Extension; one failing extension must not
		// stop the others.
		for _, extension := range extensions {
			extensionURL := fmt.Sprint("http://extension-", strings.ToLower(extension.Extension), ".default.svc.cluster.local")
			jsonValue, err := json.Marshal(extension)
			if err != nil {
				// The original ignored this error and posted garbage.
				fmt.Println("Error: Marshal extension:", extension.ExtensionID, err)
				continue
			}
			resp, err := netClient.Post(
				fmt.Sprint(extensionURL, "/extension/", strings.ToLower(extension.Extension), "/trigger/", extension.ExtensionID, "?", q),
				"application/json", bytes.NewBuffer(jsonValue))
			if err != nil {
				// Fix: the original fell through after this error and
				// dereferenced a nil resp, panicking the handler.
				fmt.Println("Error: Send to extension:", extensionURL, err)
				continue
			}
			fmt.Println("Trigger ", extension.ExtensionID, resp.Body)
			// Close eagerly instead of defer-in-loop so connections are
			// returned to the pool before the next iteration.
			resp.Body.Close()
		}
		ctx.JSON(extensions)
	})
	app.Get("/public/hc", func(ctx iris.Context) {
		ctx.JSON(iris.Map{
			"message": "OK",
		})
	})
	// listen and serve on http://0.0.0.0:8080.
	app.Run(iris.Addr(":8080"))
}
|
package leetcode
/*You're given strings J representing the types of stones that are jewels,
and S representing the stones you have. Each character in S is a type of stone you have.
You want to know how many of the stones you have are also jewels.
The letters in J are guaranteed distinct, and all characters in J and S are letters.
Letters are case sensitive, so "a" is considered a different type of stone from "A".
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/jewels-and-stones
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
// numJewelsInStones counts how many characters of S (stones) also appear in J
// (jewels). Jewel characters are guaranteed distinct and comparison is
// case-sensitive ("a" != "A"). Uses a byte set so the scan runs in
// O(len(J)+len(S)) instead of the naive O(len(J)*len(S)) double loop.
func numJewelsInStones(J string, S string) int {
	jewels := make(map[byte]struct{}, len(J))
	for i := 0; i < len(J); i++ {
		jewels[J[i]] = struct{}{}
	}
	count := 0
	for i := 0; i < len(S); i++ {
		if _, ok := jewels[S[i]]; ok {
			count++
		}
	}
	return count
}
|
/*
* Copyright (c) 2020, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
)
// path is the location of the iamctl config file, resolved against the
// current working directory at startup.
var dir, _ = os.Getwd()
var path = dir + "/iamctl.json"

// ServerDetails holds the credentials stored for one IAM server.
type ServerDetails struct {
	Server       string `json:"server"`
	AccessToken  string `json:"accessToken"`
	RefreshToken string `json:"refreshToken"`
}

// myJSON is the on-disk shape of iamctl.json: a list of known servers.
type myJSON struct {
	Array []ServerDetails
}
// createFile creates the iamctl.json config file containing an empty server
// list, unless the file already exists. Any failure terminates the process.
func createFile() {
	// Only create the file when it does not already exist.
	if _, err := os.Stat(path); os.IsNotExist(err) {
		file, err := os.Create(path)
		checkError(err)
		defer file.Close()

		jsonData := &myJSON{Array: []ServerDetails{}}
		// BUG FIX: the Marshal error was previously discarded with `_` while a
		// stale err (already checked by checkError above) was re-checked.
		encoded, err := json.Marshal(jsonData)
		if err != nil {
			log.Fatalln(err)
		}
		if err := ioutil.WriteFile(path, encoded, 0644); err != nil {
			log.Fatalln(err)
		}
	}
}
// writeFiles records the tokens for the given server in iamctl.json, updating
// the entry in place if the server is already known and appending a new entry
// otherwise. Any failure terminates the process.
func writeFiles(server string, token string, refreshToken string) {
	var data myJSON
	file, err := ioutil.ReadFile(path)
	if err != nil {
		log.Fatalln(err)
	}
	if err = json.Unmarshal(file, &data); err != nil {
		log.Fatalln(err)
	}

	// BUG FIX: the previous loop appended a duplicate entry for EVERY existing
	// element whose server did not match, growing the file on each call.
	// Update matching entries, then append exactly once if none matched.
	found := false
	for i := range data.Array {
		if data.Array[i].Server == server {
			data.Array[i].AccessToken = token
			data.Array[i].RefreshToken = refreshToken
			found = true
		}
	}
	if !found {
		data.Array = append(data.Array, ServerDetails{
			Server:       server,
			AccessToken:  token,
			RefreshToken: refreshToken,
		})
	}

	jsonData, err := json.Marshal(data)
	if err != nil {
		log.Fatalln(err)
	}
	if err = ioutil.WriteFile(path, jsonData, 0644); err != nil {
		log.Fatalln(err)
	}
	fmt.Println("Authorization is done for : " + server)
}
func readFile() string {
var a ServerDetails
var data myJSON
file, err := ioutil.ReadFile(path)
if err != nil {
log.Fatalln(err)
}
err = json.Unmarshal(file, &data)
if err != nil {
log.Fatalln(err)
}
//as the single host this worked. For multiple host need to read relevant accessToken according to given server
for i := 0; i < len(data.Array); i++ {
a = data.Array[i]
}
return a.AccessToken
}
// checkError prints err and terminates the process when err is non-nil.
// A nil err is a no-op.
func checkError(err error) {
	if err == nil {
		return
	}
	fmt.Println(err.Error())
	// BUG FIX: exit non-zero so shells and scripts can detect the failure
	// (previously os.Exit(0) signalled success on error).
	os.Exit(1)
}
|
package posthttpport
import (
"encoding/json"
"net/http"
"strings"
"github.com/alejogs4/blog/src/post/application"
"github.com/alejogs4/blog/src/post/domain/post"
"github.com/alejogs4/blog/src/post/infraestructure/posthttpadapter"
"github.com/alejogs4/blog/src/shared/infraestructure/httputils"
"github.com/alejogs4/blog/src/user/domain/user"
"github.com/gorilla/mux"
)
// PostControllers bundles the HTTP handlers for blog posts, backed by the
// application-layer command and query services.
type PostControllers struct {
	postCommands application.PostCommands
	postQueries  application.PostQueries
}

// NewPostControllers builds a PostControllers wired to the given command and
// query services.
func NewPostControllers(postCommands application.PostCommands, postQueries application.PostQueries) PostControllers {
	return PostControllers{postCommands: postCommands, postQueries: postQueries}
}
// CreatePostController handles post creation from a multipart/form request:
// it reads title, content and a comma-separated tag list, takes the
// authenticated user and uploaded picture path from the request context, and
// delegates to the application layer.
func (controller PostControllers) CreatePostController(response http.ResponseWriter, request *http.Request) {
	response.Header().Set("Content-Type", "application/json")

	var httpBlogPost post.Post
	httpBlogPost.Content = request.FormValue("content")
	httpBlogPost.Title = request.FormValue("title")

	tags := strings.Split(strings.TrimSpace(request.FormValue("tags")), ",")
	// BUG FIX: make([]post.Tag, len(tags)) pre-filled the slice with zero-value
	// tags that stayed in front of the appended real ones; allocate with zero
	// length and the right capacity instead.
	postTags := make([]post.Tag, 0, len(tags))
	for _, tag := range tags {
		postTags = append(postTags, post.Tag{ID: tag, Content: tag})
	}
	httpBlogPost.Tags = postTags

	userDTO, _ := request.Context().Value("user").(user.UserDTO) //nolint
	userPicture, _ := request.Context().Value("file").(string)

	err := controller.postCommands.CreateNewPost(userDTO.ID, httpBlogPost.Title, httpBlogPost.Content, userPicture, httpBlogPost.Tags)
	if err != nil {
		httpError := posthttpadapter.MapPostErrorToHttpError(err)
		httputils.DispatchNewHttpError(response, httpError.Message, httpError.Status)
		return
	}
	httputils.DispatchNewResponse(response, httputils.WrapAPIResponse(map[string]string{}, "Post created"), http.StatusCreated)
}
// AddPostComment creates a comment on the post identified by the URL {id},
// authored by the user taken from the request context. The comment content is
// read from the JSON request body.
func (controller PostControllers) AddPostComment(response http.ResponseWriter, request *http.Request) {
	response.Header().Set("Content-Type", "application/json")

	var payload struct {
		Content string `json:"content"`
	}

	postID := mux.Vars(request)["id"]
	currentUser, _ := request.Context().Value("user").(user.UserDTO)

	if decodeErr := json.NewDecoder(request.Body).Decode(&payload); decodeErr != nil {
		mapped := posthttpadapter.MapPostErrorToHttpError(decodeErr)
		httputils.DispatchNewHttpError(response, mapped.Message, mapped.Status)
		return
	}

	commentID, err := controller.postCommands.CreateNewComment(currentUser.ID, postID, payload.Content)
	if err != nil {
		mapped := posthttpadapter.MapPostErrorToHttpError(err)
		httputils.DispatchNewHttpError(response, mapped.Message, mapped.Status)
		return
	}

	body := map[string]string{"comment_id": commentID}
	httputils.DispatchNewResponse(response, httputils.WrapAPIResponse(body, "Comment created"), http.StatusCreated)
}
// RemoveComment deletes the comment identified by the URL {id} on behalf of
// the user taken from the request context.
func (controller PostControllers) RemoveComment(response http.ResponseWriter, request *http.Request) {
	response.Header().Set("Content-Type", "application/json")

	commentID := mux.Vars(request)["id"]
	currentUser, _ := request.Context().Value("user").(user.UserDTO)

	if err := controller.postCommands.RemovePostComment(commentID, currentUser.ID); err != nil {
		mapped := posthttpadapter.MapPostErrorToHttpError(err)
		httputils.DispatchNewHttpError(response, mapped.Message, mapped.Status)
		return
	}

	httputils.DispatchNewResponse(response, httputils.WrapAPIResponse(map[string]string{}, "Comment removed"), http.StatusOK)
}
// AddPostLikeController registers a like (or dislike, per the "type" field of
// the JSON body) on the post identified by the URL {id}, on behalf of the
// user taken from the request context.
func (controller PostControllers) AddPostLikeController(response http.ResponseWriter, request *http.Request) {
	response.Header().Set("Content-Type", "application/json")

	var payload struct {
		Type string `json:"type"`
	}

	postID := mux.Vars(request)["id"]
	// Improvement check for errors here
	currentUser, _ := request.Context().Value("user").(user.UserDTO)

	if decodeErr := json.NewDecoder(request.Body).Decode(&payload); decodeErr != nil {
		mapped := posthttpadapter.MapPostErrorToHttpError(decodeErr)
		httputils.DispatchNewHttpError(response, mapped.Message, mapped.Status)
		return
	}

	if err := controller.postCommands.AddLike(currentUser.ID, postID, payload.Type); err != nil {
		mapped := posthttpadapter.MapPostErrorToHttpError(err)
		httputils.DispatchNewHttpError(response, mapped.Message, mapped.Status)
		return
	}

	httputils.DispatchNewResponse(response, httputils.WrapAPIResponse(map[string]string{}, "Ok"), http.StatusCreated)
}
// GetPostByIDController returns the post identified by the URL {id}, or the
// mapped HTTP error when the lookup fails.
func (controller PostControllers) GetPostByIDController(response http.ResponseWriter, request *http.Request) {
	response.Header().Set("Content-Type", "application/json")

	postID := mux.Vars(request)["id"]
	// Renamed from "post" to stop shadowing the imported post package.
	foundPost, err := controller.postQueries.GetPostByID(postID)
	if err != nil {
		httpError := posthttpadapter.MapPostErrorToHttpError(err)
		httputils.DispatchNewHttpError(response, httpError.Message, httpError.Status)
		return
	}

	httputils.DispatchNewResponse(response, httputils.WrapAPIResponse(foundPost, "Ok"), http.StatusOK)
}
// GetAllPostController returns every post, or the mapped HTTP error when the
// query fails.
func (controller PostControllers) GetAllPostController(response http.ResponseWriter, request *http.Request) {
	response.Header().Set("Content-Type", "application/json")

	allPosts, queryErr := controller.postQueries.GetAllPosts()
	if queryErr != nil {
		mapped := posthttpadapter.MapPostErrorToHttpError(queryErr)
		httputils.DispatchNewHttpError(response, mapped.Message, mapped.Status)
		return
	}

	httputils.DispatchNewResponse(response, httputils.WrapAPIResponse(allPosts, "Ok"), http.StatusOK)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.