text
stringlengths 11
4.05M
|
|---|
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// Command analyze performs sentiment, entity, and syntax analysis
// on a string of text via the Cloud Natural Language API.
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"strings"
"golang.org/x/net/context"
"golang.org/x/oauth2/google"
language "google.golang.org/api/language/v1beta1"
)
// main dispatches a single Cloud Natural Language API call based on the
// command in os.Args[1] ("entities", "sentiment" or "syntax"), using the
// remaining arguments joined with spaces as the text to analyze.
func main() {
	if len(os.Args) < 2 {
		usage("Missing command.")
	}

	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, language.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	client, err := language.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	query := strings.Join(os.Args[2:], " ")
	if query == "" {
		usage("Missing text.")
	}

	switch command := os.Args[1]; command {
	case "entities":
		printResp(analyzeEntities(client, query))
	case "sentiment":
		printResp(analyzeSentiment(client, query))
	case "syntax":
		printResp(analyzeSyntax(client, query))
	default:
		usage("Unknown command.")
	}
}
// usage prints msg and a usage hint to stderr, then exits with status 2.
func usage(msg string) {
	fmt.Fprintf(os.Stderr, "%s\nusage: analyze [entities|sentiment|syntax] <text>\n", msg)
	os.Exit(2)
}
// analyzeEntities runs entity analysis on text via the given service,
// requesting UTF-8 offsets in the response.
func analyzeEntities(s *language.Service, text string) (*language.AnalyzeEntitiesResponse, error) {
	doc := &language.Document{Content: text, Type: "PLAIN_TEXT"}
	call := s.Documents.AnalyzeEntities(&language.AnalyzeEntitiesRequest{
		Document:     doc,
		EncodingType: "UTF8",
	})
	return call.Do()
}
// analyzeSentiment runs sentiment analysis on text via the given service.
func analyzeSentiment(s *language.Service, text string) (*language.AnalyzeSentimentResponse, error) {
	doc := &language.Document{Content: text, Type: "PLAIN_TEXT"}
	call := s.Documents.AnalyzeSentiment(&language.AnalyzeSentimentRequest{
		Document: doc,
	})
	return call.Do()
}
// analyzeSyntax runs syntax extraction on text via AnnotateText,
// requesting UTF-8 offsets in the response.
func analyzeSyntax(s *language.Service, text string) (*language.AnnotateTextResponse, error) {
	doc := &language.Document{Content: text, Type: "PLAIN_TEXT"}
	call := s.Documents.AnnotateText(&language.AnnotateTextRequest{
		Document: doc,
		Features: &language.Features{
			ExtractSyntax: true,
		},
		EncodingType: "UTF8",
	})
	return call.Do()
}
// printResp pretty-prints v as indented JSON to stdout. A non-nil err
// (or a marshalling failure) aborts the program via log.Fatal.
func printResp(v interface{}, err error) {
	if err != nil {
		log.Fatal(err)
	}
	out, merr := json.MarshalIndent(v, "", " ")
	if merr != nil {
		log.Fatal(merr)
	}
	os.Stdout.Write(out)
}
|
package terraform
import (
	"os"
	"strconv"
	"strings"
)
// Provider models a terraform provider block: a name plus the set of
// configuration variables to render into main.tf.
type Provider struct {
	Name      string
	Variables map[string]interface{}
}

// NewProvider returns a Provider with the given name and an empty,
// ready-to-use variable map.
func NewProvider(name string) *Provider {
	return &Provider{
		Name:      name,
		Variables: map[string]interface{}{},
	}
}

// AddVariable records a configuration variable under key.
func (provider *Provider) AddVariable(key string, value interface{}) {
	provider.Variables[key] = value
}
// Export appends this provider's rendered block to main.tf in the
// current directory, creating the file if necessary. It returns any
// open or write error.
func (provider *Provider) Export() error {
	file, err := os.OpenFile("main.tf", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer file.Close()
	// BUG FIX: the original returned nil here, silently discarding
	// write failures.
	if _, err := file.WriteString(provider.stringBuilder()); err != nil {
		return err
	}
	return nil
}
// stringBuilder renders the provider as an HCL snippet, e.g.
//
//	provider "aws" {
//	 region = "us-east-1"
//	}
//
// Variable names and string values are lower-cased; int values are
// rendered bare. Values of any other type are silently skipped, as in
// the original implementation.
func (provider *Provider) stringBuilder() string {
	var sb strings.Builder
	sb.WriteString("provider \"" + provider.Name + "\" {")
	for variable, value := range provider.Variables {
		switch v := value.(type) {
		case string:
			sb.WriteString("\n " + strings.ToLower(variable) + " = \"" + strings.ToLower(v) + "\"")
		case int:
			// BUG FIX: the original asserted value.(string) here, which
			// panics for every int variable; format the int instead.
			sb.WriteString("\n " + strings.ToLower(variable) + " = " + strconv.Itoa(v))
		}
	}
	sb.WriteString("\n}")
	return sb.String()
}
|
package autoscaler
// Subnet identifies an AWS subnet used by the autoscaler. Both fields
// are loaded from YAML configuration and are marked required by the
// validate tags.
type Subnet struct {
	SubnetID string `yaml:"SubnetID" validate:"required"`
	AvailabilityZone string `yaml:"AvailabilityZone" validate:"required"`
}
|
package cmd
import (
"fmt"
"log"
"path/filepath"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zerostick/zerostick/daemon/watchers"
"github.com/zerostick/zerostick/daemon/web"
)
// serveCmd represents the serve command
var (
	// cfgListen is the default hostname:port for the web UI; intended to
	// be overridden via the --listen flag registered in init below.
	cfgListen = "0.0.0.0:8080"
	serveCmd = &cobra.Command{
		Use: "serve",
		Short: "Starte ZeroStick web UI in listening mode",
		Long: `This will start ZeroStick in web UI mode (only available atm)`,
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("serve called")
			// Launch TeslaCam watchers
			// Start inotify watcher on the TeslaCam folder
			go func() {
				// Create a goroutine for handling Cam files watcher
				log.Println("Running cam files watcher on", filepath.Join(viper.GetString("cam-root"), "TeslaCam"))
				watchers.CamfilesWatcher(filepath.Join(viper.GetString("cam-root"), "TeslaCam"))
			}()
			// Second goroutine watches web files.
			// NOTE(review): neither goroutine is waited on or stoppable;
			// they live for the life of the process.
			go func() {
				watchers.WebfilesWatcher()
			}()
			// Launch web interface (blocks in the foreground)
			web.Start()
		},
	}
)
// init registers serveCmd on the root command and wires its --listen
// flag through to viper's "listen" key.
func init() {
	// Add command to root command
	rootCmd.AddCommand(serveCmd)
	// BUG FIX: flags must be defined on Flags(), not on the result of
	// LocalFlags(); LocalFlags() returns a computed flag set, so flags
	// added to it are never registered with the command and --listen
	// was not actually available.
	serveCmd.Flags().StringVarP(&cfgListen, "listen", "l", cfgListen, "hostname:port to listen on")
	if err := viper.BindPFlag("listen", serveCmd.Flags().Lookup("listen")); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"fmt"
"math"
"math/rand"
"time"
)
// NeuralNetwork seeds the RNG and returns a network whose 3x1 synaptic
// weights are drawn uniformly from [-1, 1), mirroring the Python
// single-neuron example this file ports (see the quoted snippet in
// Train).
func NeuralNetwork() *NN {
	rand.Seed(time.Now().UnixNano())
	data := make([][]float64, 3)
	for i := range data {
		// BUG FIX: the original used rand.NormFloat64 (a Gaussian),
		// which does not confine the starting weights to [-1, 1);
		// rand.Float64 gives the intended uniform [-1, 1) draw.
		data[i] = []float64{2*rand.Float64() - 1}
	}
	return &NN{
		SynapticWeights: data,
	}
}

// NN holds the synaptic weights of a single neuron (3 inputs, 1 output).
type NN struct {
	SynapticWeights [][]float64
}
// The neural network thinks.
// Think feeds inputs (rows of samples, 3 values each) through the
// single neuron: a dot product with the 3x1 synaptic weights followed
// by the sigmoid, yielding one value per input row.
func (nn *NN) Think(inputs [][]float64) [][]float64 {
	// Pass inputs through our neural network (our single neuron).
	return nn.Sigmoid(dot(inputs, nn.SynapticWeights))
}
// The Sigmoid function, which describes an S shaped curve.
// We pass the weighted sum of the inputs through this function to
// normalise them between 0 and 1.
func (nn *NN) Sigmoid(x [][]float64) [][]float64 {
	// BUG FIX: the original's `y := x[:][:]` only re-slices and shares
	// the backing arrays, so it silently mutated the caller's matrix.
	// Build a genuine copy instead.
	y := make([][]float64, len(x))
	for i := range x {
		y[i] = make([]float64, len(x[i]))
		for j, v := range x[i] {
			y[i][j] = 1 / (1 + math.Exp(-v))
		}
	}
	return y
}
// The derivative of the Sigmoid function.
// This is the gradient of the Sigmoid curve.
// It indicates how confident we are about the existing weight.
// x is expected to already contain sigmoid outputs s, so s*(1-s) is
// the derivative.
func (nn *NN) SigmoidDerivative(x [][]float64) [][]float64 {
	// BUG FIX: the original's `y := x[:][:]` only re-slices and shares
	// the backing arrays, so it silently mutated the caller's matrix.
	// Build a genuine copy instead.
	y := make([][]float64, len(x))
	for i := range x {
		y[i] = make([]float64, len(x[i]))
		for j, v := range x[i] {
			y[i][j] = v * (1 - v)
		}
	}
	return y
}
// We train the neural network through a process of trial and error.
// Adjusting the synaptic weights each time.
func (nn *NN) Train(trainingSetInputs [][]float64, trainingSetOutputs [][]float64, numberOfTrainingIterations int) {
	for i := 0; i < numberOfTrainingIterations; i++ {
		// Pass the training set through our neural network (a single neuron).
		output := nn.Think(trainingSetInputs)
		// Calculate the error (The difference between the desired output
		// and the predicted output).
		errorLoss := difference(trainingSetOutputs, output)
		// Multiply the error by the input and again by the gradient of the Sigmoid curve.
		// This means less confident weights are adjusted more.
		// This means inputs, which are zero, do not cause changes to the weights.
		// Python original: training_set_inputs.T, error * self.__sigmoid_derivative(output)
		adjustment := dot(t(trainingSetInputs), multiple(errorLoss, nn.SigmoidDerivative(output)))
		// Adjust the weights (elementwise: weights += adjustment).
		//nn.SynapticWeights += adjustment
		nn.SynapticWeights = addition(nn.SynapticWeights, adjustment)
	}
}
// UTILS for matrix op
// addition broadcast-adds the column vector y to matrix x: element
// (i,j) of the result is x[i][j] + y[i][0].
func addition(x, y [][]float64) [][]float64 {
	rows := len(x)
	cols := len(x[0])
	out := make([][]float64, rows)
	for i := 0; i < rows; i++ {
		row := make([]float64, cols)
		bias := y[i][0]
		for j := 0; j < cols; j++ {
			row[j] = x[i][j] + bias
		}
		out[i] = row
	}
	return out
}
// difference broadcast-subtracts the column vector y from matrix x:
// element (i,j) of the result is x[i][j] - y[i][0].
func difference(x, y [][]float64) [][]float64 {
	rows := len(x)
	cols := len(x[0])
	out := make([][]float64, rows)
	for i := 0; i < rows; i++ {
		row := make([]float64, cols)
		sub := y[i][0]
		for j := 0; j < cols; j++ {
			row[j] = x[i][j] - sub
		}
		out[i] = row
	}
	return out
}
// multiple returns the elementwise (Hadamard) product of x and y,
// sized by x's dimensions.
func multiple(x, y [][]float64) [][]float64 {
	rows := len(x)
	cols := len(x[0])
	out := make([][]float64, rows)
	for i := 0; i < rows; i++ {
		row := make([]float64, cols)
		for j := 0; j < cols; j++ {
			row[j] = x[i][j] * y[i][j]
		}
		out[i] = row
	}
	return out
}
func t(in [][]float64) [][]float64 {
outY := len(in)
outX := len(in[outY-1])
out := make([][]float64, outX)
for j := 0; j < outX; j++ {
out[j] = make([]float64, outY)
for i := 0; i < outY; i++ {
out[j][i] = in[i][j]
}
}
return out
}
// dot returns the matrix product a*b.
// More info: http://mathinsight.org/matrix_vector_multiplication
//
// Example:
//	dot([][]float64{
//		{0, 4, 4, 0},
//		{0, 4, 4, 0},
//		{0, 4, 4, 0},
//	}, t([][]float64{{0, 4, 4, 0}}))
//	// => [[32] [32] [32]]
//
// It panics when the inner dimensions disagree.
func dot(a, b [][]float64) [][]float64 {
	inner := len(a[0])
	if inner != len(b) {
		panic(`len of rows A must be equal len of cols B(see here http://mathinsight.org/matrix_vector_multiplication)`)
	}
	rows := len(a)
	cols := len(b[0])
	out := make([][]float64, rows)
	for i := 0; i < rows; i++ {
		row := make([]float64, cols)
		for j := 0; j < cols; j++ {
			var sum float64
			for k := 0; k < inner; k++ {
				sum += a[i][k] * b[k][j]
			}
			row[j] = sum
		}
		out[i] = row
	}
	return out
}
// main trains the single-neuron network on the classic 4-sample truth
// table (output equals the first input) and then queries it with a new
// situation, printing weights and timings along the way.
func main() {
	nn := NeuralNetwork()
	fmt.Printf("Random starting synaptic weights: %v \n", nn.SynapticWeights)
	// The training set. We have 4 examples, each consisting of 3 input values
	// and 1 output value.
	trainingSetInputs := [][]float64{
		{0, 0, 1},
		{1, 1, 1},
		{1, 0, 1},
		{0, 1, 1},
	}
	trainingSetOutputs := t([][]float64{
		{0, 1, 1, 0},
	})
	now := time.Now()
	// Train the neural network using a training set.
	// Do it 10,000 times and make small adjustments each time.
	nn.Train(trainingSetInputs, trainingSetOutputs, 10000)
	fmt.Printf("New synaptic weights after training: %v\n", nn.SynapticWeights)
	// BUG FIX: "letancy" -> "latency" in the two output messages.
	fmt.Printf("latency %s\n", time.Since(now).String())
	// Test the neural network with a new situation.
	now = time.Now()
	result := nn.Think([][]float64{{1, 0, 0}})
	fmt.Printf("Considering new situation [1, 0, 0] -> ?: %v (latency %s)\n", result, time.Since(now).String())
}
|
package main
import (
"bufio"
"bytes"
"crypto/rand"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
houndify "github.com/soundhound/houndify-sdk-go/houndify"
)
const (
	// This is not the clientId. This is the app user, so many will likely exist per clientId.
	// This value can be any string.
	// See https://www.houndify.com/docs/ for more details.
	userID = "exampleUser"
	// Environment variable names holding the Houndify credentials.
	envClientIDKey = "HOUNDIFY_CLIENT_ID"
	envClientKeyKey = "HOUNDIFY_CLIENT_KEY"
	// Command-line flag names for the same credentials.
	cliClientIDKey = "id"
	cliClientKeyKey = "key"
)
// main parses credential and query flags, then performs a voice, text,
// or interactive stdin query against the Houndify API and prints the
// written response. Credentials come from -id/-key flags or, failing
// that, the HOUNDIFY_CLIENT_ID / HOUNDIFY_CLIENT_KEY env vars.
func main() {
	clientIDFlag := flag.String(cliClientIDKey, "", "Client ID")
	clientKeyFlag := flag.String(cliClientKeyKey, "", "Client Key")
	voiceFlag := flag.String("voice", "", "Audio file to use for voice query")
	textFlag := flag.String("text", "", "Message to use for text query")
	stdinFlag := flag.Bool("stdin", false, "Text query via stdin messages")
	verboseFlag := flag.Bool("v", false, "Verbose mode, which prints raw server data")
	flag.Parse()
	// Make log not print out time info
	log.SetFlags(0)
	clientID := derefOrFetchFromEnv(clientIDFlag, envClientIDKey)
	clientKey := derefOrFetchFromEnv(clientKeyFlag, envClientKeyKey)
	var errsList []string
	if clientID == "" {
		// BUG FIX: "commmandline" typo (the client-key message below
		// already spelled it correctly).
		msg := fmt.Sprintf("must set the client ID in environment variable: %q or via commandline flag: -%s", envClientIDKey, cliClientIDKey)
		errsList = append(errsList, msg)
	}
	if clientKey == "" {
		msg := fmt.Sprintf("must set the client key in environment variable: %q or via commandline flag: -%s", envClientKeyKey, cliClientKeyKey)
		errsList = append(errsList, msg)
	}
	if len(errsList) > 0 {
		log.Fatalf("%s", strings.Join(errsList, "\n"))
	}
	// create a new client
	client := houndify.Client{
		ClientID:  clientID,
		ClientKey: clientKey,
		Verbose:   *verboseFlag,
	}
	client.EnableConversationState()
	switch {
	default:
		log.Fatalf("must choose either voice, text or stdin")
	case *voiceFlag != "":
		// voice query
		audioFilePath := *voiceFlag
		fileContents, err := ioutil.ReadFile(audioFilePath)
		if err != nil {
			log.Fatalf("failed to read contents of file %q, err: %v", audioFilePath, err)
		}
		req := houndify.VoiceRequest{
			AudioStream:       bytes.NewReader(fileContents),
			UserID:            userID,
			RequestID:         createRequestID(),
			RequestInfoFields: make(map[string]interface{}),
		}
		// listen for partial transcript responses
		partialTranscripts := make(chan houndify.PartialTranscript)
		go func() {
			for partial := range partialTranscripts {
				if partial.Message != "" { // ignore the "" partial transcripts, not really useful
					fmt.Println(partial.Message)
				}
			}
		}()
		serverResponse, err := client.VoiceSearch(req, partialTranscripts)
		if err != nil {
			log.Fatalf("failed to make voice request: %v\n%s\n", err, serverResponse)
		}
		writtenResponse, err := houndify.ParseWrittenResponse(serverResponse)
		if err != nil {
			log.Fatalf("failed to decode hound response\n%s\n", serverResponse)
		}
		fmt.Println(writtenResponse)
	case *textFlag != "":
		// text query
		req := houndify.TextRequest{
			Query:             *textFlag,
			UserID:            userID,
			RequestID:         createRequestID(),
			RequestInfoFields: make(map[string]interface{}),
		}
		serverResponse, err := client.TextSearch(req)
		if err != nil {
			log.Fatalf("failed to make text request: %v\n%s\n", err, serverResponse)
		}
		writtenResponse, err := houndify.ParseWrittenResponse(serverResponse)
		if err != nil {
			log.Fatalf("failed to decode hound response\n%s\n", serverResponse)
		}
		fmt.Println(writtenResponse)
	case *stdinFlag:
		// text queries in succession, demonstrating conversation state
		scanner := bufio.NewScanner(os.Stdin)
		fmt.Println("Enter a text query: ")
		for scanner.Scan() {
			req := houndify.TextRequest{
				Query:             scanner.Text(),
				UserID:            userID,
				RequestID:         createRequestID(),
				RequestInfoFields: make(map[string]interface{}),
			}
			serverResponse, err := client.TextSearch(req)
			if err != nil {
				fmt.Printf("failed to make text request: %v\n%s\nEnter another text query:", err, serverResponse)
				continue
			}
			writtenResponse, err := houndify.ParseWrittenResponse(serverResponse)
			if err != nil {
				log.Fatalf("failed to decode hound response\n%s\n", serverResponse)
			}
			fmt.Print(writtenResponse, "\n\n")
			fmt.Println("Enter another text query:")
		}
	}
}
// Creates a pseudo unique/random request ID.
//
// SDK users should do something similar so each request to the Hound server
// is signed differently to prevent replay attacks.
func createRequestID() string {
	const n = 10
	b := make([]byte, n)
	// BUG FIX: the original ignored rand.Read's error; on failure every
	// "random" ID would silently be the same all-zero string. A
	// crypto/rand failure is unrecoverable here, so abort loudly.
	if _, err := rand.Read(b); err != nil {
		log.Fatalf("failed to generate request ID: %v", err)
	}
	return fmt.Sprintf("%X", b)
}
// derefOrFetchFromEnv tries to dereference and retrieve a non-empty
// string stored in the string pointer, otherwise it falls back
// to retrieving the value stored in the environment keyed by envKey.
func derefOrFetchFromEnv(strPtr *string, envKey string) string {
if strPtr != nil && *strPtr != "" {
return *strPtr
}
return os.Getenv(envKey)
}
|
package bgControllers
import (
"github.com/astaxie/beego"
"GiantTech/models"
"fmt"
"GiantTech/controllers/tools"
"strconv"
"strings"
)
// BgProjectUploadFileController serves the back-office project file
// upload page (Get) and receives uploaded files (Post). It embeds
// beego.Controller for session/request handling.
type BgProjectUploadFileController struct {
	beego.Controller
}
// Prepare runs before each action: it loads the login name from the
// session into the package-level `username` variable (declared
// elsewhere in this package) and redirects unauthenticated visitors
// to /login.
func (this *BgProjectUploadFileController) Prepare() {
	s := this.StartSession()
	username = s.Get("login")
	beego.Informational(username)
	if username == nil {
		this.Ctx.Redirect(302, "/login")
	}
}
// Post handles a project file upload: it saves the uploaded file under
// the project's directory, records a TProjectFile row, logs the
// action, and answers with an UploadResult JSON payload.
func (this *BgProjectUploadFileController) Post() {
	s := this.StartSession()
	projectId := s.Get("ProjectId")
	id, _ := strconv.Atoi(projectId.(string))
	project, _ := models.GetTProjectsById(id)
	user, _ := models.GetTUsersByName(username.(string))
	path := beego.AppConfig.String("projectfilepath") + project.ProjectName
	if file, handler, err := this.Ctx.Request.FormFile("file"); err != nil {
		beego.Error(err)
		fmt.Fprint(this.Ctx.ResponseWriter, err)
	} else {
		// Strip the extension by joining all dot-separated parts except
		// the last one.
		n := strings.Split(handler.Filename, ".")
		var name string
		for i := 0; i < len(n)-1; i++ {
			name = name + n[i]
		}
		// Count existing files with the same base name so SaveFile can
		// derive a distinct stored name.
		query := make(map[string]string)
		query["FileProjectID"] = projectId.(string)
		query["FileName.contains"] = name
		files, _ := models.GetAllTProjectFile(query, nil, nil, nil, 0, 0)
		fileName, filePath, e := tools.SaveFile(file, handler, int64(len(files)), path)
		if e != nil {
			// BUG FIX: this branch logged and echoed `err` (which is nil
			// here) instead of the actual SaveFile error `e`.
			beego.Error(e)
			fmt.Fprint(this.Ctx.ResponseWriter, e)
		} else {
			var fileModel models.TProjectFile
			fileModel.FileName = fileName
			fileModel.FilePath = filePath
			fileModel.FileOwner = user.UserRealName
			fileModel.FileProjectID = id
			fileModel.FileCreatedTime = tools.TimeNow()
			i, _ := models.AddTProjectFile(&fileModel)
			tools.AddLog(user.UserRealName, tools.Addsome, tools.File, fileName+"成功")
			beego.Informational(fileName, "上传成功", filePath)
			var uploadResult models.UploadResult
			uploadResult.Status = true
			uploadResult.Data.Id = strconv.FormatInt(i, 10)
			uploadResult.Message = "操作成功"
			this.Data["json"] = uploadResult
			this.ServeJSON()
		}
	}
}
// Get renders the file-upload page for the project stored in the
// session, passing the current user and project info to the template.
func (this *BgProjectUploadFileController) Get() {
	s := this.StartSession()
	user, _ := models.GetTUsersByName(username.(string))
	this.Data["User"] = user
	i := s.Get("ProjectId")
	// NOTE(review): the type assertions on username/ProjectId panic if
	// the session lacks them — presumably guaranteed by Prepare's login
	// redirect; confirm.
	d := i.(string)
	id, _ := strconv.Atoi(d)
	project, _ := models.GetTProjectsById(id)
	this.Data["ProjectName"] = project.ProjectName
	this.Data["ProjectId"] = id
	this.TplName = "bgview/projectfileupload.html"
}
|
/*
* @lc app=leetcode.cn id=1797 lang=golang
*
* [1797] 设计一个验证系统
*/
// @lc code=start
// package leetcode
// AuthenticationManager tracks tokens by the time they were generated
// or last renewed; a token expires timeToLive seconds after that
// moment (LeetCode 1797).
type AuthenticationManager struct {
	timeToLive int
	tokenMap   map[string]int
}

// Constructor builds an AuthenticationManager with the given
// time-to-live, as required by the LeetCode harness.
func Constructor(timeToLive int) AuthenticationManager {
	return AuthenticationManager{
		timeToLive: timeToLive,
		tokenMap:   map[string]int{},
	}
}
// Generate records tokenId as issued at currentTime; it will expire at
// currentTime + timeToLive.
func (this *AuthenticationManager) Generate(tokenId string, currentTime int) {
	this.tokenMap[tokenId] = currentTime
}
// Renew extends tokenId's life to currentTime+timeToLive, but only if
// the token exists and is still unexpired at currentTime; an expired
// entry is deleted instead. Unknown tokens are ignored.
func (this *AuthenticationManager) Renew(tokenId string, currentTime int) {
	issued, exists := this.tokenMap[tokenId]
	if !exists {
		return
	}
	if issued+this.timeToLive <= currentTime {
		delete(this.tokenMap, tokenId)
		return
	}
	this.tokenMap[tokenId] = currentTime
}
// CountUnexpiredTokens returns how many tokens are still live at
// currentTime, pruning expired entries as it scans (deleting during a
// range over a map is safe in Go).
func (this *AuthenticationManager) CountUnexpiredTokens(currentTime int) int {
	alive := 0
	for tokenId, issued := range this.tokenMap {
		if issued+this.timeToLive > currentTime {
			alive++
		} else {
			delete(this.tokenMap, tokenId)
		}
	}
	return alive
}
/**
* Your AuthenticationManager object will be instantiated and called as such:
* obj := Constructor(timeToLive);
* obj.Generate(tokenId,currentTime);
* obj.Renew(tokenId,currentTime);
* param_3 := obj.CountUnexpiredTokens(currentTime);
*/
// @lc code=end
|
package main
import (
"fmt"
"strings"
"jblee.net/adventofcode2018/utils"
)
// dependency records that step `first` must complete before step
// `second` may begin (steps are 'A'..'Z' codepoints stored as ints).
type dependency struct {
	first, second int
}

// lines2Dep parses AoC 2018 day-7 lines of the form
// "Step X must be finished before step Y can begin.", reading the two
// step letters at fixed byte offsets 5 and 36.
func lines2Dep(lines []string) []dependency {
	deps := make([]dependency, len(lines))
	for i, line := range lines {
		deps[i] = dependency{first: int(line[5]), second: int(line[36])}
	}
	return deps
}

// getStepCount counts the distinct steps mentioned anywhere in deps.
func getStepCount(deps []dependency) int {
	seen := map[int]bool{}
	for _, d := range deps {
		seen[d.first] = true
		seen[d.second] = true
	}
	return len(seen)
}
// stepHasDeps reports whether step still appears as the second member
// of any dependency, i.e. something must still run before it.
func stepHasDeps(step int, deps []dependency) bool {
	for _, d := range deps {
		if d.second == step {
			return true
		}
	}
	return false
}
// getNextStep returns the alphabetically first step in 'A'..'Z' that
// is not already in sequenceSoFar and has no outstanding
// dependencies, or -1 when no such step exists.
func getNextStep(deps []dependency, sequenceSoFar string) int {
	for c := 'A'; c <= 'Z'; c++ {
		if strings.ContainsRune(sequenceSoFar, c) {
			continue
		}
		if !stepHasDeps(int(c), deps) {
			return int(c)
		}
	}
	return -1
}
// filterStep returns deps with every dependency originating from step
// removed — used once step has been executed.
func filterStep(step int, deps []dependency) []dependency {
	var remaining []dependency
	for _, d := range deps {
		if d.first == step {
			continue
		}
		remaining = append(remaining, d)
	}
	return remaining
}
// main solves AoC 2018 day 7 part 1: repeatedly take the
// alphabetically first step with no remaining prerequisites, append it
// to the sequence, and drop the dependencies it satisfied.
func main() {
	lines := utils.ReadLinesOrDie("input.txt")
	deps := lines2Dep(lines)
	stepCount := getStepCount(deps)
	sequence := ""
	for len(sequence) < stepCount {
		step := getNextStep(deps, sequence)
		// NOTE(review): if getNextStep returned -1 (cyclic input),
		// string(step) would append U+FFFD and this loop could spin
		// forever — assumed unreachable for well-formed puzzle input.
		sequence += string(step)
		deps = filterStep(step, deps)
	}
	fmt.Printf("sequence: %s\n", sequence)
}
|
package main
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
"periph.io/x/periph/conn/gpio"
"periph.io/x/periph/conn/gpio/gpioreg"
"periph.io/x/periph/host"
"strings"
"time"
)
// edgeTimeout bounds how long readByte/writeByte wait for a GPIO edge
// before declaring the Apple II side stuck (set in initGpio).
var edgeTimeout time.Duration

// Outbound handshake and data pins (RPi -> Apple II), assigned in initGpio.
var out_write gpio.PinIO
var out_read gpio.PinIO
var out_reserved2 gpio.PinIO
var out_reserved1 gpio.PinIO
var out_bit7 gpio.PinIO
var out_bit6 gpio.PinIO
var out_bit5 gpio.PinIO
var out_bit4 gpio.PinIO
var out_bit3 gpio.PinIO
var out_bit2 gpio.PinIO
var out_bit1 gpio.PinIO
var out_bit0 gpio.PinIO

// Inbound handshake and data pins (Apple II -> RPi), assigned in initGpio.
var in_write gpio.PinIO
var in_read gpio.PinIO
var in_reserved2 gpio.PinIO
var in_reserved1 gpio.PinIO
var in_bit7 gpio.PinIO
var in_bit6 gpio.PinIO
var in_bit5 gpio.PinIO
var in_bit4 gpio.PinIO
var in_bit3 gpio.PinIO
var in_bit2 gpio.PinIO
var in_bit1 gpio.PinIO
var in_bit0 gpio.PinIO

// Command bytes the Apple II sends to select an operation. Only a
// subset is dispatched in main's switch.
const ReadBlockCommand = 1
const WriteBlockCommand = 2
const GetTimeCommand = 3
const ChangeDriveCommand = 4
const ExecCommand = 5
const LoadFileCommand = 6
const SaveFileCommand = 7

// debug enables verbose handshake logging (set via the --debug argument).
var debug bool = false

// workingDirectory is the cwd used by handleExecCommand's pseudo shell.
var workingDirectory string = "/home"
// main opens the disk-image file named in os.Args[1] and then loops
// forever servicing commands arriving from the Apple II over GPIO.
// Pass --debug as the second argument for verbose handshake logging.
func main() {
	host.Init()
	initGpio()
	// BUG FIX: the original indexed os.Args[1] without checking it
	// exists, panicking when launched with no arguments.
	if len(os.Args) < 2 {
		fmt.Printf("usage: %s <disk image> [--debug]\n", os.Args[0])
		os.Exit(1)
	}
	if len(os.Args) == 3 && os.Args[2] == "--debug" {
		debug = true
	}
	fmt.Printf("Starting Apple II RPi...\n")
	fileName := os.Args[1]
	file, err := os.OpenFile(fileName, os.O_RDWR, 0755)
	if err != nil {
		fmt.Printf("ERROR: %s", err.Error())
		os.Exit(1)
	}
	for {
		if debug {
			fmt.Printf("Check for command\n")
		}
		command, err := readByte()
		if err != nil {
			// Timing out while waiting for a command is normal; poll again.
			continue
		}
		switch command {
		case ReadBlockCommand:
			handleReadBlockCommand(file)
		case WriteBlockCommand:
			handleWriteBlockCommand(file)
		case GetTimeCommand:
			handleGetTimeCommand()
		case ExecCommand:
			handleExecCommand()
		case LoadFileCommand:
			handleLoadFileCommand()
		}
	}
}
// handleReadBlockCommand receives a 512-byte block number (two bytes,
// little-endian) from the Apple II, loads that block from the disk
// image, and streams its 512 bytes back over GPIO.
func handleReadBlockCommand(file *os.File) {
	lo, _ := readByte()
	hi, _ := readByte()
	block := int64(hi)*256 + int64(lo)
	fmt.Printf("Read block %d\n", block)
	buffer := make([]byte, 512)
	file.ReadAt(buffer, block*512)
	//dumpBlock(buffer)
	if err := readBlock(buffer); err != nil {
		fmt.Printf("Failed to read block\n")
	} else {
		fmt.Printf("Read block completed\n")
	}
}
// handleWriteBlockCommand receives a block number (two bytes,
// little-endian) and 512 bytes of data from the Apple II, then writes
// the block into the disk image and syncs it.
func handleWriteBlockCommand(file *os.File) {
	blockLow, _ := readByte()
	blockHigh, _ := readByte()
	buffer := make([]byte, 512)
	block := int64(blockHigh)*256 + int64(blockLow)
	fmt.Printf("Write block %d\n", block)
	// BUG FIX: the original ignored writeBlock's error and wrote the
	// (possibly zero/partial) buffer to the image even when the GPIO
	// transfer timed out, corrupting the block on disk.
	if err := writeBlock(buffer); err != nil {
		fmt.Printf("Failed to receive block\n")
		return
	}
	file.WriteAt(buffer, block*512)
	file.Sync()
	fmt.Printf("Write block completed\n")
}
// handleExecCommand reads a shell command string from the Apple II,
// executes it with bash in the tracked working directory, and sends
// the command's output back. "cd" and "a2help" are intercepted and
// handled locally instead of being executed.
func handleExecCommand() {
	fmt.Printf("Reading command to execute...\n")
	linuxCommand, err := readString()
	// BUG FIX: the original discarded this error and fell through to
	// run an empty command after a GPIO timeout.
	if err != nil {
		fmt.Printf("Failed to read command\n")
		return
	}
	fmt.Printf("Command to run: %s\n", linuxCommand)
	if strings.HasPrefix(linuxCommand, "cd /") {
		workingDirectory = strings.Replace(linuxCommand, "cd ", "", 1)
		writeString("Working directory set")
		return
	}
	if strings.HasPrefix(linuxCommand, "cd ") {
		workingDirectory = workingDirectory + "/" + strings.Replace(linuxCommand, "cd ", "", 1)
		writeString("Working directory set")
		return
	}
	if linuxCommand == "a2help" {
		writeString("\r" +
			"This is a pseudo shell. Each command is executed as a process. The cd command\r" +
			"is intercepted and sets the working directory for the next command. Running\r" +
			"commands that do not exit will hang. For example, do not use ping without a\r" +
			"way to limit output like -c 1.\r\r")
		return
	}
	cmd := exec.Command("bash", "-c", linuxCommand)
	cmd.Dir = workingDirectory
	cmdOut, err := cmd.Output()
	if err != nil {
		fmt.Printf("Failed to execute command\n")
		writeString("Failed to execute command")
		return
	}
	fmt.Printf("Command output: %s\n", cmdOut)
	// The Apple II expects carriage returns rather than newlines.
	apple2string := strings.Replace(string(cmdOut), "\n", "\r", -1)
	err = writeString(apple2string)
	if err != nil {
		fmt.Printf("Failed to send command output\n")
		return
	}
}
// handleGetTimeCommand sends the current date and time to the Apple II
// in ProDOS packed format: four bytes in the order $BF90, $BF91,
// $BF92, $BF93 (see the layout diagram below).
func handleGetTimeCommand() {
	fmt.Printf("Sending date/time...\n")
	/* 49041 ($BF91) 49040 ($BF90)
	7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
	+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
	DATE: | year | month | day |
	+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
	7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
	+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
	TIME: | hour | | minute |
	+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
	49043 ($BF93) 49042 ($BF92)
	*/
	now := time.Now()
	// Two-digit year: ProDOS stores the year in 7 bits.
	year := now.Year() % 100
	month := now.Month()
	day := now.Day()
	hour := now.Hour()
	minute := now.Minute()
	// Pack year(7):month(4):day(5) across $BF91/$BF90. The byte
	// arithmetic deliberately truncates: (month&15)<<5 overflows mod
	// 256, dropping exactly the month bit that month>>3 already carries
	// into bf91, so the wrap is harmless for months 1..12.
	bf91 := (byte(year) << 1) + (byte(month) >> 3)
	bf90 := ((byte(month) & 15) << 5) + byte(day)
	bf93 := byte(hour)
	bf92 := byte(minute)
	writeByte(bf90)
	writeByte(bf91)
	writeByte(bf92)
	writeByte(bf93)
	fmt.Printf("Send time complete\n")
}
// handleLoadFileCommand reads a filename from the Apple II, then sends
// the file's size (two bytes, little-endian) followed by its contents.
// On open failure, a zero size is sent instead.
func handleLoadFileCommand() {
	fileName, _ := readString()
	file, err := os.OpenFile(fileName, os.O_RDWR, 0755)
	if err != nil {
		fmt.Printf("ERROR: %s\n", err.Error())
		writeByte(0)
		writeByte(0)
		return
	}
	// BUG FIX: the original never closed the file, leaking a file
	// descriptor on every load command.
	defer file.Close()
	fileInfo, _ := file.Stat()
	fileSize := int(fileInfo.Size())
	fmt.Printf("FileSize: %d\n", fileSize)
	// NOTE(review): sizes >= 64 KiB overflow the two-byte length —
	// presumably Apple II payloads never exceed that; confirm.
	fileSizeHigh := byte(fileSize >> 8)
	fileSizeLow := byte(fileSize & 255)
	writeByte(fileSizeLow)
	writeByte(fileSizeHigh)
	buffer := make([]byte, fileSize)
	fmt.Printf("Read file %s SizeHigh: %d SizeLow: %d\n", fileName, fileSizeHigh, fileSizeLow)
	file.Read(buffer)
	for i := 0; i < fileSize; i++ {
		if err := writeByte(buffer[i]); err != nil {
			return
		}
	}
}
// readBlock streams the 512 bytes of buffer to the Apple II, stopping
// at the first write error.
func readBlock(buffer []byte) error {
	for i := 0; i < 512; i++ {
		if err := writeByte(buffer[i]); err != nil {
			return err
		}
	}
	return nil
}
// writeBlock fills buffer with 512 bytes received from the Apple II,
// stopping at the first read error.
func writeBlock(buffer []byte) error {
	for i := 0; i < 512; i++ {
		b, err := readByte()
		if err != nil {
			return err
		}
		buffer[i] = b
	}
	return nil
}
// initGpio resolves every handshake/data pin by name, configures the
// inbound strobes as pulled-down edge-triggered inputs, drives the
// outbound handshake lines to their idle (High) state and the data
// pins Low, and sets the 5-second edge timeout.
func initGpio() {
	// Outbound pins (RPi -> Apple II).
	out_write = gpioreg.ByName("GPIO24")
	out_read = gpioreg.ByName("GPIO25")
	out_reserved2 = gpioreg.ByName("GPIO7") //note GPIO7 and GPIO8 require extra effort to use
	out_reserved1 = gpioreg.ByName("GPIO8")
	out_bit7 = gpioreg.ByName("GPIO5")
	out_bit6 = gpioreg.ByName("GPIO11")
	out_bit5 = gpioreg.ByName("GPIO9")
	out_bit4 = gpioreg.ByName("GPIO10")
	out_bit3 = gpioreg.ByName("GPIO22")
	out_bit2 = gpioreg.ByName("GPIO27")
	out_bit1 = gpioreg.ByName("GPIO17")
	out_bit0 = gpioreg.ByName("GPIO4")
	// Inbound pins (Apple II -> RPi).
	in_write = gpioreg.ByName("GPIO23")
	in_read = gpioreg.ByName("GPIO18")
	in_reserved2 = gpioreg.ByName("GPIO14")
	in_reserved1 = gpioreg.ByName("GPIO15")
	in_bit7 = gpioreg.ByName("GPIO12")
	in_bit6 = gpioreg.ByName("GPIO16")
	in_bit5 = gpioreg.ByName("GPIO20")
	in_bit4 = gpioreg.ByName("GPIO21")
	in_bit3 = gpioreg.ByName("GPIO26")
	in_bit2 = gpioreg.ByName("GPIO19")
	in_bit1 = gpioreg.ByName("GPIO13")
	in_bit0 = gpioreg.ByName("GPIO6")
	// Inbound strobes: pull-down with both-edge detection so
	// WaitForEdge in readByte/writeByte can observe transitions.
	in_write.In(gpio.PullDown, gpio.BothEdges)
	in_read.In(gpio.PullDown, gpio.BothEdges)
	// Handshake lines idle High; data lines start Low.
	out_reserved1.Out(gpio.High)
	out_reserved2.Out(gpio.High)
	out_read.Out(gpio.High)
	out_write.Out(gpio.High)
	out_bit7.Out(gpio.Low)
	out_bit6.Out(gpio.Low)
	out_bit5.Out(gpio.Low)
	out_bit4.Out(gpio.Low)
	out_bit3.Out(gpio.Low)
	out_bit2.Out(gpio.Low)
	out_bit1.Out(gpio.Low)
	out_bit0.Out(gpio.Low)
	edgeTimeout = time.Second * 5
}
// dumpBlock hex-dumps the first 512 bytes of buffer to stdout
// (debugging helper; currently only referenced from a commented-out
// call site).
func dumpBlock(buffer []byte) {
	for _, b := range buffer[:512] {
		fmt.Printf("%02X ", b)
	}
}
func readString() (string, error) {
var inBytes bytes.Buffer
for {
inByte, err := readByte()
if err != nil {
return "", err
}
if inByte == 0 {
break
}
inBytes.WriteByte(inByte)
}
return string(inBytes.Bytes()), nil
}
// writeString sends outString to the Apple II with the high bit set on
// every byte (Apple II text convention), followed by a zero
// terminator.
func writeString(outString string) error {
	for _, ch := range outString {
		if err := writeByte(byte(ch) | 128); err != nil {
			fmt.Printf("Failed to write string\n")
			return err
		}
	}
	writeByte(0)
	return nil
}
// readByte performs one byte of the four-phase GPIO handshake with the
// Apple II: signal ready (out_read Low), wait for the write strobe to
// drop, sample the eight data pins MSB-first, then acknowledge
// (out_read High) and wait for the strobe to clear.
func readByte() (byte, error) {
	// let the Apple II know we are ready to read
	if debug {
		fmt.Printf("let the Apple II know we are ready to read\n")
	}
	out_read.Out(gpio.Low)
	// wait for the Apple II to write
	if debug {
		fmt.Printf("wait for the Apple II to write\n")
	}
	for in_write.Read() == gpio.High {
		if !in_write.WaitForEdge(edgeTimeout) {
			if debug {
				fmt.Printf("Timed out reading byte -- write stuck high\n")
			}
			// BUG FIX: this error string carried a stray trailing "\n",
			// unlike its "stuck low" counterpart below.
			return 0, errors.New("Timed out reading byte -- write stuck high")
		}
	}
	// sample the data pins, MSB first
	if debug {
		fmt.Printf("get a byte of data\n")
	}
	var data byte
	pins := []gpio.PinIO{in_bit7, in_bit6, in_bit5, in_bit4, in_bit3, in_bit2, in_bit1, in_bit0}
	for _, pin := range pins {
		data <<= 1
		if pin.Read() == gpio.High {
			data |= 1
		}
	}
	// let the Apple II know we are done reading
	out_read.Out(gpio.High)
	// wait for the Apple II to finish writing
	for in_write.Read() == gpio.Low {
		if !in_write.WaitForEdge(edgeTimeout) {
			if debug {
				fmt.Printf("Timed out reading byte -- write stuck low\n")
			}
			return 0, errors.New("Timed out reading byte -- write stuck low")
		}
	}
	return data, nil
}
// writeByte sends one byte to the Apple II via the four-phase GPIO
// handshake: wait for the read strobe to drop, drive the eight data
// pins MSB-first, assert the write strobe, then wait for the read
// strobe to clear and release the write strobe.
func writeByte(data byte) error {
	// wait for the Apple II to be ready to read
	if debug {
		fmt.Printf("wait for the Apple II to be ready to read\n")
	}
	for in_read.Read() == gpio.High {
		if !in_read.WaitForEdge(edgeTimeout) {
			if debug {
				fmt.Printf("Timed out writing byte -- read stuck high\n")
			}
			return errors.New("Timed out writing byte -- read stuck high")
		}
	}
	// drive the data pins, MSB first (same pin order as the original)
	pins := []gpio.PinIO{out_bit7, out_bit6, out_bit5, out_bit4, out_bit3, out_bit2, out_bit1, out_bit0}
	for i, pin := range pins {
		level := gpio.Low
		if data&(1<<uint(7-i)) != 0 {
			level = gpio.High
		}
		pin.Out(level)
	}
	// let Apple II know we're writing
	if debug {
		fmt.Printf("let Apple II know we're writing\n")
	}
	out_write.Out(gpio.Low)
	// wait for the Apple II to finish reading
	for in_read.Read() == gpio.Low {
		if !in_read.WaitForEdge(edgeTimeout) {
			if debug {
				fmt.Printf("Timed out writing byte -- read stuck low\n")
			}
			return errors.New("Timed out writing byte -- read stuck low")
		}
	}
	// let the Apple II know we are done writing
	if debug {
		fmt.Printf("let the Apple II know we are done writing\n")
	}
	out_write.Out(gpio.High)
	return nil
}
|
package cmd
import (
"github.com/spf13/cobra"
"github.com/wish/ctl/cmd/util/parsing"
"github.com/wish/ctl/pkg/client"
// "io"
"bufio"
)
// logsCmd builds the `ctl logs` cobra command, which streams the log
// of a container in a pod across the requested contexts, printing each
// line as it arrives.
func logsCmd(c *client.Client) *cobra.Command {
	cmd := &cobra.Command{
		Use:     "logs pod [flags]",
		Aliases: []string{"log"},
		Short:   "Get log of a container in a pod",
		Long:    `Print a detailed description of the selected pod.`,
		Args:    cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctxs, _ := cmd.Flags().GetStringSlice("context")
			namespace, _ := cmd.Flags().GetString("namespace")
			container, _ := cmd.Flags().GetString("container")
			options, err := parsing.LogOptions(cmd, args)
			// BUG FIX: the original assigned options.Follow/Timestamps
			// BEFORE checking err, dereferencing options even when
			// parsing.LogOptions failed.
			if err != nil {
				return err
			}
			options.Follow, _ = cmd.Flags().GetBool("follow")
			options.Timestamps, _ = cmd.Flags().GetBool("timestamps")
			reader, err := c.LogPodsOverContexts(ctxs, namespace, container, options)
			if err != nil {
				return err
			}
			scanner := bufio.NewScanner(reader)
			for scanner.Scan() {
				cmd.Println(scanner.Text())
			}
			// Report any scan error (e.g. a dropped stream).
			return scanner.Err()
		},
	}
	cmd.Flags().StringP("container", "c", "", "Specify the container")
	cmd.Flags().BoolP("follow", "f", false, "Specify if the logs should be streamed")
	cmd.Flags().Bool("timestamps", false, "Add an RFC3339 or RFC3339Nano timestamp to the beginning of each line")
	return cmd
}
|
package shared
import (
"encoding/json"
"encoding/xml"
"net/http"
"reflect"
"runtime"
utils "github.com/agungdwiprasetyo/go-utils"
)
// HTTPResponse abstract interface for writing a prepared response to
// an http.ResponseWriter as either JSON or XML.
type HTTPResponse interface {
	JSON(w http.ResponseWriter)
	XML(w http.ResponseWriter)
}
type (
	// httpResponse model — the concrete payload rendered by JSON/XML;
	// Meta, Data and Errors are omitted from output when empty.
	httpResponse struct {
		Success bool `json:"success"`
		Code int `json:"code"`
		Message string `json:"message"`
		Meta *Meta `json:"meta,omitempty"`
		Data interface{} `json:"data,omitempty"`
		Errors interface{} `json:"errors,omitempty"`
	}
	// Meta model — pagination metadata attached to list responses.
	Meta struct {
		Page int `json:"page"`
		Limit int `json:"limit"`
		TotalRecords int `json:"totalRecords"`
	}
)
// NewHTTPResponse creates a common response. Variadic params are
// classified by type: a Meta (or *Meta) becomes the meta block, a
// utils.MultiError becomes the errors map, and anything else is used
// as the data payload. Codes below 400 are reported as success.
func NewHTTPResponse(code int, message string, params ...interface{}) HTTPResponse {
	commonResponse := new(httpResponse)
	for _, param := range params {
		refValue := reflect.ValueOf(param)
		if refValue.Kind() == reflect.Ptr {
			refValue = refValue.Elem()
		}
		// BUG FIX: a nil param (or typed nil pointer) yields an invalid
		// reflect.Value whose Interface() call panics; skip those.
		if !refValue.IsValid() {
			continue
		}
		param = refValue.Interface()
		switch data := param.(type) {
		case Meta:
			commonResponse.Meta = &data
		case utils.MultiError:
			commonResponse.Errors = data.ToMap()
		default:
			commonResponse.Data = param
		}
	}
	if code < 400 {
		commonResponse.Success = true
	}
	commonResponse.Code = code
	commonResponse.Message = message
	return commonResponse
}
// JSON renders the response as JSON (Content-Type: application/json),
// writing the stored status code before the encoded body.
func (resp *httpResponse) JSON(w http.ResponseWriter) {
	header := w.Header()
	header.Set("Content-Type", "application/json")
	header.Set("Origin", "Agung Dwi Prasetyo")
	header.Set("Go-Version", runtime.Version())
	w.WriteHeader(resp.Code)
	json.NewEncoder(w).Encode(resp)
}
// XML renders the response as XML (Content-Type: application/xml),
// writing the stored status code before the encoded body.
func (resp *httpResponse) XML(w http.ResponseWriter) {
	header := w.Header()
	header.Set("Content-Type", "application/xml")
	header.Set("Origin", "Agung Dwi Prasetyo")
	header.Set("Go-Version", runtime.Version())
	w.WriteHeader(resp.Code)
	xml.NewEncoder(w).Encode(resp)
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ttlworker
import (
"context"
"sync/atomic"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
timerapi "github.com/pingcap/tidb/timer/api"
"github.com/pingcap/tidb/ttl/cache"
"github.com/pingcap/tidb/ttl/session"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func newTTLTableStatusRows(status ...*cache.TableStatus) []chunk.Row {
c := chunk.NewChunkWithCapacity([]*types.FieldType{
types.NewFieldType(mysql.TypeLonglong), // table_id
types.NewFieldType(mysql.TypeLonglong), // parent_table_id
types.NewFieldType(mysql.TypeString), // table_statistics
types.NewFieldType(mysql.TypeString), // last_job_id
types.NewFieldType(mysql.TypeDatetime), // last_job_start_time
types.NewFieldType(mysql.TypeDatetime), // last_job_finish_time
types.NewFieldType(mysql.TypeDatetime), // last_job_ttl_expire
types.NewFieldType(mysql.TypeString), // last_job_summary
types.NewFieldType(mysql.TypeString), // current_job_id
types.NewFieldType(mysql.TypeString), // current_job_owner_id
types.NewFieldType(mysql.TypeString), // current_job_owner_addr
types.NewFieldType(mysql.TypeDatetime), // current_job_hb_time
types.NewFieldType(mysql.TypeDatetime), // current_job_start_time
types.NewFieldType(mysql.TypeDatetime), // current_job_ttl_expire
types.NewFieldType(mysql.TypeString), // current_job_state
types.NewFieldType(mysql.TypeString), // current_job_status
types.NewFieldType(mysql.TypeDatetime), // current_job_status_update_time
}, len(status))
var rows []chunk.Row
for _, s := range status {
tableID := types.NewDatum(s.TableID)
c.AppendDatum(0, &tableID)
parentTableID := types.NewDatum(s.ParentTableID)
c.AppendDatum(1, &parentTableID)
if s.TableStatistics == "" {
c.AppendNull(2)
} else {
tableStatistics := types.NewDatum(s.TableStatistics)
c.AppendDatum(2, &tableStatistics)
}
if s.LastJobID == "" {
c.AppendNull(3)
} else {
lastJobID := types.NewDatum(s.LastJobID)
c.AppendDatum(3, &lastJobID)
}
lastJobStartTime := types.NewDatum(types.NewTime(types.FromGoTime(s.LastJobStartTime), mysql.TypeDatetime, types.MaxFsp))
c.AppendDatum(4, &lastJobStartTime)
lastJobFinishTime := types.NewDatum(types.NewTime(types.FromGoTime(s.LastJobFinishTime), mysql.TypeDatetime, types.MaxFsp))
c.AppendDatum(5, &lastJobFinishTime)
lastJobTTLExpire := types.NewDatum(types.NewTime(types.FromGoTime(s.LastJobTTLExpire), mysql.TypeDatetime, types.MaxFsp))
c.AppendDatum(6, &lastJobTTLExpire)
if s.LastJobSummary == "" {
c.AppendNull(7)
} else {
lastJobSummary := types.NewDatum(s.LastJobSummary)
c.AppendDatum(7, &lastJobSummary)
}
if s.CurrentJobID == "" {
c.AppendNull(8)
} else {
currentJobID := types.NewDatum(s.CurrentJobID)
c.AppendDatum(8, ¤tJobID)
}
if s.CurrentJobOwnerID == "" {
c.AppendNull(9)
} else {
currentJobOwnerID := types.NewDatum(s.CurrentJobOwnerID)
c.AppendDatum(9, ¤tJobOwnerID)
}
if s.CurrentJobOwnerAddr == "" {
c.AppendNull(10)
} else {
currentJobOwnerAddr := types.NewDatum(s.CurrentJobOwnerAddr)
c.AppendDatum(10, ¤tJobOwnerAddr)
}
currentJobOwnerHBTime := types.NewDatum(types.NewTime(types.FromGoTime(s.CurrentJobOwnerHBTime), mysql.TypeDatetime, types.MaxFsp))
c.AppendDatum(11, ¤tJobOwnerHBTime)
currentJobStartTime := types.NewDatum(types.NewTime(types.FromGoTime(s.CurrentJobStartTime), mysql.TypeDatetime, types.MaxFsp))
c.AppendDatum(12, ¤tJobStartTime)
currentJobTTLExpire := types.NewDatum(types.NewTime(types.FromGoTime(s.CurrentJobTTLExpire), mysql.TypeDatetime, types.MaxFsp))
c.AppendDatum(13, ¤tJobTTLExpire)
if s.CurrentJobState == "" {
c.AppendNull(14)
} else {
currentJobState := types.NewDatum(s.CurrentJobState)
c.AppendDatum(14, ¤tJobState)
}
if s.CurrentJobStatus == "" {
c.AppendNull(15)
} else {
currentJobStatus := types.NewDatum(s.CurrentJobStatus)
c.AppendDatum(15, ¤tJobStatus)
}
currentJobStatusUpdateTime := types.NewDatum(types.NewTime(types.FromGoTime(s.CurrentJobStatusUpdateTime), mysql.TypeDatetime, types.MaxFsp))
c.AppendDatum(16, ¤tJobStatusUpdateTime)
}
iter := chunk.NewIterator4Chunk(c)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
rows = append(rows, row)
}
return rows
}
// updateStatusSQL is the exact statement the job manager issues to refresh
// the table status cache; tests compare captured SQL against it verbatim.
var updateStatusSQL = "SELECT LOW_PRIORITY table_id,parent_table_id,table_statistics,last_job_id,last_job_start_time,last_job_finish_time,last_job_ttl_expire,last_job_summary,current_job_id,current_job_owner_id,current_job_owner_addr,current_job_owner_hb_time,current_job_start_time,current_job_ttl_expire,current_job_state,current_job_status,current_job_status_update_time FROM mysql.tidb_ttl_table_status"

// TTLJob exports the unexported ttlJob type for test.
type TTLJob = ttlJob
// LockJob is an exported wrapper around job locking for test. A non-empty
// createJobID locks a brand-new job with that ID; an empty one instead tries
// to take over an existing job whose owner's heartbeat has timed out.
func (m *JobManager) LockJob(ctx context.Context, se session.Session, table *cache.PhysicalTable, now time.Time, createJobID string, checkInterval bool) (*TTLJob, error) {
	if createJobID != "" {
		return m.lockNewJob(ctx, se, table, now, createJobID, checkInterval)
	}
	return m.lockHBTimeoutJob(ctx, se, table, now)
}
// RunningJobs returns the jobs currently tracked as running inside the TTL
// job manager; exported for test.
func (m *JobManager) RunningJobs() []*TTLJob {
	return m.runningJobs
}
// InfoSchemaCache is an exported getter of the manager's infoSchemaCache for
// test.
func (m *JobManager) InfoSchemaCache() *cache.InfoSchemaCache {
	return m.infoSchemaCache
}
// TableStatusCache is an exported getter of the manager's tableStatusCache
// for test.
func (m *JobManager) TableStatusCache() *cache.TableStatusCache {
	return m.tableStatusCache
}
// RescheduleJobs is an exported version of rescheduleJobs for test.
func (m *JobManager) RescheduleJobs(se session.Session, now time.Time) {
	m.rescheduleJobs(se, now)
}
// SubmitJob sends a submit-job request for the given table/partition to the
// manager and blocks until the response channel reports the result.
func (m *JobManager) SubmitJob(se session.Session, tableID, physicalID int64, requestID string) error {
	// Buffered so the responder can always complete its send.
	ch := make(chan error, 1)
	m.handleSubmitJobRequest(se, &SubmitTTLManagerJobRequest{
		TableID:    tableID,
		PhysicalID: physicalID,
		RequestID:  requestID,
		RespCh:     ch,
	})
	return <-ch
}
// TaskManager is an exported getter of the manager's task manager for test.
func (m *JobManager) TaskManager() *taskManager {
	return m.taskManager
}
// UpdateHeartBeat is an exported version of updateHeartBeat for test.
func (m *JobManager) UpdateHeartBeat(ctx context.Context, se session.Session, now time.Time) error {
	return m.updateHeartBeat(ctx, se, now)
}
// ReportMetrics is an exported version of reportMetrics for test.
func (m *JobManager) ReportMetrics(se session.Session) {
	m.reportMetrics(se)
}
// Finish is an exported wrapper around ttlJob.finish for test.
func (j *ttlJob) Finish(se session.Session, now time.Time, summary *TTLSummary) {
	j.finish(se, now, summary)
}
// ID returns the job's identifier; exported for test.
func (j *ttlJob) ID() string {
	return j.id
}
// newMockTTLJob builds a minimal ttlJob fixture with only the table and
// status populated.
func newMockTTLJob(tbl *cache.PhysicalTable, status cache.JobStatus) *ttlJob {
	return &ttlJob{tbl: tbl, status: status}
}
// TestReadyForLockHBTimeoutJobTables checks which tables are offered for
// taking over a heartbeat-timeout job under different combinations of info
// schema and table status cache contents.
func TestReadyForLockHBTimeoutJobTables(t *testing.T) {
	tbl := newMockTTLTbl(t, "t1")
	m := NewJobManager("test-id", nil, nil, nil, nil)
	m.sessPool = newMockSessionPool(t, tbl)
	se := newMockSession(t, tbl)
	tblWithDailyInterval := newMockTTLTbl(t, "t2")
	tblWithDailyInterval.TTLInfo.JobInterval = "1d"

	cases := []struct {
		name             string
		infoSchemaTables []*cache.PhysicalTable
		tableStatus      []*cache.TableStatus
		shouldSchedule   bool
	}{
		// for a newly inserted table, it'll always not be scheduled because no job running
		{"newly created", []*cache.PhysicalTable{tbl}, []*cache.TableStatus{{TableID: tbl.ID, ParentTableID: tbl.ID}}, false},
		// table only in the table status cache will not be scheduled
		{"proper subset", []*cache.PhysicalTable{}, []*cache.TableStatus{{TableID: tbl.ID, ParentTableID: tbl.ID}}, false},
		// table whose current job owner id is not empty, and heart beat time is long enough will not be scheduled
		{"current job not empty", []*cache.PhysicalTable{tbl}, []*cache.TableStatus{{TableID: tbl.ID, ParentTableID: tbl.ID, CurrentJobID: "job1", CurrentJobOwnerID: "test-another-id", CurrentJobOwnerHBTime: time.Now()}}, false},
		// table whose current job owner id is not empty, but heart beat time is expired will be scheduled
		{"hb time expired", []*cache.PhysicalTable{tbl}, []*cache.TableStatus{{TableID: tbl.ID, ParentTableID: tbl.ID, CurrentJobID: "job1", CurrentJobOwnerID: "test-another-id", CurrentJobOwnerHBTime: time.Now().Add(-time.Hour)}}, true},
		// if the last start time is too near, it will not be scheduled because no job running
		{"last start time too near", []*cache.PhysicalTable{tbl}, []*cache.TableStatus{{TableID: tbl.ID, ParentTableID: tbl.ID, LastJobStartTime: time.Now()}}, false},
		// if the last start time is expired, it will not be scheduled because no job running
		{"last start time expired", []*cache.PhysicalTable{tbl}, []*cache.TableStatus{{TableID: tbl.ID, ParentTableID: tbl.ID, LastJobStartTime: time.Now().Add(-time.Hour * 2)}}, false},
		// if the interval is 24h, and the last start time is near, it will not be scheduled because no job running
		{"last start time too near for 24h", []*cache.PhysicalTable{tblWithDailyInterval}, []*cache.TableStatus{{TableID: tblWithDailyInterval.ID, ParentTableID: tblWithDailyInterval.ID, LastJobStartTime: time.Now().Add(-time.Hour * 2)}}, false},
		// if the interval is 24h, and the last start time is far enough, it will not be scheduled because no job running
		{"last start time far enough for 24h", []*cache.PhysicalTable{tblWithDailyInterval}, []*cache.TableStatus{{TableID: tblWithDailyInterval.ID, ParentTableID: tblWithDailyInterval.ID, LastJobStartTime: time.Now().Add(-time.Hour * 25)}}, false},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			// Rebuild both caches from scratch for each case.
			m.infoSchemaCache.Tables = make(map[int64]*cache.PhysicalTable)
			for _, ist := range c.infoSchemaTables {
				m.infoSchemaCache.Tables[ist.ID] = ist
			}
			m.tableStatusCache.Tables = make(map[int64]*cache.TableStatus)
			for _, st := range c.tableStatus {
				m.tableStatusCache.Tables[st.TableID] = st
			}
			tables := m.readyForLockHBTimeoutJobTables(se.Now())
			if c.shouldSchedule {
				assert.Len(t, tables, 1)
				assert.Equal(t, int64(0), tables[0].ID)
				assert.Equal(t, int64(0), tables[0].TableInfo.ID)
			} else {
				assert.Len(t, tables, 0)
			}
		})
	}
}
// TestOnTimerTick checks leader-based pause/resume of the TTL timer runtime
// and the sync throttling behavior: a full timer sync only happens after
// enough time has passed since the previous one or when the info schema
// version changes.
func TestOnTimerTick(t *testing.T) {
	var leader atomic.Bool
	m := NewJobManager("test-id", newMockSessionPool(t), nil, nil, func() bool {
		return leader.Load()
	})

	tbl := newMockTTLTbl(t, "t1")
	se := newMockSession(t)
	se.sessionInfoSchema = newMockInfoSchemaWithVer(100, tbl.TableInfo)

	timerStore := timerapi.NewMemoryTimerStore()
	defer timerStore.Close()

	a := &mockJobAdapter{}
	a.On("CanSubmitJob").Return(false).Maybe()
	rt := newTTLTimerRuntime(timerStore, a)
	require.Nil(t, rt.rt)
	defer rt.Pause()

	now := time.UnixMilli(3600 * 24)
	syncer := NewTTLTimerSyncer(m.sessPool, timerapi.NewDefaultTimerClient(timerStore))
	syncer.nowFunc = func() time.Time {
		return now
	}

	// pause after init
	m.onTimerTick(se, rt, syncer, now)
	require.Nil(t, rt.rt)
	require.Equal(t, 0, len(syncer.key2Timers))
	syncTime, syncVer := syncer.GetLastSyncInfo()
	require.Zero(t, syncVer)
	require.True(t, syncTime.IsZero())

	// resume first time
	leader.Store(true)
	m.onTimerTick(se, rt, syncer, now)
	innerRT := rt.rt
	require.NotNil(t, innerRT)
	require.True(t, innerRT.Running())
	require.Equal(t, 1, len(syncer.key2Timers))
	syncTime, syncVer = syncer.GetLastSyncInfo()
	require.Equal(t, int64(100), syncVer)
	require.Equal(t, now, syncTime)

	// resume after a very short duration
	now = now.Add(time.Second)
	se.sessionInfoSchema = newMockInfoSchemaWithVer(101, tbl.TableInfo)
	m.onTimerTick(se, rt, syncer, now)
	require.Same(t, innerRT, rt.rt)
	require.True(t, innerRT.Running())
	require.Equal(t, 1, len(syncer.key2Timers))
	// Too soon since the previous sync: last sync info must be unchanged.
	syncTime, syncVer = syncer.GetLastSyncInfo()
	require.Equal(t, int64(100), syncVer)
	require.Equal(t, now.Add(-time.Second), syncTime)

	// resume after a middle duration
	now = now.Add(6 * time.Second)
	m.onTimerTick(se, rt, syncer, now)
	require.Same(t, innerRT, rt.rt)
	require.True(t, innerRT.Running())
	require.Equal(t, 1, len(syncer.key2Timers))
	syncTime, syncVer = syncer.GetLastSyncInfo()
	require.Equal(t, int64(101), syncVer)
	require.Equal(t, now, syncTime)

	// resume after a middle duration but infoschema not change
	now = now.Add(6 * time.Second)
	m.onTimerTick(se, rt, syncer, now)
	require.Same(t, innerRT, rt.rt)
	require.True(t, innerRT.Running())
	require.Equal(t, 1, len(syncer.key2Timers))
	syncTime, syncVer = syncer.GetLastSyncInfo()
	require.Equal(t, int64(101), syncVer)
	require.Equal(t, now.Add(-6*time.Second), syncTime)

	// resume after a long duration
	now = now.Add(3 * time.Minute)
	m.onTimerTick(se, rt, syncer, now)
	require.Same(t, innerRT, rt.rt)
	require.True(t, innerRT.Running())
	require.Equal(t, 1, len(syncer.key2Timers))
	syncTime, syncVer = syncer.GetLastSyncInfo()
	require.Equal(t, int64(101), syncVer)
	require.Equal(t, now, syncTime)

	// pause
	leader.Store(false)
	m.onTimerTick(se, rt, syncer, now)
	require.Nil(t, rt.rt)
	require.False(t, innerRT.Running())
	syncTime, syncVer = syncer.GetLastSyncInfo()
	require.Zero(t, syncVer)
	require.True(t, syncTime.IsZero())
}
// TestLockTable drives lockNewJob / lockHBTimeoutJob through a scripted mock
// session: each case lists, in order, the exact SQL statements (with canned
// rows or errors) the lock attempt is expected to execute.
func TestLockTable(t *testing.T) {
	now, err := time.Parse(timeFormat, "2022-12-05 17:13:05")
	assert.NoError(t, err)
	newJobExpireTime := now.Add(-time.Minute)
	oldJobExpireTime := now.Add(-time.Hour)
	oldJobStartTime := now.Add(-30 * time.Minute)
	testPhysicalTable := &cache.PhysicalTable{ID: 1, Schema: model.NewCIStr("test"), TableInfo: &model.TableInfo{ID: 1, Name: model.NewCIStr("t1"), TTLInfo: &model.TTLInfo{ColumnName: model.NewCIStr("test"), IntervalExprStr: "5 Year", JobInterval: "1h"}}}

	// executeInfo pairs a SQL text with its expected arguments.
	type executeInfo struct {
		sql  string
		args []interface{}
	}
	getExecuteInfo := func(sql string, args []interface{}) executeInfo {
		return executeInfo{
			sql,
			args,
		}
	}
	// getExecuteInfoForUpdate marks the statement as a pessimistic read.
	getExecuteInfoForUpdate := func(sql string, args []interface{}) executeInfo {
		return executeInfo{
			sql + " FOR UPDATE NOWAIT",
			args,
		}
	}
	// getExecuteInfoWithErr accepts helpers that also return an error
	// (e.g. cache.InsertIntoTTLTask) and requires that error to be nil.
	getExecuteInfoWithErr := func(sql string, args []interface{}, err error) executeInfo {
		require.NoError(t, err)
		return executeInfo{
			sql,
			args,
		}
	}
	type sqlExecute struct {
		executeInfo
		rows []chunk.Row
		err  error
	}
	cases := []struct {
		name          string
		table         *cache.PhysicalTable
		sqls          []sqlExecute
		isCreate      bool
		checkInterval bool
		hasError      bool
	}{
		{"normal lock table for create", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
			{
				getExecuteInfo(setTableStatusOwnerSQL("new-job-id", 1, now, now, newJobExpireTime, "test-id")),
				nil, nil,
			},
			{
				getExecuteInfo(createJobHistorySQL("new-job-id", testPhysicalTable, newJobExpireTime, now)),
				nil, nil,
			},
			{
				getExecuteInfoWithErr(cache.InsertIntoTTLTask(newMockSession(t), "new-job-id", 1, 0, nil, nil, newJobExpireTime, now)),
				nil, nil,
			},
			{
				getExecuteInfo(updateStatusSQL, nil),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
		}, true, false, false},
		{"normal lock table for create and check interval", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
			{
				getExecuteInfo(setTableStatusOwnerSQL("new-job-id", 1, now, now, newJobExpireTime, "test-id")),
				nil, nil,
			},
			{
				getExecuteInfo(createJobHistorySQL("new-job-id", testPhysicalTable, newJobExpireTime, now)),
				nil, nil,
			},
			{
				getExecuteInfoWithErr(cache.InsertIntoTTLTask(newMockSession(t), "new-job-id", 1, 0, nil, nil, newJobExpireTime, now)),
				nil, nil,
			},
			{
				getExecuteInfo(updateStatusSQL, nil),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
		}, true, true, false},
		{"normal lock table for exist job", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
		}, false, false, true},
		{"select nothing for create", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				nil, nil,
			},
			{
				getExecuteInfo(insertNewTableIntoStatusSQL(1, 1)),
				nil, nil,
			},
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
			{
				getExecuteInfo(setTableStatusOwnerSQL("new-job-id", 1, now, now, newJobExpireTime, "test-id")),
				nil, nil,
			},
			{
				getExecuteInfo(createJobHistorySQL("new-job-id", testPhysicalTable, newJobExpireTime, now)),
				nil, nil,
			},
			{
				getExecuteInfoWithErr(cache.InsertIntoTTLTask(newMockSession(t), "new-job-id", 1, 0, nil, nil, newJobExpireTime, now)),
				nil, nil,
			},
			{
				getExecuteInfo(updateStatusSQL, nil),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
		}, true, false, false},
		{"select nothing for create and check interval", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				nil, nil,
			},
			{
				getExecuteInfo(insertNewTableIntoStatusSQL(1, 1)),
				nil, nil,
			},
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
			{
				getExecuteInfo(setTableStatusOwnerSQL("new-job-id", 1, now, now, newJobExpireTime, "test-id")),
				nil, nil,
			},
			{
				getExecuteInfo(createJobHistorySQL("new-job-id", testPhysicalTable, newJobExpireTime, now)),
				nil, nil,
			},
			{
				getExecuteInfoWithErr(cache.InsertIntoTTLTask(newMockSession(t), "new-job-id", 1, 0, nil, nil, newJobExpireTime, now)),
				nil, nil,
			},
			{
				getExecuteInfo(updateStatusSQL, nil),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
		}, true, true, false},
		{"select nothing for exist job", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				nil, nil,
			},
		}, false, false, true},
		{"running job but create", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1, CurrentJobTTLExpire: oldJobExpireTime, CurrentJobID: "job1", CurrentJobOwnerID: "owner1", CurrentJobOwnerHBTime: now, CurrentJobStartTime: oldJobStartTime}),
				nil,
			},
		}, true, false, true},
		{"running job but create and check interval", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1, CurrentJobTTLExpire: oldJobExpireTime, CurrentJobID: "job1", CurrentJobOwnerID: "owner1", CurrentJobOwnerHBTime: now, CurrentJobStartTime: oldJobStartTime}),
				nil,
			},
		}, true, true, true},
		{"running job but lock for exist job", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1, CurrentJobTTLExpire: oldJobExpireTime, CurrentJobID: "job1", CurrentJobOwnerID: "owner1", CurrentJobOwnerHBTime: now, CurrentJobStartTime: oldJobStartTime}),
				nil,
			},
		}, false, false, true},
		{"heartbeat timeout job but create", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1, CurrentJobTTLExpire: oldJobExpireTime, CurrentJobID: "job1", CurrentJobOwnerID: "owner1", CurrentJobOwnerHBTime: now.Add(-20 * time.Minute), CurrentJobStartTime: oldJobStartTime}),
				nil,
			},
		}, true, false, true},
		{"heartbeat timeout job but create with check interval", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1, CurrentJobTTLExpire: oldJobExpireTime, CurrentJobID: "job1", CurrentJobOwnerID: "owner1", CurrentJobOwnerHBTime: now.Add(-20 * time.Minute), CurrentJobStartTime: oldJobStartTime}),
				nil,
			},
		}, true, true, true},
		{"heartbeat timeout job for lock", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1, CurrentJobTTLExpire: oldJobExpireTime, CurrentJobID: "job1", CurrentJobOwnerID: "owner1", CurrentJobOwnerHBTime: now.Add(-20 * time.Minute), CurrentJobStartTime: oldJobStartTime}),
				nil,
			},
			{
				getExecuteInfo(setTableStatusOwnerSQL("job1", 1, oldJobStartTime, now, oldJobExpireTime, "test-id")),
				nil, nil,
			},
			{
				getExecuteInfo(updateStatusSQL, nil),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
		}, false, false, false},
		{"return error", testPhysicalTable, []sqlExecute{
			{
				getExecuteInfoForUpdate(cache.SelectFromTTLTableStatusWithID(1)),
				newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil,
			},
			{
				getExecuteInfo(setTableStatusOwnerSQL("new-job-id", 1, now, now, newJobExpireTime, "test-id")),
				nil, errors.New("test error message"),
			},
		}, true, false, true},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			m := NewJobManager("test-id", newMockSessionPool(t), nil, nil, nil)
			m.infoSchemaCache.Tables[c.table.ID] = c.table
			sqlCounter := 0
			se := newMockSession(t)
			// Replay the scripted statements, asserting SQL text and args
			// match the expected sequence exactly.
			se.executeSQL = func(ctx context.Context, sql string, args ...interface{}) (rows []chunk.Row, err error) {
				assert.Less(t, sqlCounter, len(c.sqls))
				assert.Equal(t, c.sqls[sqlCounter].sql, sql)
				assert.Equal(t, c.sqls[sqlCounter].args, args)
				rows = c.sqls[sqlCounter].rows
				err = c.sqls[sqlCounter].err
				sqlCounter += 1
				return
			}
			se.evalExpire = newJobExpireTime

			var job *ttlJob
			if c.isCreate {
				job, err = m.lockNewJob(context.Background(), se, c.table, now, "new-job-id", c.checkInterval)
			} else {
				job, err = m.lockHBTimeoutJob(context.Background(), se, c.table, now)
			}
			// Every scripted statement must have been consumed.
			require.Equal(t, len(c.sqls), sqlCounter)

			if c.hasError {
				assert.NotNil(t, err)
				assert.Nil(t, job)
			} else {
				assert.Nil(t, err)
				assert.NotNil(t, job)
				assert.Equal(t, "test-id", job.ownerID)
				assert.Equal(t, cache.JobStatusRunning, job.status)
				assert.NotNil(t, job.tbl)
				assert.Same(t, c.table, job.tbl)
				if c.isCreate {
					assert.Equal(t, "new-job-id", job.id)
					assert.Equal(t, now, job.createTime)
					assert.Equal(t, newJobExpireTime, job.ttlExpireTime)
				} else {
					assert.Equal(t, "job1", job.id)
					assert.Equal(t, oldJobStartTime, job.createTime)
					assert.Equal(t, oldJobExpireTime, job.ttlExpireTime)
				}
				require.Equal(t, 1, len(m.runningJobs))
				require.Same(t, job, m.runningJobs[0])
			}
		})
	}
}
// TestLocalJobs checks that localJobs only reports running jobs whose table
// status names this manager instance as the current job owner.
func TestLocalJobs(t *testing.T) {
	tbl1 := newMockTTLTbl(t, "t1")
	tbl1.ID = 1
	tbl2 := newMockTTLTbl(t, "t2")
	tbl2.ID = 2
	m := NewJobManager("test-id", nil, nil, nil, nil)
	m.sessPool = newMockSessionPool(t, tbl1, tbl2)

	m.runningJobs = []*ttlJob{{tbl: tbl1, id: "1"}, {tbl: tbl2, id: "2"}}
	m.tableStatusCache.Tables = map[int64]*cache.TableStatus{
		tbl1.ID: {
			CurrentJobOwnerID: m.id,
		},
		tbl2.ID: {
			CurrentJobOwnerID: "another-id",
		},
	}
	assert.Len(t, m.localJobs(), 1)
	assert.Equal(t, m.localJobs()[0].id, "1")
}
|
package reset
import (
"github.com/devspace-cloud/devspace/pkg/util/factory"
"github.com/spf13/cobra"
)
// keyCmd holds the flag values of the `devspace reset key` command.
type keyCmd struct {
	Provider string // cloud provider selected via --provider
}
// newKeyCmd builds the `devspace reset key` cobra command, which resets the
// key of the cluster named in the single positional argument.
func newKeyCmd(f factory.Factory) *cobra.Command {
	cmd := &keyCmd{}
	keyCmd := &cobra.Command{
		Use:   "key",
		Short: "Resets a cluster key",
		Long: `
#######################################################
############### devspace reset key ####################
#######################################################
Resets a key for a given cluster. Useful if the key
cannot be obtained anymore. Needs cluster access scope
Examples:
devspace reset key my-cluster
#######################################################
`,
		Args: cobra.ExactArgs(1),
		RunE: func(cobraCmd *cobra.Command, args []string) error {
			return cmd.RunResetkey(f, cobraCmd, args)
		}}

	keyCmd.Flags().StringVar(&cmd.Provider, "provider", "", "The cloud provider to use")
	return keyCmd
}
// RunResetkey executes the reset key command logic: it resolves the cloud
// provider selected via --provider and resets the key of the cluster named
// in the first argument.
func (cmd *keyCmd) RunResetkey(f factory.Factory, cobraCmd *cobra.Command, args []string) error {
	// Get provider
	log := f.GetLog()
	provider, err := f.GetProvider(cmd.Provider, log)
	if err != nil {
		return err
	}

	// Reset the key
	err = provider.ResetKey(args[0])
	if err != nil {
		return err
	}

	// Fix: the success message read "Successfully reseted" — not a word.
	log.Donef("Successfully reset key for cluster %s", args[0])
	return nil
}
|
package main
import (
"log"
"net/http"
l "github.com/eriklindqvist/recepies_api/app/lib"
)
// main serves static files from the FILEBASE directory (default "files") on
// the PORT given by the environment (default 3003); it exits fatally if the
// listener fails.
func main() {
	log.Printf("Server started")
	fileHandler := http.FileServer(http.Dir(l.Getenv("FILEBASE", "files")))
	http.Handle("/", http.StripPrefix("/", fileHandler))
	addr := ":" + l.Getenv("PORT", "3003")
	if err := http.ListenAndServe(addr, nil); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Program ocagent collects OpenCensus stats and traces
// to export to a configured backend.
package main
import (
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/signal"
"time"
agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
"github.com/census-instrumentation/opencensus-service/exporter"
"github.com/census-instrumentation/opencensus-service/interceptor/opencensus"
"github.com/census-instrumentation/opencensus-service/interceptor/zipkin"
"github.com/census-instrumentation/opencensus-service/internal"
"github.com/census-instrumentation/opencensus-service/spanreceiver"
"go.opencensus.io/plugin/ocgrpc"
"go.opencensus.io/stats/view"
"go.opencensus.io/zpages"
)
// configYAMLFile is the path to the agent's own YAML configuration file.
var configYAMLFile string

// ocInterceptorPort is the TCP port flag for the OpenCensus interceptor.
var ocInterceptorPort int

// zipkinRoute is the HTTP path on which Zipkin v2 spans are accepted.
const zipkinRoute = "/api/v2/spans"
// main delegates to the cobra root command; any startup error is fatal.
func main() {
	if err := rootCmd.Execute(); err != nil {
		log.Fatal(err)
	}
}
// runOCAgent reads and validates the agent's YAML configuration, starts the
// configured interceptors (OpenCensus gRPC, optionally Zipkin) plus optional
// zPages, then blocks until an interrupt signal arrives, at which point all
// registered close functions run.
func runOCAgent() {
	yamlBlob, err := ioutil.ReadFile(configYAMLFile)
	if err != nil {
		log.Fatalf("Cannot read the YAML file %v error: %v", configYAMLFile, err)
	}
	agentConfig, err := parseOCAgentConfig(yamlBlob)
	if err != nil {
		log.Fatalf("Failed to parse own configuration %v error: %v", configYAMLFile, err)
	}

	// Ensure that we check and catch any logical errors with the
	// configuration e.g. if an interceptor shares the same address
	// as an exporter which would cause a self DOS and waste resources.
	if err := agentConfig.checkLogicalConflicts(yamlBlob); err != nil {
		log.Fatalf("Configuration logical error: %v", err)
	}

	ocInterceptorAddr := agentConfig.ocInterceptorAddress()
	traceExporters, closeFns := exportersFromYAMLConfig(yamlBlob)
	commonSpanReceiver := exporter.MultiTraceExporters(traceExporters...)

	// Add other interceptors here as they are implemented
	ocInterceptorDoneFn, err := runOCInterceptor(ocInterceptorAddr, commonSpanReceiver)
	if err != nil {
		log.Fatal(err)
	}
	closeFns = append(closeFns, ocInterceptorDoneFn)

	// If zPages are enabled, run them
	zPagesPort, zPagesEnabled := agentConfig.zPagesPort()
	if zPagesEnabled {
		zCloseFn := runZPages(zPagesPort)
		closeFns = append(closeFns, zCloseFn)
	}

	// If the Zipkin interceptor is enabled, then run it
	if agentConfig.zipkinInterceptorEnabled() {
		zipkinInterceptorAddr := agentConfig.zipkinInterceptorAddress()
		zipkinInterceptorDoneFn, err := runZipkinInterceptor(zipkinInterceptorAddr, commonSpanReceiver)
		if err != nil {
			log.Fatal(err)
		}
		closeFns = append(closeFns, zipkinInterceptorDoneFn)
	}

	// Always cleanup finally
	defer func() {
		for _, closeFn := range closeFns {
			if closeFn != nil {
				closeFn()
			}
		}
	}()

	// Fix: signal.Notify performs a non-blocking send, so an unbuffered
	// channel could miss the signal; a buffer of 1 is required (go vet
	// flags the unbuffered form).
	signalsChan := make(chan os.Signal, 1)
	signal.Notify(signalsChan, os.Interrupt)

	// Wait for the closing signal
	<-signalsChan
}
// runZPages serves the zPages diagnostic handlers under /debug on the given
// port and returns a function that shuts the HTTP server down.
func runZPages(port int) func() error {
	mux := http.NewServeMux()
	zpages.Handle(mux, "/debug")

	listenAddr := fmt.Sprintf(":%d", port)
	listener, err := net.Listen("tcp", listenAddr)
	if err != nil {
		log.Fatalf("Failed to bind to run zPages on %q: %v", listenAddr, err)
	}

	server := http.Server{Handler: mux}
	go func() {
		log.Printf("Running zPages at %q", listenAddr)
		if serveErr := server.Serve(listener); serveErr != nil && serveErr != http.ErrServerClosed {
			log.Fatalf("Failed to serve zPages: %v", serveErr)
		}
	}()
	return server.Close
}
// runOCInterceptor starts the OpenCensus gRPC interceptor on addr, forwarding
// received spans to sr, and returns a function that stops the gRPC server.
func runOCInterceptor(addr string, sr spanreceiver.SpanReceiver) (doneFn func() error, err error) {
	interceptor, err := ocinterceptor.New(sr, ocinterceptor.WithSpanBufferPeriod(800*time.Millisecond))
	if err != nil {
		return nil, fmt.Errorf("Failed to create the OpenCensus interceptor: %v", err)
	}

	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, fmt.Errorf("Cannot bind to address %q: %v", addr, err)
	}

	grpcServer := internal.GRPCServerWithObservabilityEnabled()
	if err := view.Register(internal.AllViews...); err != nil {
		return nil, fmt.Errorf("Failed to register internal.AllViews: %v", err)
	}
	if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
		return nil, fmt.Errorf("Failed to register ocgrpc.DefaultServerViews: %v", err)
	}

	agenttracepb.RegisterTraceServiceServer(grpcServer, interceptor)
	go func() {
		log.Printf("Running OpenCensus interceptor as a gRPC service at %q", addr)
		if serveErr := grpcServer.Serve(listener); serveErr != nil {
			log.Fatalf("Failed to run OpenCensus interceptor: %v", serveErr)
		}
	}()

	return func() error {
		grpcServer.Stop()
		return nil
	}, nil
}
// runZipkinInterceptor starts an HTTP server on addr that accepts Zipkin v2
// spans on zipkinRoute and forwards them to sr; the returned function closes
// the listener.
func runZipkinInterceptor(addr string, sr spanreceiver.SpanReceiver) (doneFn func() error, err error) {
	interceptor, err := zipkininterceptor.New(sr)
	if err != nil {
		return nil, fmt.Errorf("Failed to create the Zipkin interceptor: %v", err)
	}

	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, fmt.Errorf("Cannot bind Zipkin interceptor to address %q: %v", addr, err)
	}

	handlers := http.NewServeMux()
	handlers.Handle(zipkinRoute, interceptor)
	go func() {
		endpoint := addr + zipkinRoute
		log.Printf("Running the Zipkin interceptor at %q", endpoint)
		if serveErr := http.Serve(listener, handlers); serveErr != nil {
			log.Fatalf("Failed to serve the Zipkin interceptor: %v", serveErr)
		}
	}()

	return listener.Close, nil
}
|
package router
import (
"log"
"net/http"
"garduino/controllers"
"github.com/julienschmidt/httprouter"
)
// Listen registers the application routes and serves HTTP on port 8080,
// exiting fatally if the server stops.
func Listen() {
	mux := httprouter.New()
	mux.POST("/", controllers.Injest)
	log.Fatal(http.ListenAndServe(":8080", mux))
}
|
package controller
import (
"GoCRUDs/internal/handler"
"GoCRUDs/pkg/constants"
"GoCRUDs/pkg/request"
"GoCRUDs/pkg/response"
"GoCRUDs/pkg/utils"
"encoding/json"
"io/ioutil"
"net/http"
)
// userHandler is the shared handler instance used by the user endpoints.
var userHandler *handler.UserHandler

// init wires the package-level user handler once at startup.
func init() {
	userHandler = handler.GetUserHandler()
}
// CreateUser decodes a user payload from the request body, delegates the
// creation to the user handler, and writes a JSON response containing the
// new user's ID.
func CreateUser(w http.ResponseWriter, r *http.Request) {
	var user request.User
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		utils.RespondWithJSON(w, constants.FailureResponseMessage, constants.FailureResponseCode, err, nil)
		return
	}
	if jErr := json.Unmarshal(reqBody, &user); jErr != nil {
		utils.RespondWithJSON(w, constants.FailureResponseMessage, constants.FailureResponseCode, jErr, nil)
		return
	}
	err, id := userHandler.CreateUser(user)
	if err != nil {
		// Fix: this failure branch previously reported SuccessResponseMessage
		// alongside FailureResponseCode; report the failure message instead,
		// consistent with the error branches above.
		utils.RespondWithJSON(w, constants.FailureResponseMessage, constants.FailureResponseCode, err, nil)
		return
	}
	resp := response.UserResponse{
		Id: id,
	}
	utils.RespondWithJSON(w, constants.SuccessResponseMessage, constants.SuccessResponseCode, nil, resp)
}
|
package operation
import (
"fmt"
"math"
)
const swapV2ConfTarget = 250 // Approx 2 days

// TODO: we should make FeeWindow enforce a non empty Map of TargetedFees via constructor

// FeeWindow maps confirmation targets (in blocks) to fee rates, in satoshis
// per weight unit.
type FeeWindow struct {
	TargetedFees map[uint]float64
}

// SwapFeeRate returns the appropriate fee rate for a given swap (depends on
// confirmations needed). Useful for when a swap doesn't have a fixed amount
// (e.g AmountLessInvoices + use all funds). A zero confirmation requirement
// means there is no deadline, so the relaxed ~2-day target is used.
func (f *FeeWindow) SwapFeeRate(confirmationsNeeded uint) (float64, error) {
	if confirmationsNeeded == 0 {
		return f.minimumFeeRate(swapV2ConfTarget)
	}
	return f.fastestFeeRate(), nil
}

// minimumFeeRate returns the minimum available fee rate that will hit a given
// confirmation target. We make no guesses (no averages or interpolations), so
// we might overshoot the fee if data is too sparse.
func (f *FeeWindow) minimumFeeRate(confirmationTarget uint) (float64, error) {
	// confirmationTarget is unsigned, so zero is the only invalid value
	// (the original `<= 0` comparison was misleading for a uint).
	if confirmationTarget == 0 {
		return 0, fmt.Errorf("can't get feeRate. Expected positive confirmation target, got %v", confirmationTarget)
	}
	// Pick the highest available target at or below the requested one — i.e.
	// the lowest fee rate that still hits the target. Scanning the map keys is
	// O(len(map)) instead of counting down from confirmationTarget one by one.
	var bestTarget uint
	for target := range f.TargetedFees {
		if target <= confirmationTarget && target > bestTarget {
			bestTarget = target
		}
	}
	if bestTarget != 0 {
		return f.TargetedFees[bestTarget], nil
	}
	// No target at or below the requested one? This is odd, but not illogical:
	// *all* available targets are faster than required, so the fastest rate is
	// a safe overshoot.
	return f.fastestFeeRate(), nil
}

// fastestFeeRate returns the fee rate of the lowest (fastest) confirmation
// target, in satoshis per weight unit.
func (f *FeeWindow) fastestFeeRate() float64 {
	var lowestTarget uint = math.MaxUint32
	for k := range f.TargetedFees {
		if k < lowestTarget {
			lowestTarget = k
		}
	}
	// NOTE: with an empty TargetedFees map this returns 0 (missing-key zero
	// value) — see the constructor TODO above.
	return f.TargetedFees[lowestTarget]
}
|
package main
import (
"fmt"
"math"
)
// solve counts the n-digit positive integers that are also an nth power
// (Project Euler 63). For each n the smallest valid base is
// ceil(10^((n-1)/n)); bases up to 9 qualify, and once that lower bound
// reaches 10 no larger n can contribute.
func solve() int {
	count := 0
	for n := 1; ; n++ {
		lowest := int(math.Ceil(math.Pow(10, float64(n-1)/float64(n))))
		if lowest >= 10 {
			break
		}
		count += 10 - lowest
	}
	return count
}
// main prints the answer to the puzzle described below.
func main() {
	fmt.Printf("%d\n", solve())
}
// How many n-digit positive integers exist which are also an nth power?
// Note:
// Equivalent of asking the number of integer solution of x for which:
// 10^(n-1) <= x^n < 10^n
// So we could get the range for x and n.
|
package main
import (
"context"
"fmt"
"log"
"sync"
"sync/atomic"
"time"
pb "github.com/IgorBaskakov/service/cache"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
)
const (
	// address is the gRPC server endpoint every client connects to.
	address = "localhost:50051"
	// maxConnection is the number of concurrent client streams to open.
	maxConnection = 500
	// maxLen is not referenced in this file — presumably used elsewhere in
	// the package, or dead; TODO confirm.
	maxLen = 100
)
// sendGRPCRequest opens one client connection, consumes the random data
// stream until it ends, and counts every received message into *ops.
func sendGRPCRequest(wg *sync.WaitGroup, ops *uint64) {
	defer wg.Done()
	dialOpts := []grpc.DialOption{
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time: 2 * time.Minute,
		}),
	}
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, dialOpts...)
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	stream, err := pb.NewCacherClient(conn).GetRandomDataStream(context.Background(), &pb.Nothing{})
	if err != nil {
		return
	}
	for {
		// io.EOF (or any transport error) ends this reader.
		if _, err := stream.Recv(); err != nil {
			return
		}
		atomic.AddUint64(ops, 1)
	}
}
// main fans out maxConnection concurrent stream readers, waits for them all,
// then reports the total number of messages read and the elapsed wall time.
func main() {
	start := time.Now()
	defer func() {
		fmt.Printf("spent %s\n", time.Since(start))
	}()
	var ops uint64
	wg := &sync.WaitGroup{}
	for i := 0; i < maxConnection; i++ {
		wg.Add(1)
		go sendGRPCRequest(wg, &ops)
	}
	wg.Wait()
	fmt.Println("count read from stream:", atomic.LoadUint64(&ops))
}
|
package namespace
import (
"context"
"os"
)
// Mount/bind flag values. NOTE(review): the names mirror Plan 9's
// MREPL/MBEFORE/MAFTER mount flags — confirm the intended semantics against
// the implementations of this interface.
const (
	MREPL int = iota
	MBEFORE
	MAFTER
)

// Namespace abstracts a file namespace: binding and mounting paths, running
// commands inside it, and basic file operations.
type Namespace interface {
	Bind(new, old string, flags int)
	Mount(new, old string, flags int)
	Cmd(ctx context.Context, cmd string, args ...string) Cmd
	Mkdir(name string) error
	Open(name string) (*os.File, error)
	OpenFile(name string, flag int, perm os.FileMode) (*os.File, error)
	Create(name string) (*os.File, error)
}

// Workspace is a Namespace that can additionally mount a workspace root.
type Workspace interface {
	Namespace
	MountWorkspace(s string)
}

// Cmd mirrors the os/exec.Cmd lifecycle methods for commands run inside a
// Namespace.
type Cmd interface {
	Run() error
	Start() error
	Wait() error
	CombinedOutput() ([]byte, error)
	Output() ([]byte, error)
}
|
// Package api provides an example on how to use go-fuzz.
package api
import (
	"encoding/json"
	"errors"
	"io/ioutil"
	"net/http"
	"strconv"
	"strings"
)
// Need a named type for our user.
// A wire record is positional text: bytes [0:3] are Type, [3:5] the
// two-digit Age, and the remainder the Name (see extractUser).
type user struct {
	Type string
	Name string
	Age  int
}
// Routes initialize the routes.
// It registers the single /process endpoint on the default serve mux.
func Routes() {
	http.HandleFunc("/process", Process)
}
// Process handles the processing of data.
// It reads the POSTed body, parses it as comma-separated user records, and
// replies with the parsed users encoded as JSON. Any read or parse failure
// is reported via SendError.
func Process(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		SendError(w, err)
		return
	}
	var users []user
	for _, record := range strings.Split(string(body), ",") {
		u, err := extractUser(record)
		if err != nil {
			SendError(w, err)
			return
		}
		users = append(users, u)
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(users)
}
// SendError responds with an error.
// It writes a 400 status and a JSON body of the form {"Error": "..."}.
func SendError(w http.ResponseWriter, err error) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusBadRequest)
	payload := struct{ Error string }{Error: err.Error()}
	json.NewEncoder(w).Encode(payload)
}
// extractUser knows how to extract a user from the string.
// The record layout is positional: bytes [0:3] are the type, [3:5] the
// two-digit age, and the remainder the name.
func extractUser(data string) (user, error) {
	// Guard the positional slicing below: a record shorter than 5 bytes
	// previously caused an index-out-of-range panic (the crash this package's
	// fuzz target finds). Return an error instead so Process replies 400.
	if len(data) < 5 {
		return user{}, errors.New("record too short: need at least 5 bytes")
	}
	// Capture the age and convert to integer.
	age, err := strconv.Atoi(data[3:5])
	if err != nil {
		return user{}, err
	}
	// Create the user value.
	u := user{
		Type: data[:3],
		Name: data[5:],
		Age:  age,
	}
	return u, nil
}
|
package datastruct
import "fmt"
// ExampleStack exercises the stack with int and struct-pointer elements.
// The LIFO pop order is pinned by the Output block below, so the push/pop
// sequence here must not be reordered.
func ExampleStack() {
	// test for type int
	intStack := NewStack(10)
	intStack.Push(10)
	intStack.Push(1)
	intStack.Push(-5)
	fmt.Println(intStack.Top())
	intStack.Pop()
	intStack.Push(5)
	for !intStack.IsEmpty() {
		fmt.Println(intStack.Top())
		intStack.Pop()
	}
	// test for type struct
	type pair struct {
		first, second int
	}
	structStack := NewStack(10)
	structStack.Push(&pair{first: 3, second: 10})
	structStack.Push(&pair{first: -1, second: 2})
	structStack.Push(&pair{first: 5, second: -5})
	// Top returns an interface value; assert back to the stored pointer type.
	top := structStack.Top().(*pair)
	fmt.Println(top.first, top.second)
	structStack.Pop()
	structStack.Push(&pair{0, 3})
	for !structStack.IsEmpty() {
		top = structStack.Top().(*pair)
		fmt.Println(top.first, top.second)
		structStack.Pop()
	}
	// Output:
	// -5
	// 5
	// 1
	// 10
	// 5 -5
	// 0 3
	// -1 2
	// 3 10
}
|
package main
import (
"fmt"
"log"
"net/http"
"github.com/PuerkitoBio/goquery"
)
//GetData Function to get the html
func GetData(url string) {
res, err := http.Get(url)
if err != nil {
log.Fatal(err)
}
defer res.Body.Close()
if res.StatusCode != 200 {
log.Fatalf("status code error: %d %s", res.StatusCode, res.Status)
}
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(res.Body)
if err != nil {
log.Fatal(err)
}
// Find the review items
doc.Find("#Nse_Prc_tick").Each(func(i int, s *goquery.Selection) {
// For each item found, get the band and title
band := s.Find("strong").Text()
fmt.Printf("stock price %d: %s \n", i, band)
})
}
// main scrapes and prints the price for each configured stock page.
func main() {
	fmt.Printf("starting the stock-scraper\n")
	// NOTE(review): both entries point at the Adani Gas quote URL — the
	// "Rain Industries" value looks like a copy-paste slip; confirm the
	// correct URL and fix.
	n := map[string]string{
		"Adani gas": "https://www.moneycontrol.com/india/stockpricequote/miscellaneous/adanigaslimited/ADG01",
		"Rain Industries": "https://www.moneycontrol.com/india/stockpricequote/miscellaneous/adanigaslimited/ADG01",
	}
	// var urls = []string{"https://www.moneycontrol.com/india/stockpricequote/miscellaneous/adanigaslimited/ADG01",
	// "https://www.moneycontrol.com/india/stockpricequote/miscellaneous/adanigaslimited/ADG01"}
	for name, url := range n {
		fmt.Printf("Url %s : %s \n", name, url)
		GetData(url)
	}
}
|
package dontknowtrade
import (
"github.com/shopspring/decimal"
"github.com/quickfixgo/quickfix"
"github.com/quickfixgo/quickfix/enum"
"github.com/quickfixgo/quickfix/field"
"github.com/quickfixgo/quickfix/fix40"
"github.com/quickfixgo/quickfix/tag"
)
// NOTE(review): this file follows the quickfix code-generator layout — one
// Set/Get/Has triple per FIX field, ordered by tag number. Keep any edits
// mechanical and consistent across all fields.

//DontKnowTrade is the fix40 DontKnowTrade type, MsgType = Q
type DontKnowTrade struct {
	fix40.Header
	*quickfix.Body
	fix40.Trailer
	Message *quickfix.Message
}

//FromMessage creates a DontKnowTrade from a quickfix.Message instance
func FromMessage(m *quickfix.Message) DontKnowTrade {
	return DontKnowTrade{
		Header: fix40.Header{&m.Header},
		Body: &m.Body,
		Trailer: fix40.Trailer{&m.Trailer},
		Message: m,
	}
}

//ToMessage returns a quickfix.Message instance
func (m DontKnowTrade) ToMessage() *quickfix.Message {
	return m.Message
}

//New returns a DontKnowTrade initialized with the required fields for DontKnowTrade
func New(dkreason field.DKReasonField, symbol field.SymbolField, side field.SideField, orderqty field.OrderQtyField, lastshares field.LastSharesField, lastpx field.LastPxField) (m DontKnowTrade) {
	m.Message = quickfix.NewMessage()
	m.Header = fix40.NewHeader(&m.Message.Header)
	m.Body = &m.Message.Body
	m.Trailer.Trailer = &m.Message.Trailer
	m.Header.Set(field.NewMsgType("Q"))
	m.Set(dkreason)
	m.Set(symbol)
	m.Set(side)
	m.Set(orderqty)
	m.Set(lastshares)
	m.Set(lastpx)
	return
}

//A RouteOut is the callback type that should be implemented for routing Message
type RouteOut func(msg DontKnowTrade, sessionID quickfix.SessionID) quickfix.MessageRejectError

//Route returns the beginstring, message type, and MessageRoute for this Message type
func Route(router RouteOut) (string, string, quickfix.MessageRoute) {
	r := func(msg *quickfix.Message, sessionID quickfix.SessionID) quickfix.MessageRejectError {
		return router(FromMessage(msg), sessionID)
	}
	return "FIX.4.0", "Q", r
}

// --- Setters, one per supported field ---

//SetExecID sets ExecID, Tag 17
func (m DontKnowTrade) SetExecID(v string) {
	m.Set(field.NewExecID(v))
}

//SetLastPx sets LastPx, Tag 31
func (m DontKnowTrade) SetLastPx(value decimal.Decimal, scale int32) {
	m.Set(field.NewLastPx(value, scale))
}

//SetLastShares sets LastShares, Tag 32
func (m DontKnowTrade) SetLastShares(value decimal.Decimal, scale int32) {
	m.Set(field.NewLastShares(value, scale))
}

//SetOrderID sets OrderID, Tag 37
func (m DontKnowTrade) SetOrderID(v string) {
	m.Set(field.NewOrderID(v))
}

//SetOrderQty sets OrderQty, Tag 38
func (m DontKnowTrade) SetOrderQty(value decimal.Decimal, scale int32) {
	m.Set(field.NewOrderQty(value, scale))
}

//SetSide sets Side, Tag 54
func (m DontKnowTrade) SetSide(v enum.Side) {
	m.Set(field.NewSide(v))
}

//SetSymbol sets Symbol, Tag 55
func (m DontKnowTrade) SetSymbol(v string) {
	m.Set(field.NewSymbol(v))
}

//SetText sets Text, Tag 58
func (m DontKnowTrade) SetText(v string) {
	m.Set(field.NewText(v))
}

//SetDKReason sets DKReason, Tag 127
func (m DontKnowTrade) SetDKReason(v enum.DKReason) {
	m.Set(field.NewDKReason(v))
}

// --- Getters; each returns the zero value plus a reject error when absent ---

//GetExecID gets ExecID, Tag 17
func (m DontKnowTrade) GetExecID() (v string, err quickfix.MessageRejectError) {
	var f field.ExecIDField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}

//GetLastPx gets LastPx, Tag 31
func (m DontKnowTrade) GetLastPx() (v decimal.Decimal, err quickfix.MessageRejectError) {
	var f field.LastPxField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}

//GetLastShares gets LastShares, Tag 32
func (m DontKnowTrade) GetLastShares() (v decimal.Decimal, err quickfix.MessageRejectError) {
	var f field.LastSharesField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}

//GetOrderID gets OrderID, Tag 37
func (m DontKnowTrade) GetOrderID() (v string, err quickfix.MessageRejectError) {
	var f field.OrderIDField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}

//GetOrderQty gets OrderQty, Tag 38
func (m DontKnowTrade) GetOrderQty() (v decimal.Decimal, err quickfix.MessageRejectError) {
	var f field.OrderQtyField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}

//GetSide gets Side, Tag 54
func (m DontKnowTrade) GetSide() (v enum.Side, err quickfix.MessageRejectError) {
	var f field.SideField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}

//GetSymbol gets Symbol, Tag 55
func (m DontKnowTrade) GetSymbol() (v string, err quickfix.MessageRejectError) {
	var f field.SymbolField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}

//GetText gets Text, Tag 58
func (m DontKnowTrade) GetText() (v string, err quickfix.MessageRejectError) {
	var f field.TextField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}

//GetDKReason gets DKReason, Tag 127
func (m DontKnowTrade) GetDKReason() (v enum.DKReason, err quickfix.MessageRejectError) {
	var f field.DKReasonField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}

// --- Presence checks ---

//HasExecID returns true if ExecID is present, Tag 17
func (m DontKnowTrade) HasExecID() bool {
	return m.Has(tag.ExecID)
}

//HasLastPx returns true if LastPx is present, Tag 31
func (m DontKnowTrade) HasLastPx() bool {
	return m.Has(tag.LastPx)
}

//HasLastShares returns true if LastShares is present, Tag 32
func (m DontKnowTrade) HasLastShares() bool {
	return m.Has(tag.LastShares)
}

//HasOrderID returns true if OrderID is present, Tag 37
func (m DontKnowTrade) HasOrderID() bool {
	return m.Has(tag.OrderID)
}

//HasOrderQty returns true if OrderQty is present, Tag 38
func (m DontKnowTrade) HasOrderQty() bool {
	return m.Has(tag.OrderQty)
}

//HasSide returns true if Side is present, Tag 54
func (m DontKnowTrade) HasSide() bool {
	return m.Has(tag.Side)
}

//HasSymbol returns true if Symbol is present, Tag 55
func (m DontKnowTrade) HasSymbol() bool {
	return m.Has(tag.Symbol)
}

//HasText returns true if Text is present, Tag 58
func (m DontKnowTrade) HasText() bool {
	return m.Has(tag.Text)
}

//HasDKReason returns true if DKReason is present, Tag 127
func (m DontKnowTrade) HasDKReason() bool {
	return m.Has(tag.DKReason)
}
|
package externalservices
import (
"encoding/xml"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"poliskarta/api/structs"
"strings"
"sync"
)
// CallMapQuest geocodes a police event's location via the MapQuest API,
// retrying with progressively fewer leading words until a result is found.
// On success it stores the coordinates and the words that matched; on any
// error it records zero coordinates and an "<N/A>" search-word marker.
// Intended to run as a goroutine; wg.Done is always signalled.
func CallMapQuest(policeEvent *structs.PoliceEvent, credentials structs.Credentials, wg *sync.WaitGroup) {
	defer wg.Done()
	mapURL := "http://open.mapquestapi.com/geocoding/v1/address?key=" + credentials.Mapquestkey + "&outFormat=xml&location="
	// markUnresolved records the shared "no usable result" outcome.
	markUnresolved := func() {
		policeEvent.Location.Latitude = 0
		policeEvent.Location.Longitude = 0
		policeEvent.Location.SearchWords = append(policeEvent.Location.SearchWords, "<N/A>")
	}
	for i := 0; i < len(policeEvent.Location.Words); i++ {
		wordsToSearchWith := URLifyString(policeEvent.Location.Words[i:])
		httpResponse, httpErr := http.Get(mapURL + wordsToSearchWith)
		if httpErr != nil {
			markUnresolved()
			return
		}
		xmlResponse, ioErr := ioutil.ReadAll(httpResponse.Body)
		// Fix: close the body as soon as it is consumed. The original used
		// `defer` inside the loop, holding every response open (and its
		// connection unreusable) until the function returned.
		httpResponse.Body.Close()
		if ioErr != nil {
			markUnresolved()
			break
		}
		geoLocation := geolocationXMLtoStructs(xmlResponse)
		resultIsGood, connectErr := evaluateGeoLocation(geoLocation)
		if connectErr != nil {
			markUnresolved()
			break
		}
		if resultIsGood {
			policeEvent.Location.Latitude = geoLocation.Locations[0].LocationAlternatives[0].Latitude
			policeEvent.Location.Longitude = geoLocation.Locations[0].LocationAlternatives[0].Longitude
			policeEvent.Location.SearchWords = policeEvent.Location.Words[i:]
			break
		}
		// No usable result for this word window: drop the leading word and retry.
	}
}
// URLifyString joins the given words with spaces and escapes the result for
// use in a URL query component (spaces become "+"). Equivalent to the old
// manual loop — which appended a trailing space, escaped, then trimmed the
// trailing "+" — but without the quadratic string concatenation.
func URLifyString(sliceToURLify []string) string {
	return url.QueryEscape(strings.Join(sliceToURLify, " "))
}
// evaluateGeoLocation reports whether the geocoding response carries at
// least one usable location alternative. A response with no locations at
// all is treated as a communication failure.
func evaluateGeoLocation(geoLocation structs.GeoLocation) (bool, error) {
	if len(geoLocation.Locations) == 0 {
		return false, errors.New("Communication error with mapquest.com")
	}
	return geoLocation.Locations[0].LocationAlternatives != nil, nil
}
// geolocationXMLtoStructs decodes a MapQuest XML payload into a GeoLocation.
// Decode failures are logged and yield the zero value, which the caller
// treats as "no result".
func geolocationXMLtoStructs(XMLresponse []byte) structs.GeoLocation {
	geoLocation := structs.GeoLocation{}
	if err := xml.Unmarshal(XMLresponse, &geoLocation); err != nil {
		fmt.Println("Geo XML-Struct-error: ", err.Error())
	}
	return geoLocation
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// Const mirrors PostgreSQL's Const parse node (a literal constant in an
// expression tree); field names follow the upstream C struct.
type Const struct {
	Xpr ast.Node
	Consttype Oid
	Consttypmod int32
	Constcollid Oid
	Constlen int
	Constvalue Datum
	Constisnull bool
	Constbyval bool
	Location int
}

// Pos returns the node's recorded position in the source statement.
func (n *Const) Pos() int {
	return n.Location
}
|
package install
// Reason codes attached to StrategyError values.
// NOTE(review): the "InsufficentPermissions" string value is misspelled but
// is part of the public API surface; do not change it without a migration.
const (
	StrategyErrReasonComponentMissing   = "ComponentMissing"
	StrategyErrReasonAnnotationsMissing = "AnnotationsMissing"
	StrategyErrReasonWaiting            = "Waiting"
	StrategyErrReasonInvalidStrategy    = "InvalidStrategy"
	StrategyErrReasonTimeout            = "Timeout"
	StrategyErrReasonUnknown            = "Unknown"
	StrategyErrBadPatch                 = "PatchUnsuccessful"
	StrategyErrDeploymentUpdated        = "DeploymentUpdated"
	StrategyErrInsufficientPermissions  = "InsufficentPermissions"
)

// unrecoverableErrors are the set of errors that mean we can't recover an install strategy
var unrecoverableErrors = map[string]struct{}{
	StrategyErrReasonInvalidStrategy:   {},
	StrategyErrReasonTimeout:           {},
	StrategyErrBadPatch:                {},
	StrategyErrInsufficientPermissions: {},
}

// StrategyError is used to represent error types for install strategies
type StrategyError struct {
	Reason  string
	Message string
}

var _ error = StrategyError{}

// Error implements the Error interface.
func (e StrategyError) Error() string {
	return e.Message
}

// IsErrorUnrecoverable reports if a given strategy error is one of the predefined unrecoverable types
func IsErrorUnrecoverable(err error) bool {
	if err == nil {
		return false
	}
	_, unrecoverable := unrecoverableErrors[ReasonForError(err)]
	return unrecoverable
}

// ReasonForError extracts the reason code from a StrategyError (value or
// pointer); any other error maps to StrategyErrReasonUnknown.
func ReasonForError(err error) string {
	switch e := err.(type) {
	case StrategyError:
		return e.Reason
	case *StrategyError:
		return e.Reason
	default:
		return StrategyErrReasonUnknown
	}
}
|
package env
import "os"
// GetWithDefault simplifies accessing env variables by allowing you get a default if the variable is not set
func GetWithDefault(key string, substitute string) string {
s := os.Getenv(key)
if s == "" {
s = substitute
}
return s
}
|
package main
import (
"log"
"net/http"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/go-chi/render"
"github.com/dlockamy/goRouter/config"
"github.com/dlockamy/goRouter/todo"
)
// Routes builds the chi router with the standard middleware stack and
// mounts the versioned todo API under /v1/api/todo.
func Routes(configuration *config.Config) *chi.Mux {
	r := chi.NewRouter()
	r.Use(
		render.SetContentType(render.ContentTypeJSON),
		middleware.Logger,
		middleware.DefaultCompress,
		middleware.RedirectSlashes,
		middleware.Recoverer,
	)
	r.Route("/v1", func(sub chi.Router) {
		sub.Mount("/api/todo", todo.Routes(configuration))
	})
	return r
}
// main loads configuration, logs the registered route table, and serves the
// API on the configured port.
func main() {
	configuration, err := config.New()
	if err != nil {
		log.Panicln("Configuration error", err)
	}
	router := Routes(configuration)
	// Walk the route tree once at startup so every endpoint is visible in logs.
	walkFunc := func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error {
		log.Printf("%s %s\n", method, route)
		return nil
	}
	if err := chi.Walk(router, walkFunc); err != nil {
		// Fix: message previously read "Loggine err".
		log.Panicf("Logging err: %s\n", err.Error())
	}
	log.Println("Serving application at PORT :" + configuration.Constants.PORT)
	log.Fatal(http.ListenAndServe(":"+configuration.Constants.PORT, router))
}
|
package middlerware
import (
"github.com/go-redis/redis"
"../config"
)
// rc is the package-shared Redis client.
var rc *redis.Client

// init builds the Redis client from app config and registers it under the
// "redis" key. NOTE(review): `Cont` is not defined in this file — presumably
// a package-level container declared elsewhere in the package; confirm.
func init() {
	dsn := config.AppConfig.Redis.Host + ":" + config.AppConfig.Redis.Port
	rc = redis.NewClient(&redis.Options{
		Addr: dsn,
		Password: config.AppConfig.Redis.Password,
	})
	Cont.Register("redis", rc)
}
// GetRedis returns r as a *redis.Client, or nil when r is some other type.
// (The local is named client so it no longer shadows the package var rc.)
func GetRedis(r interface{}) *redis.Client {
	if client, ok := r.(*redis.Client); ok {
		return client
	}
	return nil
}
|
package mapper
import (
"database/sql"
"github.com/fatih/structs"
"github.com/xormplus/xorm"
entity "mix/test/entity/cold"
"regexp"
"mix/test/utils/mysql"
)
// CreateDecryptionLog inserts a decryption log row using the
// CreateDecryptionLog SQL map entry, bound from the item's field map.
func CreateDecryptionLog(session *xorm.Session, item *entity.DecryptionLog) (sql.Result, error) {
	return session.SqlMapClient("CreateDecryptionLog", item.Map()).Execute()
}
// GetDecryptionLog loads a single decryption log row by id via the
// GetDecryptionLog SQL map entry. It returns nil (with res.Error, if any)
// when no row matched.
func GetDecryptionLog(session *xorm.Session, id int64) (item *entity.DecryptionLog, err error) {
	item = new(entity.DecryptionLog)
	res := session.SqlMapClient("GetDecryptionLog", id).GetFirst(item)
	if !res.Has {
		// No matching row: report nil rather than a zero-valued struct.
		item = nil
	}
	err = res.Error
	return
}
// GetDecryptionLogList loads every decryption log row via the
// GetDecryptionLogList SQL map entry.
func GetDecryptionLogList(session *xorm.Session) (items []*entity.DecryptionLog, err error) {
	err = session.SqlMapClient("GetDecryptionLogList").Find(&items)
	return
}
// RemoveDecryptionLog deletes the decryption log row with the given id via
// the RemoveDecryptionLog SQL map entry.
func RemoveDecryptionLog(session *xorm.Session, id int64) (sql.Result, error) {
	return session.SqlMapClient("RemoveDecryptionLog", id).Execute()
}
// updateWhereCommaRE strips a dangling "," immediately before the where
// clause that the SQL template leaves behind when trailing columns are not
// rendered. Compiled once at package scope instead of on every call.
var updateWhereCommaRE = regexp.MustCompile(`,\s*where`)

// UpdateDecryptionLog renders the UpdateDecryptionLog SQL template with the
// item's fields and executes the resulting statement on the session.
func UpdateDecryptionLog(engine *xorm.EngineGroup, session *xorm.Session, item *entity.DecryptionLog) (res sql.Result, err error) {
	tpl, err := mysql.Template("UpdateDecryptionLog", engine.GetSql("UpdateDecryptionLog"))
	if err != nil {
		return
	}
	itemMap := structs.Map(item)
	execSql, err := tpl.Execute(itemMap)
	if err != nil {
		return
	}
	execSql = updateWhereCommaRE.ReplaceAllString(execSql, " where")
	res, err = session.SQL(execSql, &itemMap).Execute()
	return
}
|
package deferred
import "fmt"
// Run demonstrates two defer properties: deferred calls execute LIFO at
// function return, and their arguments are evaluated at the point of the
// defer statement. Output: "3", then "deferred2 : 2", then "deferred1 : 1".
func Run(){
	i:=1
	// i is captured as 1 here, even though i is incremented afterwards.
	defer fmt.Printf("deferred1 : %d\n",i)
	i++
	// Captured as 2; runs before deferred1 (LIFO).
	defer fmt.Printf("deferred2 : %d\n",i)
	i++
	fmt.Println(i)
}
|
package router
import (
"context"
"fmt"
"net/http"
"os"
"runtime/debug"
"strconv"
"strings"
"boiler/pkg/entity"
"boiler/pkg/errors"
"boiler/pkg/store/config"
"github.com/go-chi/chi/middleware"
"github.com/lestrrat-go/jwx/jwa"
"github.com/lestrrat-go/jwx/jwt"
"github.com/rs/zerolog/log"
)
// Recoverer recover from panic
func Recoverer(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
defer func() {
if rvr := recover(); rvr != nil {
logEntry := middleware.GetLogEntry(r)
caller := errors.Caller()
if logEntry != nil {
logEntry.Panic(rvr, debug.Stack())
stackTitle, stackTrace := errors.GetStack()
fmt.Fprintf(
os.Stderr,
"%s\n %s\n",
stackTitle,
strings.Join(stackTrace, "\n "),
)
} else if e, is := rvr.(error); is {
log.Error().Err(e).Str("file", caller).Msg("panic")
} else {
log.Error().Str("file", caller).Msg(rvr.(string))
}
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
}
}()
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
// AuthUserMiddleware parse JWT Token and inject it back as a *entity.AuthUser from request if available
// NOTE(review): the injected value is actually a *entity.JWTUser — confirm
// whether the comment or the type is the one that's out of date.
// The request proceeds unauthenticated (no context value) whenever the
// Authorization header is absent, the token fails verification, or the
// subject is not an integer id.
func AuthUserMiddleware(cfg *config.Config) func(next http.Handler) http.Handler {
	// Length of the "Bearer " scheme prefix, computed once per middleware.
	prefixLen := len("Bearer ")
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			if raw := r.Header.Get("Authorization"); len(raw) > prefixLen {
				// Verify the RS256 signature with the configured public key.
				token, err := jwt.ParseString(raw[prefixLen:], jwt.WithVerify(jwa.RS256, &cfg.JWT.PrivateKey.PublicKey))
				if err == nil && jwt.Verify(token) == nil {
					// The JWT subject carries the numeric user id.
					id, err := strconv.ParseInt(token.Subject(), 10, 64)
					if err == nil {
						next.ServeHTTP(w, r.WithContext(
							context.WithValue(
								r.Context(), config.ContextKeyAuthenticationUser{}, &entity.JWTUser{
									ID: id,
								},
							),
						))
						return
					}
				}
			}
			next.ServeHTTP(w, r)
		}
		return http.HandlerFunc(fn)
	}
}
|
package tools
import "github.com/json-iterator/go/extra"
// init registers json-iterator's fuzzy decoders process-wide as a side
// effect of importing this package (tolerant decoding of loosely typed
// JSON — see the json-iterator "extra" package docs for exact behavior).
func init() {
	extra.RegisterFuzzyDecoders()
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"math"
"testing"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/testkit/testutil"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/stretchr/testify/require"
)
// TestUnary exercises unary minus on values whose negation overflows int64:
// overflowing results come back as decimals (compared via their string
// form), while in-range results stay int64.
func TestUnary(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     interface{}
		expected interface{}
		overflow bool
		getErr   bool
	}{
		{uint64(9223372036854775809), "-9223372036854775809", true, false},
		{uint64(9223372036854775810), "-9223372036854775810", true, false},
		{uint64(9223372036854775808), int64(-9223372036854775808), false, false},
		{int64(math.MinInt64), "9223372036854775808", true, false}, // --9223372036854775808
	}
	sc := ctx.GetSessionVars().StmtCtx
	// Force SELECT-statement semantics for overflow handling; restore after.
	origin := sc.InSelectStmt
	sc.InSelectStmt = true
	defer func() {
		sc.InSelectStmt = origin
	}()
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.UnaryMinus, primitiveValsToConstants(ctx, []interface{}{c.args})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if !c.getErr {
			require.NoError(t, err)
			if !c.overflow {
				require.Equal(t, c.expected, d.GetValue())
			} else {
				require.Equal(t, c.expected, d.GetMysqlDecimal().String())
			}
		} else {
			require.Error(t, err)
		}
	}
	_, err := funcs[ast.UnaryMinus].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestLogicAnd covers MySQL AND semantics including string-to-number
// coercion ("1a" is truthy), NULL propagation (NULL AND 0 is 0, NULL AND 1
// is NULL), and error propagation from bad arguments.
func TestLogicAnd(t *testing.T) {
	ctx := createContext(t)
	sc := ctx.GetSessionVars().StmtCtx
	// Truncation warnings from string coercion must not fail the eval;
	// restore the original flag after the test.
	origin := sc.IgnoreTruncate.Load()
	defer func() {
		sc.IgnoreTruncate.Store(origin)
	}()
	sc.IgnoreTruncate.Store(true)
	cases := []struct {
		args     []interface{}
		expected int64
		isNil    bool
		getErr   bool
	}{
		{[]interface{}{1, 1}, 1, false, false},
		{[]interface{}{1, 0}, 0, false, false},
		{[]interface{}{0, 1}, 0, false, false},
		{[]interface{}{0, 0}, 0, false, false},
		{[]interface{}{2, -1}, 1, false, false},
		{[]interface{}{"a", "0"}, 0, false, false},
		{[]interface{}{"a", "1"}, 0, false, false},
		{[]interface{}{"1a", "0"}, 0, false, false},
		{[]interface{}{"1a", "1"}, 1, false, false},
		{[]interface{}{0, nil}, 0, false, false},
		{[]interface{}{nil, 0}, 0, false, false},
		{[]interface{}{nil, 1}, 0, true, false},
		{[]interface{}{0.001, 0}, 0, false, false},
		{[]interface{}{0.001, 1}, 1, false, false},
		{[]interface{}{nil, 0.000}, 0, false, false},
		{[]interface{}{nil, 0.001}, 0, true, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000001"), 0}, 0, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000001"), 1}, 1, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000000"), nil}, 0, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000001"), nil}, 0, true, false},
		{[]interface{}{errors.New("must error"), 1}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.LogicAnd, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetInt64())
			}
		}
	}
	// Test incorrect parameter count.
	_, err := newFunctionForTest(ctx, ast.LogicAnd, NewZero())
	require.Error(t, err)
	_, err = funcs[ast.LogicAnd].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestLeftShift checks the << builtin, including NULL and error propagation.
// The trailing wrong-arity checks were added for consistency with the
// sibling shift/bit-op tests (TestRightShift, TestBitXor, ...), which all
// verify the one-argument error path.
func TestLeftShift(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     []interface{}
		expected uint64
		isNil    bool
		getErr   bool
	}{
		{[]interface{}{123, 2}, uint64(492), false, false},
		{[]interface{}{-123, 2}, uint64(18446744073709551124), false, false},
		{[]interface{}{nil, 1}, 0, true, false},
		{[]interface{}{errors.New("must error"), 1}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.LeftShift, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetUint64())
			}
		}
	}
	// Test incorrect parameter count.
	_, err := newFunctionForTest(ctx, ast.LeftShift, NewZero())
	require.Error(t, err)
	_, err = funcs[ast.LeftShift].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestRightShift checks the >> builtin (operands treated as uint64, so a
// negative left operand wraps), NULL and error propagation, and the
// wrong-arity error path.
func TestRightShift(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     []interface{}
		expected uint64
		isNil    bool
		getErr   bool
	}{
		{[]interface{}{123, 2}, uint64(30), false, false},
		{[]interface{}{-123, 2}, uint64(4611686018427387873), false, false},
		{[]interface{}{nil, 1}, 0, true, false},
		{[]interface{}{errors.New("must error"), 1}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.RightShift, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetUint64())
			}
		}
	}
	// Test incorrect parameter count.
	_, err := newFunctionForTest(ctx, ast.RightShift, NewZero())
	require.Error(t, err)
	_, err = funcs[ast.RightShift].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestBitXor checks the ^ builtin (uint64 semantics, so a negative operand
// wraps), NULL and error propagation, and the wrong-arity error path.
func TestBitXor(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     []interface{}
		expected uint64
		isNil    bool
		getErr   bool
	}{
		{[]interface{}{123, 321}, uint64(314), false, false},
		{[]interface{}{-123, 321}, uint64(18446744073709551300), false, false},
		{[]interface{}{nil, 1}, 0, true, false},
		{[]interface{}{errors.New("must error"), 1}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Xor, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetUint64())
			}
		}
	}
	// Test incorrect parameter count.
	_, err := newFunctionForTest(ctx, ast.Xor, NewZero())
	require.Error(t, err)
	_, err = funcs[ast.Xor].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestBitOr checks the | builtin (uint64 semantics), NULL and error
// propagation, and the wrong-arity error path.
func TestBitOr(t *testing.T) {
	ctx := createContext(t)
	sc := ctx.GetSessionVars().StmtCtx
	// Suppress truncation errors from argument coercion; restore after.
	origin := sc.IgnoreTruncate.Load()
	defer func() {
		sc.IgnoreTruncate.Store(origin)
	}()
	sc.IgnoreTruncate.Store(true)
	cases := []struct {
		args     []interface{}
		expected uint64
		isNil    bool
		getErr   bool
	}{
		{[]interface{}{123, 321}, uint64(379), false, false},
		{[]interface{}{-123, 321}, uint64(18446744073709551557), false, false},
		{[]interface{}{nil, 1}, 0, true, false},
		{[]interface{}{errors.New("must error"), 1}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Or, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetUint64())
			}
		}
	}
	// Test incorrect parameter count.
	_, err := newFunctionForTest(ctx, ast.Or, NewZero())
	require.Error(t, err)
	_, err = funcs[ast.Or].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestLogicOr covers MySQL OR semantics including string-to-number coercion,
// NULL propagation (1 OR NULL is 1, 0 OR NULL is NULL), decimal operands,
// and error propagation, plus the wrong-arity error path.
func TestLogicOr(t *testing.T) {
	ctx := createContext(t)
	sc := ctx.GetSessionVars().StmtCtx
	// Truncation warnings from string coercion must not fail the eval;
	// restore the original flag after the test.
	origin := sc.IgnoreTruncate.Load()
	defer func() {
		sc.IgnoreTruncate.Store(origin)
	}()
	sc.IgnoreTruncate.Store(true)
	cases := []struct {
		args     []interface{}
		expected int64
		isNil    bool
		getErr   bool
	}{
		{[]interface{}{1, 1}, 1, false, false},
		{[]interface{}{1, 0}, 1, false, false},
		{[]interface{}{0, 1}, 1, false, false},
		{[]interface{}{0, 0}, 0, false, false},
		{[]interface{}{2, -1}, 1, false, false},
		{[]interface{}{"a", "0"}, 0, false, false},
		{[]interface{}{"a", "1"}, 1, false, false},
		{[]interface{}{"1a", "0"}, 1, false, false},
		{[]interface{}{"1a", "1"}, 1, false, false},
		{[]interface{}{"0.0a", 0}, 0, false, false},
		{[]interface{}{"0.0001a", 0}, 1, false, false},
		{[]interface{}{1, nil}, 1, false, false},
		{[]interface{}{nil, 1}, 1, false, false},
		{[]interface{}{nil, 0}, 0, true, false},
		{[]interface{}{0.000, 0}, 0, false, false},
		{[]interface{}{0.001, 0}, 1, false, false},
		{[]interface{}{nil, 0.000}, 0, true, false},
		{[]interface{}{nil, 0.001}, 1, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000000"), 0}, 0, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000000"), 1}, 1, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000000"), nil}, 0, true, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000001"), 0}, 1, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000001"), 1}, 1, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000001"), nil}, 1, false, false},
		{[]interface{}{errors.New("must error"), 1}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.LogicOr, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetInt64())
			}
		}
	}
	// Test incorrect parameter count.
	_, err := newFunctionForTest(ctx, ast.LogicOr, NewZero())
	require.Error(t, err)
	_, err = funcs[ast.LogicOr].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestBitAnd exercises the bitwise AND builtin, including a negative
// operand, NULL propagation and an evaluation error, then checks arity
// validation.
func TestBitAnd(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     []interface{} // operands passed to AND
		expected int64         // expected result when not NULL and no error
		isNil    bool          // whether the result should be NULL
		getErr   bool          // whether evaluation should fail
	}{
		{[]interface{}{123, 321}, 65, false, false},
		{[]interface{}{-123, 321}, 257, false, false},
		{[]interface{}{nil, 1}, 0, true, false},
		{[]interface{}{errors.New("must error"), 1}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.And, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetInt64())
			}
		}
	}
	// Test incorrect parameter count.
	_, err := newFunctionForTest(ctx, ast.And, NewZero())
	require.Error(t, err)
	_, err = funcs[ast.And].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestBitNeg exercises the bitwise NOT builtin; results are compared as
// uint64 since MySQL bit operations are unsigned.
func TestBitNeg(t *testing.T) {
	ctx := createContext(t)
	sc := ctx.GetSessionVars().StmtCtx
	origin := sc.IgnoreTruncate.Load()
	defer func() {
		sc.IgnoreTruncate.Store(origin)
	}()
	// Tolerate lossy coercions during evaluation; restored on exit.
	sc.IgnoreTruncate.Store(true)
	cases := []struct {
		args     []interface{} // single operand of ~
		expected uint64        // expected unsigned result
		isNil    bool          // whether the result should be NULL
		getErr   bool          // whether evaluation should fail
	}{
		{[]interface{}{123}, uint64(18446744073709551492), false, false},
		{[]interface{}{-123}, uint64(122), false, false},
		{[]interface{}{nil}, 0, true, false},
		{[]interface{}{errors.New("must error")}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.BitNeg, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetUint64())
			}
		}
	}
	// Test incorrect parameter count (BitNeg is unary).
	_, err := newFunctionForTest(ctx, ast.BitNeg, NewZero(), NewZero())
	require.Error(t, err)
	_, err = funcs[ast.BitNeg].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestUnaryNot exercises logical NOT over numeric, string, decimal,
// NULL and JSON operands, then checks arity validation.
func TestUnaryNot(t *testing.T) {
	ctx := createContext(t)
	sc := ctx.GetSessionVars().StmtCtx
	origin := sc.IgnoreTruncate.Load()
	defer func() {
		sc.IgnoreTruncate.Store(origin)
	}()
	// Tolerate lossy string-to-number coercions; restored on exit.
	sc.IgnoreTruncate.Store(true)
	cases := []struct {
		args     []interface{} // single operand of NOT
		expected int64         // expected result when not NULL and no error
		isNil    bool          // whether the result should be NULL
		getErr   bool          // whether evaluation should fail
	}{
		{[]interface{}{1}, 0, false, false},
		{[]interface{}{0}, 1, false, false},
		{[]interface{}{123}, 0, false, false},
		{[]interface{}{-123}, 0, false, false},
		{[]interface{}{"123"}, 0, false, false},
		{[]interface{}{float64(0.3)}, 0, false, false},
		{[]interface{}{"0.3"}, 0, false, false},
		{[]interface{}{types.NewDecFromFloatForTest(0.3)}, 0, false, false},
		{[]interface{}{nil}, 0, true, false},
		{[]interface{}{types.CreateBinaryJSON(int64(0))}, 1, false, false},
		{[]interface{}{types.CreateBinaryJSON(map[string]interface{}{"test": "test"})}, 0, false, false},
		{[]interface{}{errors.New("must error")}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.UnaryNot, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetInt64())
			}
		}
	}
	// Test incorrect parameter count (UnaryNot is unary).
	_, err := newFunctionForTest(ctx, ast.UnaryNot, NewZero(), NewZero())
	require.Error(t, err)
	_, err = funcs[ast.UnaryNot].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestIsTrueOrFalse runs the same operand table through both the
// IS TRUE (IsTruthWithoutNull) and IS FALSE (IsFalsity) builtins.
// For NULL both predicates yield 0.
func TestIsTrueOrFalse(t *testing.T) {
	ctx := createContext(t)
	sc := ctx.GetSessionVars().StmtCtx
	origin := sc.IgnoreTruncate.Load()
	defer func() {
		sc.IgnoreTruncate.Store(origin)
	}()
	// Tolerate lossy coercions (e.g. "aaa" -> 0); restored on exit.
	sc.IgnoreTruncate.Store(true)
	testCases := []struct {
		args    []interface{} // single operand
		isTrue  interface{}   // expected IS TRUE result
		isFalse interface{}   // expected IS FALSE result
	}{
		{
			args:    []interface{}{-12},
			isTrue:  1,
			isFalse: 0,
		},
		{
			args:    []interface{}{12},
			isTrue:  1,
			isFalse: 0,
		},
		{
			args:    []interface{}{0},
			isTrue:  0,
			isFalse: 1,
		},
		{
			args:    []interface{}{float64(0)},
			isTrue:  0,
			isFalse: 1,
		},
		{
			args:    []interface{}{"aaa"},
			isTrue:  0,
			isFalse: 1,
		},
		{
			args:    []interface{}{""},
			isTrue:  0,
			isFalse: 1,
		},
		{
			args:    []interface{}{"0.3"},
			isTrue:  1,
			isFalse: 0,
		},
		{
			args:    []interface{}{float64(0.3)},
			isTrue:  1,
			isFalse: 0,
		},
		{
			args:    []interface{}{types.NewDecFromFloatForTest(0.3)},
			isTrue:  1,
			isFalse: 0,
		},
		{
			args:    []interface{}{nil},
			isTrue:  0,
			isFalse: 0,
		},
		{
			args:    []interface{}{types.NewDuration(0, 0, 0, 1000, 3)},
			isTrue:  1,
			isFalse: 0,
		},
		{
			args:    []interface{}{types.NewDuration(0, 0, 0, 0, 3)},
			isTrue:  0,
			isFalse: 1,
		},
		{
			args:    []interface{}{types.NewTime(types.FromDate(0, 0, 0, 0, 0, 0, 1000), mysql.TypeDatetime, 3)},
			isTrue:  1,
			isFalse: 0,
		},
		{
			args:    []interface{}{types.NewTime(types.CoreTime(0), mysql.TypeTimestamp, 3)},
			isTrue:  0,
			isFalse: 1,
		},
	}
	// IS TRUE over the whole table.
	for _, tc := range testCases {
		isTrueSig, err := funcs[ast.IsTruthWithoutNull].getFunction(ctx, datumsToConstants(types.MakeDatums(tc.args...)))
		require.NoError(t, err)
		require.NotNil(t, isTrueSig)
		isTrue, err := evalBuiltinFunc(isTrueSig, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(tc.isTrue), isTrue)
	}
	// IS FALSE over the same table.
	for _, tc := range testCases {
		isFalseSig, err := funcs[ast.IsFalsity].getFunction(ctx, datumsToConstants(types.MakeDatums(tc.args...)))
		require.NoError(t, err)
		require.NotNil(t, isFalseSig)
		isFalse, err := evalBuiltinFunc(isFalseSig, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(tc.isFalse), isFalse)
	}
}
// TestLogicXor exercises the builtin logical XOR over int, float,
// string, decimal and NULL operands, then checks arity validation.
// Unlike OR, any NULL operand makes XOR NULL.
func TestLogicXor(t *testing.T) {
	ctx := createContext(t)
	sc := ctx.GetSessionVars().StmtCtx
	origin := sc.IgnoreTruncate.Load()
	defer func() {
		sc.IgnoreTruncate.Store(origin)
	}()
	// Ignore truncation so strings like "1a" coerce to numbers; the
	// deferred call above restores the flag.
	sc.IgnoreTruncate.Store(true)
	cases := []struct {
		args     []interface{} // operands passed to XOR
		expected int64         // expected result when not NULL and no error
		isNil    bool          // whether the result should be NULL
		getErr   bool          // whether evaluation should fail
	}{
		{[]interface{}{1, 1}, 0, false, false},
		{[]interface{}{1, 0}, 1, false, false},
		{[]interface{}{0, 1}, 1, false, false},
		{[]interface{}{0, 0}, 0, false, false},
		{[]interface{}{2, -1}, 0, false, false},
		{[]interface{}{"a", "0"}, 0, false, false},
		{[]interface{}{"a", "1"}, 1, false, false},
		{[]interface{}{"1a", "0"}, 1, false, false},
		{[]interface{}{"1a", "1"}, 0, false, false},
		{[]interface{}{0, nil}, 0, true, false},
		{[]interface{}{nil, 0}, 0, true, false},
		{[]interface{}{nil, 1}, 0, true, false},
		{[]interface{}{0.5000, 0.4999}, 0, false, false},
		{[]interface{}{0.5000, 1.0}, 0, false, false},
		{[]interface{}{0.4999, 1.0}, 0, false, false},
		{[]interface{}{nil, 0.000}, 0, true, false},
		{[]interface{}{nil, 0.001}, 0, true, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000001"), 0.00001}, 0, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000001"), 1}, 0, false, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000000"), nil}, 0, true, false},
		{[]interface{}{types.NewDecFromStringForTest("0.000001"), nil}, 0, true, false},
		{[]interface{}{errors.New("must error"), 1}, 0, false, true},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.LogicXor, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetInt64())
			}
		}
	}
	// Test incorrect parameter count.
	_, err := newFunctionForTest(ctx, ast.LogicXor, NewZero())
	require.Error(t, err)
	_, err = funcs[ast.LogicXor].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-11-10 19:41
# @File : lt_713_Subarray_Product_Less_Than_K.go
# @Description :
# @Attention :
*/
package two_points
/*
Combinatorial counting problem: count the contiguous subarrays whose element product is less than k.
Two pointers (sliding window):
whenever the window's product reaches or exceeds k, divide out values at the left pointer until
the product drops below k again, then let the right pointer keep advancing.
*/
//
// func numSubarrayProductLessThanK(nums []int, k int) int {
// if k < 0 {
// return 0
// }
//
// value, left, right, res := 1, 0, 0, 0
// for ; right < len(nums); right++ {
// value *= nums[right]
// for value >= k && left< len(nums) {
// value /= nums[left]
// left++
// }
// res+=right-left+1
// }
//
// return res
// }
// numSubarrayProductLessThanK counts the contiguous subarrays of nums
// whose element product is strictly less than k, using a sliding
// window. Once the window [left, right] has product < k, every
// subarray ending at right and starting at or after left qualifies,
// contributing right-left+1 to the total.
func numSubarrayProductLessThanK(nums []int, k int) int {
	count, product, left := 0, 1, 0
	for right, v := range nums {
		product *= v
		// Shrink from the left until the product drops below k
		// (or the window becomes empty).
		for left <= right && product >= k {
			product /= nums[left]
			left++
		}
		count += right - left + 1
	}
	return count
}
|
package cmd
import (
"encoding/hex"
"encoding/json"
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
"strconv"
)
// blockTxsCmd prints, as indented JSON, the TxResult of every
// transaction in the block at the given height. It reads the block
// from the "blockstore" LevelDB and each tx result from the
// "tx_index" LevelDB under dataPath.
var blockTxsCmd = &cobra.Command{
	Use:   "block-txs [height]",
	Short: "query txs in block by height.",
	Args:  cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		height, err := strconv.ParseInt(args[0], 10, 64)
		if err != nil {
			return err
		}
		db, err := dbm.NewDB("blockstore", dbm.GoLevelDBBackend, dataPath)
		// Fix: this error was previously ignored.
		validateError(err)
		blockStore := store.NewBlockStore(db)
		block := blockStore.LoadBlock(height)
		txs := block.Txs
		// Hash each raw tx so it can be looked up in the tx index.
		txHashes := make([]string, len(txs))
		for i, txBytes := range txs {
			txHashes[i] = fmt.Sprintf("%X", tmhash.Sum(txBytes))
		}
		cdc := amino.NewCodec()
		txDB, err := dbm.NewDB("tx_index", dbm.GoLevelDBBackend, dataPath)
		validateError(err)
		var txResultList []*types.TxResult
		for _, txHash := range txHashes {
			hash, _ := hex.DecodeString(txHash)
			rawBytes, err := txDB.Get(hash)
			validateError(err)
			txResult := new(types.TxResult)
			err = cdc.UnmarshalBinaryBare(rawBytes, &txResult)
			validateError(err)
			txResultList = append(txResultList, txResult)
		}
		// Fix: MarshalIndent takes (v, prefix, indent); the arguments
		// were swapped, producing prefixed but unindented output.
		result, err := json.MarshalIndent(txResultList, "", " ")
		validateError(err)
		// Fix: use Println instead of Printf — the JSON may contain '%'
		// runes that Printf would misinterpret as format verbs; the
		// duplicated unreachable `return nil` is also removed.
		fmt.Println(string(result))
		return nil
	},
}
|
package sip
import (
"strings"
uriLib "github.com/superirale/sipserver/uri"
"github.com/superirale/sipserver/utils"
// "fmt"
)
// Authorization holds the parsed fields of a SIP Digest Authorization
// header: the credential type, user identity, server challenge values
// and the computed digest response.
type Authorization struct {
	AuthType  string
	Username  string
	Realm     string
	Nonce     string
	Uri       uriLib.URI
	Response  string
	Algorithm string
	isAuthSet bool // true once populated by BuildAuthorizationHeader
}
// IsAuthSet reports whether this Authorization was populated from a
// header (BuildAuthorizationHeader sets the flag).
func (auth *Authorization) IsAuthSet() bool {
	return auth.isAuthSet
}
// BuildAuthorizationHeader parses the value of a SIP Authorization
// header (e.g. `Digest username="...",realm="...",uri="sip:...",...`)
// into an Authorization struct. The uri property is parsed into a
// uriLib.URI (including any ;key=value tags); every other property is
// copied onto the struct by its title-cased name via reflection.
// NOTE(review): the header is split on spaces, but the sample header
// below separates properties with commas and no spaces — confirm the
// expected wire format against real traffic.
func BuildAuthorizationHeader(auth string) Authorization {
	var authorize Authorization
	authorize.isAuthSet = true
	authArr := strings.Split(auth, " ")
	// username="usmanirale",realm="0.0.0.0:5060",nonce="8b78cfb87e909e14c61a6cceeb5a0c7c",uri="sip:192.168.8.102",response="b0b1d50e70616c8a73dc8a8da52b8f99",algorithm=MD5
	for _, prop := range authArr {
		// Strip the single leading/trailing comma left by the split.
		prop = strings.Replace(prop, ",", "", 1)
		isURIString := strings.Contains(prop, "uri")
		if isURIString {
			// uri property: extract the bare URI and its tags.
			tags := make(map[string]string)
			uriText := strings.Replace(prop, "\"", "", 2)
			uriText = strings.Replace(uriText, "uri=", "", 1)
			// build Uri
			// e.g. uri="sip:192.168.8.101;transport=UDP"
			uriArr := strings.Split(prop, ";")
			// uri="sip:192.168.8.101
			uriStrArr := strings.Split(prop, "=")
			// "sip:192.168.8.101
			uri := strings.Replace(uriStrArr[1], "\"", "", 1)
			if len(uriArr) > 1 {
				// Remaining ;key=value segments become tags.
				for c := 1; c < len(uriArr); c++ {
					// e.g. transport=UDP"
					tagArr := strings.Split(uriArr[c], "=")
					if len(tagArr) == 2 {
						key := tagArr[0]
						value := tagArr[1]
						tags[key] = strings.Replace(value, "\"", "", 1)
					}
				}
			}
			uriObj := uriLib.BuildURI(uri, uriText, tags)
			authorize.Uri = *uriObj
		} else {
			// Ordinary key=value property: set the matching struct field.
			propArr := strings.Split(prop, "=")
			if len(propArr) > 1 {
				// NOTE(review): strings.Title always capitalizes the first
				// letter, so the comparison with lowercase "uri" below can
				// never be false (presumably "Uri" was intended). Harmless
				// today because uri properties take the branch above.
				// strings.Title is also deprecated since Go 1.18.
				propTitleCase := strings.Title(propArr[0])
				if propTitleCase != "uri" {
					propValue := strings.Replace(propArr[1], "\"", "", 2)
					utils.SetField(&authorize, propTitleCase, propValue)
				}
			}
		}
	}
	return authorize
}
|
package LatticeReduction
import (
"fmt"
"math/rand"
"testing"
)
var (
	// SmallBasisTest is a fixed 3x3 int64 lattice basis used by the
	// Example tests below.
	SmallBasisTest = Int64Basis{
		[]int64{1, 1, 1},
		[]int64{-1, 0, 2},
		[]int64{3, 5, 6},
	}
	// LargeBasisTest is the same basis promoted to big-integer form.
	LargeBasisTest = SmallBasisTest.PremoteToBig()
)

// size is the dimension of the random square bases generated by the
// tests and benchmarks below.
const size = 35
// TestXx compares plain L3FP reduction against deep-insertion L3FPDeep
// on random bases and logs how often the plain variant produced the
// smaller (<=) reduced norm.
func TestXx(t *testing.T) {
	iterations := 250
	if testing.Short() {
		iterations = 10
	}
	wins := 0
	for run := 0; run < iterations; run++ {
		// Build a random size x size int64 basis.
		basis := make(Int64Basis, size)
		for row := range basis {
			rowVec := make([]int64, size)
			basis[row] = rowVec
			for col := range rowVec {
				rowVec[col] = rand.Int63n(0xFFFFFF)
			}
		}
		plain := L3FP(basis, 0.75).(Int64Basis).Mod()
		deep := L3FPDeep(basis, 0.75, 4).(Int64Basis).Mod()
		if plain <= deep {
			wins++
		}
	}
	t.Logf("L3FP %d: L3FPDeep %d (events smaller)", wins, iterations-wins)
}
// TestXxx compares plain L3FP reduction against L3FP followed by a
// pairwise reduction pass, logging how often the plain variant was
// smaller (<=).
func TestXxx(t *testing.T) {
	iterations := 250
	if testing.Short() {
		iterations = 10
	}
	wins := 0
	for run := 0; run < iterations; run++ {
		// Build a random size x size int64 basis.
		basis := make(Int64Basis, size)
		for row := range basis {
			rowVec := make([]int64, size)
			basis[row] = rowVec
			for col := range rowVec {
				rowVec[col] = rand.Int63n(0xFFFFFF)
			}
		}
		plain := L3FP(basis, 0.75).(Int64Basis).Mod()
		pairwise := PairwiseReduce(L3FP(basis, 0.75)).(Int64Basis).Mod()
		if plain <= pairwise {
			wins++
		}
	}
	t.Logf("L3FP %d: Pairwise L3FP %d (events smaller)", wins, iterations-wins)
}
// TestXxxx compares L3FPDeep reduction against L3FPDeep followed by a
// pairwise reduction pass, logging how often the former was smaller
// (<=).
func TestXxxx(t *testing.T) {
	iterations := 250
	if testing.Short() {
		iterations = 10
	}
	wins := 0
	for run := 0; run < iterations; run++ {
		// Build a random size x size int64 basis.
		basis := make(Int64Basis, size)
		for row := range basis {
			rowVec := make([]int64, size)
			basis[row] = rowVec
			for col := range rowVec {
				rowVec[col] = rand.Int63n(0xFFFFFF)
			}
		}
		deep := L3FPDeep(basis, 0.75, 4).(Int64Basis).Mod()
		pairwise := PairwiseReduce(L3FPDeep(basis, 0.75, 4)).(Int64Basis).Mod()
		if deep <= pairwise {
			wins++
		}
	}
	t.Logf("L3FPDeep %d: Pairwise L3FPDeep %d (events smaller)", wins, iterations-wins)
}
// BenchmarkSmallBasisL3FP20x20 benchmarks int64 L3FP reduction on
// random bases; basis generation is excluded via StopTimer/StartTimer.
// NOTE(review): the name says 20x20 but `size` is 35 — confirm intent.
func BenchmarkSmallBasisL3FP20x20(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		basis := make(Int64Basis, size)
		for j := 0; j < size; j++ {
			v := make([]int64, size)
			basis[j] = v
			for k := 0; k < size; k++ {
				v[k] = rand.Int63n(0x7FFFFFFFFFFFFFF)
			}
		}
		b.StartTimer()
		_ = L3FP(basis, 0.75)
	}
}
// BenchmarkSmallBasisL3FPDeep20x20 benchmarks int64 L3FPDeep reduction
// on random bases; generation is excluded via StopTimer/StartTimer.
// NOTE(review): the name says 20x20 but `size` is 35 — confirm intent.
func BenchmarkSmallBasisL3FPDeep20x20(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		basis := make(Int64Basis, size)
		for j := 0; j < size; j++ {
			v := make([]int64, size)
			basis[j] = v
			for k := 0; k < size; k++ {
				v[k] = rand.Int63n(0x7FFFFFFFFFFFFFF)
			}
		}
		b.StartTimer()
		_ = L3FPDeep(basis, 0.75, 4)
	}
}
// BenchmarkLargeBasisL3FP20x20 benchmarks L3FP reduction on the same
// random bases promoted to big-integer form; generation and promotion
// are excluded from timing.
// NOTE(review): the name says 20x20 but `size` is 35 — confirm intent.
func BenchmarkLargeBasisL3FP20x20(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		basis := make(Int64Basis, size)
		for j := 0; j < size; j++ {
			v := make([]int64, size)
			basis[j] = v
			for k := 0; k < size; k++ {
				v[k] = rand.Int63n(0x7FFFFFFFFFFFFFF)
			}
		}
		large := basis.PremoteToBig()
		b.StartTimer()
		_ = L3FP(large, 0.75)
	}
}
// BenchmarkLargeBasisL3FPDeep20x20 benchmarks L3FPDeep reduction on
// big-integer bases; generation and promotion are excluded from timing.
// NOTE(review): the name says 20x20 but `size` is 35 — confirm intent.
func BenchmarkLargeBasisL3FPDeep20x20(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		basis := make(Int64Basis, size)
		for j := 0; j < size; j++ {
			v := make([]int64, size)
			basis[j] = v
			for k := 0; k < size; k++ {
				v[k] = rand.Int63n(0x7FFFFFFFFFFFFFF)
			}
		}
		large := basis.PremoteToBig()
		b.StartTimer()
		_ = L3FPDeep(large, 0.75, 4)
	}
}
// ExampleReduceSmallL3FP reduces the fixed 3x3 int64 basis with L3FP
// and pins the printed input and reduced rows.
func ExampleReduceSmallL3FP() {
	fmt.Println(SmallBasisTest)
	fmt.Println(L3FP(SmallBasisTest, 0.75))
	// Output:
	// [1 1 1]
	// [-1 0 2]
	// [3 5 6]
	//
	// [0 1 0]
	// [1 0 1]
	// [-1 0 2]
}
// ExampleReduceBigL3FP reduces the big-integer form of the fixed basis
// with L3FP; the expected output matches the int64 case.
func ExampleReduceBigL3FP() {
	fmt.Println(LargeBasisTest)
	fmt.Println(L3FP(LargeBasisTest, 0.75))
	// Output:
	// [1 1 1]
	// [-1 0 2]
	// [3 5 6]
	//
	// [0 1 0]
	// [1 0 1]
	// [-1 0 2]
}
// ExampleReduceSmallL3FPDeep reduces the fixed 3x3 int64 basis with
// L3FPDeep (depth 4) and pins the printed rows.
func ExampleReduceSmallL3FPDeep() {
	fmt.Println(SmallBasisTest)
	fmt.Println(L3FPDeep(SmallBasisTest, 0.75, 4))
	// Output:
	// [1 1 1]
	// [-1 0 2]
	// [3 5 6]
	//
	// [0 1 0]
	// [1 0 1]
	// [-1 0 2]
}
// ExampleReduceBigL3FPDeep reduces the big-integer basis with L3FPDeep
// (depth 4); the expected output matches the int64 case.
func ExampleReduceBigL3FPDeep() {
	fmt.Println(LargeBasisTest)
	fmt.Println(L3FPDeep(LargeBasisTest, 0.75, 4))
	// Output:
	// [1 1 1]
	// [-1 0 2]
	// [3 5 6]
	//
	// [0 1 0]
	// [1 0 1]
	// [-1 0 2]
}
|
package main
import (
"context"
"io/ioutil"
"log"
"net"
"os"
"time"
task "github.com/HarshVaragiya/LearningGo/Protobuf/gRPC/taskproto"
grpc "google.golang.org/grpc"
"google.golang.org/protobuf/proto"
)
// dataStore is the path of the protobuf file persisting the TaskList.
var dataStore = "task-datastore.pb"
// taskServer implements the Tasks gRPC service, backed by the protobuf
// datastore file on disk.
type taskServer struct {
}
// List returns every task currently persisted in the datastore file.
func (s taskServer) List(ctx context.Context, void *task.Void) (*task.TaskList, error) {
	log.Println("client requested task list.")
	return listTasks()
}
// Add stamps the incoming task with the current time (RFC850) and
// appends it to the datastore file.
func (s taskServer) Add(ctx context.Context, newTask *task.Task) (*task.Void, error) {
	log.Printf("client requested adding new task: %s", newTask.Name)
	newTask.Added = time.Now().Format(time.RFC850)
	err := addTask(newTask)
	return &task.Void{}, err
}
// main registers the taskServer implementation and serves gRPC on
// port 8888 until the listener fails.
func main() {
	log.Println("Starting TaskManager gRPC Server")
	srv := grpc.NewServer()
	var taskserver taskServer
	log.Println("Registering task server")
	task.RegisterTasksServer(srv, taskserver)
	listener, err := net.Listen("tcp", ":8888")
	if err != nil {
		log.Fatalf("could not start listener. error = %v", err)
	}
	log.Println("attached to listener port. serving gRPC service")
	// Serve blocks until the listener errors or the server is stopped.
	err = srv.Serve(listener)
	if err != nil {
		log.Fatal(err)
	}
}
// addTask appends taskObject to the TaskList persisted in dataStore and
// rewrites the whole file. A missing datastore is treated as an empty
// list, matching the previous O_CREATE behaviour.
func addTask(taskObject *task.Task) error {
	existing, err := ioutil.ReadFile(dataStore)
	if err != nil {
		if !os.IsNotExist(err) {
			log.Printf("error reading datastore file %v. err = %v", dataStore, err)
			return err
		}
		// First run: no datastore yet, start from an empty list.
		existing = nil
	} else {
		log.Printf("read %v bytes from datastore", len(existing))
	}
	var taskList task.TaskList
	if err := proto.Unmarshal(existing, &taskList); err != nil {
		log.Println("error decoding datastore information. existing data might be corrupted.")
		log.Printf("error = %v", err)
		return err
	}
	taskList.Tasks = append(taskList.Tasks, taskObject)
	outputBytes, err := proto.Marshal(&taskList)
	if err != nil {
		log.Printf("error marshalling the task list object. error = %v", err)
		return err
	}
	// WriteFile opens, writes and closes the file itself, fixing the
	// previous version's descriptor leak: the opened file was never
	// closed when reading, decoding or marshalling failed. It also fixes
	// the log.Print call that used a %v verb without being Printf.
	if err := ioutil.WriteFile(dataStore, outputBytes, 0666); err != nil {
		log.Printf("error writing bytes in the file. err = %v", err)
		return err
	}
	log.Printf("wrote %v bytes to datastore", len(outputBytes))
	return nil
}
// listTasks loads and returns the TaskList persisted in dataStore.
// On failure it returns the (possibly empty) list together with the
// error, matching the previous behaviour.
func listTasks() (*task.TaskList, error) {
	var taskList task.TaskList
	dataStoreBytes, err := ioutil.ReadFile(dataStore)
	if err != nil {
		log.Printf("error opening data storage file %s : %v ", dataStore, err)
		return &taskList, err
	}
	log.Printf("read %v bytes from datastore", len(dataStoreBytes))
	if err = proto.Unmarshal(dataStoreBytes, &taskList); err != nil {
		log.Println("error decoding datastore information. existing data might be corrupted.")
		log.Printf("error = %v", err)
		return &taskList, err
	}
	// Typo fix: "sucessfully" -> "successfully".
	log.Println("successfully retrieved task list from datastore")
	return &taskList, nil
}
|
package util
import (
"sync"
"testing"
)
// TestWrite hammers a single Writer from 501 goroutines, 10000 writes
// each, to check it is safe for concurrent use.
func TestWrite(t *testing.T) {
	p := "/var/log/6ryim_test/6ryim_test.log"
	w, err := NewWriter(p)
	if err != nil {
		t.Error(err)
	}
	defer w.Close()
	var wg sync.WaitGroup
	for i := 0; i <= 500; i++ {
		// Fix: Add must run before the goroutine starts; previously it ran
		// after `go`, so Done could fire first and Wait could return early
		// (or panic on a negative counter).
		wg.Add(1)
		go func() {
			defer wg.Done()
			for k := 0; k < 10000; k++ {
				// Fix: use a goroutine-local error instead of assigning to
				// the shared outer `err`, which was a data race across all
				// 500+ goroutines.
				if _, werr := w.Write([]byte("this is a log\n")); werr != nil {
					t.Error(werr)
				}
			}
		}()
	}
	wg.Wait()
}
|
package resolvers
// Publication is the JSON-serializable record for a published item
// served by the resolvers in this package.
type Publication struct {
	ID    int    `json:"id"`
	Title string `json:"title"`
	URI   string `json:"uri"`
	Date  string `json:"date"`
}
// test1 and test2 are seed records for the in-memory publication store.
var test1 = Publication{
	ID:    01,
	Title: "test title 1",
	URI:   "www.testuri1.com",
	Date:  "testdate1",
}

var test2 = Publication{
	ID:    02,
	Title: "test title 2",
	URI:   "www.testuri2.com",
	Date:  "testdate2",
}

// publications is the package-level in-memory store backing the
// resolver functions below.
var publications = []Publication{test1, test2}
// GetPublications returns the full in-memory publication list.
func GetPublications() []Publication {
	return publications
}
// GetPublication returns the publication whose ID equals id, or the
// zero-value Publication when no entry matches.
func GetPublication(id int) Publication {
	for i := range publications {
		if publications[i].ID == id {
			return publications[i]
		}
	}
	return Publication{}
}
// CreatePublication builds a Publication from the arguments, appends it
// to the package-level store and returns it.
func CreatePublication(id int, title, uri, dateAdded string) Publication {
	p := Publication{ID: id, Title: title, URI: uri, Date: dateAdded}
	publications = append(publications, p)
	return p
}
|
package main
import (
"fmt"
"math"
)
func Sqrt(x float64) (float64, int) {
aproximation := func(z, x float64) float64 {
return z - ((z*z)-x)/(2*z)
}
i := 0
z := aproximation(1.0, x)
for math.Abs(aproximation(z, x)-z) > 0.000001 {
z = aproximation(z, x)
i++
}
return z, i
}
// main compares our Newton-method Sqrt against math.Sqrt for 1..10,
// also printing how many iterations each approximation took.
func main() {
	for i := 1.0; i < 11.0; i++ {
		ours, iterations := Sqrt(i)
		fmt.Printf("%d: in %d iterations %f vs %f\n", int(i), iterations, ours, math.Sqrt(i))
	}
}
|
package galice
import (
"bytes"
"errors"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
)
// TestPing sends a "ping" request through the handler and expects the
// canned "pong" response, even though the skill callback returns an
// empty OutputData — the client appears to special-case ping requests
// before invoking the callback (NOTE(review): confirm in client code).
func TestPing(t *testing.T) {
	pingBody := `{
	"meta": {
		"locale": "ru-RU",
		"timezone": "Europe/Moscow",
		"client_id": "ru.yandex.searchplugin/5.80 (Samsung Galaxy; Android 4.4)",
		"interfaces": {
			"screen": { }
		}
	},
	"request": {
		"command": "ping",
		"original_utterance": "ping",
		"type": "SimpleUtterance",
		"markup": {
			"dangerous_context": false
		}
	},
	"session": {
		"new": true,
		"message_id": 4,
		"session_id": "2eac4854-fce721f3-b845abba-20d60",
		"skill_id": "3ad36498-f5rd-4079-a14b-788652932056",
		"user_id": "AC9WC3DF6FCE052E45A4566A48E6B7193774B84814CE49A922E163B8B29881DC"
	},
	"version": "1.0"
}`
	// Expected serialized reply; note the trailing newline.
	pongBody := `{"version":"1.0","session":{"new":true,"message_id":4,"session_id":"2eac4854-fce721f3-b845abba-20d60","skill_id":"3ad36498-f5rd-4079-a14b-788652932056","user_id":"AC9WC3DF6FCE052E45A4566A48E6B7193774B84814CE49A922E163B8B29881DC"},"response":{"text":"pong","tts":"pong","end_session":false}}
`
	cli := New(true, true)
	h := cli.CreateHandler(func(i InputData) (OutputData, error) {
		return OutputData{}, nil
	})
	req, err := http.NewRequest("POST", "/skill", bytes.NewReader([]byte(pingBody)))
	require.NoError(t, err)
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)
	require.Equal(t, http.StatusOK, rr.Code)
	require.Equal(t, pongBody, rr.Body.String())
	require.Equal(t, "application/json", rr.Header().Get("Content-type"))
}
// TestDangerousContext sends a request flagged dangerous_context=true
// and expects the canned "I don't understand" reply, even though the
// skill callback returns an empty OutputData — the client appears to
// short-circuit dangerous requests (NOTE(review): confirm in client
// code).
func TestDangerousContext(t *testing.T) {
	dangerousBody := `{
	"meta": {
		"locale": "ru-RU",
		"timezone": "Europe/Moscow",
		"client_id": "ru.yandex.searchplugin/5.80 (Samsung Galaxy; Android 4.4)",
		"interfaces": {
			"screen": { }
		}
	},
	"request": {
		"command": "test",
		"original_utterance": "test",
		"type": "SimpleUtterance",
		"markup": {
			"dangerous_context": true
		}
	},
	"session": {
		"new": true,
		"message_id": 4,
		"session_id": "2eac4854-fce721f3-b845abba-20d60",
		"skill_id": "3ad36498-f5rd-4079-a14b-788652932056",
		"user_id": "AC9WC3DF6FCE052E45A4566A48E6B7193774B84814CE49A922E163B8B29881DC"
	},
	"version": "1.0"
}`
	// Expected serialized reply; note the trailing newline.
	respBody := `{"version":"1.0","session":{"new":true,"message_id":4,"session_id":"2eac4854-fce721f3-b845abba-20d60","skill_id":"3ad36498-f5rd-4079-a14b-788652932056","user_id":"AC9WC3DF6FCE052E45A4566A48E6B7193774B84814CE49A922E163B8B29881DC"},"response":{"text":"Не понимаю, о чем вы. Пожалуйста, переформулируйте вопрос.","tts":"Не понимаю, о чем вы. Пожалуйста, переформулируйте вопрос.","end_session":false}}
`
	cli := New(true, true)
	h := cli.CreateHandler(func(i InputData) (OutputData, error) {
		return OutputData{}, nil
	})
	req, err := http.NewRequest("POST", "/skill", bytes.NewReader([]byte(dangerousBody)))
	require.NoError(t, err)
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)
	require.Equal(t, http.StatusOK, rr.Code)
	require.Equal(t, respBody, rr.Body.String())
	require.Equal(t, "application/json", rr.Header().Get("Content-type"))
}
// TestHandlingExpectedError checks that an error returned by the skill
// callback is routed to the configured logger while the HTTP status
// remains 200.
func TestHandlingExpectedError(t *testing.T) {
	cli := New(true, true)
	logged := ""
	cli.SetLogger(func(err error) {
		logged = err.Error()
	})
	h := cli.CreateHandler(func(i InputData) (OutputData, error) {
		return OutputData{}, errors.New("test")
	})
	req, err := http.NewRequest("POST", "/skill", bytes.NewReader([]byte("{}")))
	require.NoError(t, err)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	require.Equal(t, http.StatusOK, rec.Code)
	require.Equal(t, "test", logged)
}
// TestHandlingUnexpectedError checks that a panic inside the skill
// callback is recovered, logged with an "Unexpected error:" prefix and
// answered with HTTP 500.
func TestHandlingUnexpectedError(t *testing.T) {
	cli := New(true, true)
	logged := ""
	cli.SetLogger(func(err error) {
		logged = err.Error()
	})
	h := cli.CreateHandler(func(i InputData) (OutputData, error) {
		panic(errors.New("test"))
	})
	req, err := http.NewRequest("POST", "/skill", bytes.NewReader([]byte("{}")))
	require.NoError(t, err)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	require.Equal(t, http.StatusInternalServerError, rec.Code)
	require.Equal(t, "Unexpected error: test", logged)
}
// TestGeneralResponse drives a normal request through a callback that
// builds a full response (text, two buttons, end_session=true) and pins
// the exact serialized JSON reply.
func TestGeneralResponse(t *testing.T) {
	body := `{
	"meta": {
		"locale": "ru-RU",
		"timezone": "Europe/Moscow",
		"client_id": "ru.yandex.searchplugin/5.80 (Samsung Galaxy; Android 4.4)",
		"interfaces": {
			"screen": { }
		}
	},
	"request": {
		"command": "hi there",
		"original_utterance": "hi there",
		"type": "SimpleUtterance",
		"markup": {
			"dangerous_context": false
		}
	},
	"session": {
		"new": true,
		"message_id": 4,
		"session_id": "2eac4854-fce721f3-b845abba-20d60",
		"skill_id": "3ad36498-f5rd-4079-a14b-788652932056",
		"user_id": "AC9WC3DF6FCE052E45A4566A48E6B7193774B84814CE49A922E163B8B29881DC"
	},
	"version": "1.0"
}`
	// Expected serialized reply; note the trailing newline.
	resp := `{"version":"1.0","session":{"new":true,"message_id":4,"session_id":"2eac4854-fce721f3-b845abba-20d60","skill_id":"3ad36498-f5rd-4079-a14b-788652932056","user_id":"AC9WC3DF6FCE052E45A4566A48E6B7193774B84814CE49A922E163B8B29881DC"},"response":{"text":"test","tts":"test","buttons":[{"title":"button 1","hide":true,"url":"https://ya.ru","payload":123},{"title":"button 2","hide":false}],"end_session":true}}
`
	cli := New(true, true)
	h := cli.CreateHandler(func(i InputData) (OutputData, error) {
		// Build a response with empty tts (defaults to the text) and two
		// buttons, one carrying a URL and payload.
		r := NewResponse("test", "", true)
		r.AddButton("button 1", true, "https://ya.ru", 123)
		r.AddButton("button 2", false, "", nil)
		o := NewOutput(i, r)
		return o, nil
	})
	req, err := http.NewRequest("POST", "/skill", bytes.NewReader([]byte(body)))
	require.NoError(t, err)
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)
	require.Equal(t, http.StatusOK, rr.Code)
	require.Equal(t, resp, rr.Body.String())
}
|
/*
Create a function that takes an integer number n and returns the formula for (a+b)^n as a string.
Examples
formula(0) ➞ "1"
formula(1) ➞ "a+b"
formula(2) ➞ "a^2+2ab+b^2"
formula(-2) ➞ "1/(a^2+2ab+b^2)"
formula(3) ➞ "a^3+3a^2b+3ab^2+b^3"
formula(5) ➞ "a^5+5a^4b+10a^3b^2+10a^2b^3+5ab^4+b^5"
Notes
Don't put the following in your string:
spaces
*
^1
a^0
b^0
*/
package main
import (
"bytes"
"fmt"
)
// main is a self-test for formula: it asserts the expected expansion of
// (a+b)^n for a range of positive and negative exponents.
func main() {
	assert(formula(0) == "1")
	assert(formula(1) == "a+b")
	assert(formula(2) == "a^2+2ab+b^2")
	assert(formula(-2) == "1/(a^2+2ab+b^2)")
	assert(formula(3) == "a^3+3a^2b+3ab^2+b^3")
	assert(formula(5) == "a^5+5a^4b+10a^3b^2+10a^2b^3+5ab^4+b^5")
	assert(formula(-1) == "1/(a+b)")
	assert(formula(-4) == "1/(a^4+4a^3b+6a^2b^2+4ab^3+b^4)")
	assert(formula(7) == "a^7+7a^6b+21a^5b^2+35a^4b^3+35a^3b^4+21a^2b^5+7ab^6+b^7")
	assert(formula(13) == "a^13+13a^12b+78a^11b^2+286a^10b^3+715a^9b^4+1287a^8b^5+1716a^7b^6+1716a^6b^7+1287a^5b^8+715a^4b^9+286a^3b^10+78a^2b^11+13ab^12+b^13")
	assert(formula(17) == "a^17+17a^16b+136a^15b^2+680a^14b^3+2380a^13b^4+6188a^12b^5+12376a^11b^6+19448a^10b^7+24310a^9b^8+24310a^8b^9+19448a^7b^10+12376a^6b^11+6188a^5b^12+2380a^4b^13+680a^3b^14+136a^2b^15+17ab^16+b^17")
	assert(formula(19) == "a^19+19a^18b+171a^17b^2+969a^16b^3+3876a^15b^4+11628a^14b^5+27132a^13b^6+50388a^12b^7+75582a^11b^8+92378a^10b^9+92378a^9b^10+75582a^8b^11+50388a^7b^12+27132a^6b^13+11628a^5b^14+3876a^4b^15+969a^3b^16+171a^2b^17+19ab^18+b^19")
	assert(formula(-23) == "1/(a^23+23a^22b+253a^21b^2+1771a^20b^3+8855a^19b^4+33649a^18b^5+100947a^17b^6+245157a^16b^7+490314a^15b^8+817190a^14b^9+1144066a^13b^10+1352078a^12b^11+1352078a^11b^12+1144066a^10b^13+817190a^9b^14+490314a^8b^15+245157a^7b^16+100947a^6b^17+33649a^5b^18+8855a^4b^19+1771a^3b^20+253a^2b^21+23ab^22+b^23)")
	assert(formula(27) == "a^27+27a^26b+351a^25b^2+2925a^24b^3+17550a^23b^4+80730a^22b^5+296010a^21b^6+888030a^20b^7+2220075a^19b^8+4686825a^18b^9+8436285a^17b^10+13037895a^16b^11+17383860a^15b^12+20058300a^14b^13+20058300a^13b^14+17383860a^12b^15+13037895a^11b^16+8436285a^10b^17+4686825a^9b^18+2220075a^8b^19+888030a^7b^20+296010a^6b^21+80730a^5b^22+17550a^4b^23+2925a^3b^24+351a^2b^25+27ab^26+b^27")
}
// assert panics when x is false; a minimal test helper for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// formula renders the binomial expansion of (a+b)^n as a compact
// string, e.g. formula(2) == "a^2+2ab+b^2". A negative exponent yields
// the reciprocal form "1/(...)"; formula(0) is "1". Coefficients of 1
// and exponents of 0 or 1 are omitted per the problem statement.
func formula(n int) string {
	if n == 0 {
		return "1"
	}
	reciprocal := n < 0
	if reciprocal {
		n = -n
	}
	var buf bytes.Buffer
	for i := n; i >= 0; i-- {
		// Join terms with '+' instead of trimming a trailing one.
		if i < n {
			buf.WriteByte('+')
		}
		j := n - i
		// Coefficient C(n, j), omitted when it is 1.
		if c := binomial(n, j); c != 1 {
			fmt.Fprintf(&buf, "%d", c)
		}
		if i > 0 {
			buf.WriteByte('a')
			if i > 1 {
				fmt.Fprintf(&buf, "^%d", i)
			}
		}
		if j > 0 {
			buf.WriteByte('b')
			if j > 1 {
				fmt.Fprintf(&buf, "^%d", j)
			}
		}
	}
	if reciprocal {
		return fmt.Sprintf("1/(%s)", buf.String())
	}
	return buf.String()
}
// binomial returns C(n, k), the binomial coefficient, computed with the
// multiplicative formula on the smaller of k and n-k; it returns 0 for
// k outside [0, n].
func binomial(n, k int) int {
	switch {
	case k < 0 || k > n:
		return 0
	case k == 0 || k == n:
		return 1
	}
	// Use the symmetric, smaller index to keep the loop short.
	if n-k < k {
		k = n - k
	}
	result := 1
	// Each prefix product C(n, i) is an integer, so the division is exact.
	for i := 1; i <= k; i++ {
		result = result * (n - i + 1) / i
	}
	return result
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
|
// taken from https://code.google.com/p/wsdl-go
package main
import "encoding/xml"
// definitions mirrors the root <definitions> element of a WSDL document.
type definitions struct {
	XMLName         xml.Name  `xml:"definitions"`
	TargetNamespace string    `xml:"targetNamespace,attr"`
	Name            string    `xml:"name,attr"`
	Types           xmlType   `xml:"types"`
	Messages        []message `xml:"message"`
	PortType        portType  `xml:"portType"`
	Binding         []binding `xml:"binding"`
	Service         service   `xml:"service"`
}
// xmlType wraps the <types> element, holding the embedded XML schemas.
type xmlType struct {
	Schemas []schema `xml:"schema"`
}

// message describes a WSDL <message> with its single <part>.
type message struct {
	Name string `xml:"name,attr"`
	Part part   `xml:"part"`
}

// part names the schema element carried by a message part.
type part struct {
	Name    string `xml:"name,attr"`
	Element string `xml:"element,attr"`
}

// portType lists the abstract operations offered by the service.
type portType struct {
	Name       string              `xml:"name,attr"`
	Operations []portTypeOperation `xml:"operation"`
}

// portTypeOperation holds the input/output/fault messages of one
// abstract operation.
type portTypeOperation struct {
	Name   string                   `xml:"name,attr"`
	Input  portTypeOperationMessage `xml:"input"`
	Output portTypeOperationMessage `xml:"output"`
	Fault  portTypeOperationMessage `xml:"fault"`
}

// portTypeOperationMessage references the message used by one leg of an
// operation.
type portTypeOperationMessage struct {
	Name    string `xml:"name,attr,omitempty"`
	Message string `xml:"message,attr"`
}
// binding maps an abstract portType to a concrete SOAP binding and its
// per-operation details.
type binding struct {
	Name        string             `xml:"name,attr"`
	Type        string             `xml:"type,attr"`
	SoapBinding soapBinding        `xml:"binding"`
	Operations  []bindingOperation `xml:"operation"`
}

// soapBinding carries the transport and style of the SOAP binding.
type soapBinding struct {
	XMLName   xml.Name `xml:"binding"`
	Transport string   `xml:"transport,attr"`
	Style     string   `xml:"style,attr"`
}

// bindingOperation describes one bound operation: its SOAPAction and
// the body encodings of input, output and fault.
type bindingOperation struct {
	Name          string        `xml:"name,attr"`
	SoapOperation soapOperation `xml:"operation"`
	Input         soapBodyIO    `xml:"input"`
	Output        soapBodyIO    `xml:"output"`
	Fault         soapBody      `xml:"fault>fault"`
}

// soapOperation carries the SOAPAction URI of a bound operation.
type soapOperation struct {
	SoapAction string `xml:"soapAction,attr"`
}

// soapBodyIO wraps the <body> of an input or output element.
type soapBodyIO struct {
	SoapBody soapBody `xml:"body"`
}

// soapBody describes how a message body is encoded ("use").
type soapBody struct {
	Name string `xml:"name,attr,omitempty"`
	Use  string `xml:"use,attr"`
}
// service names the service and its single concrete port.
type service struct {
	Name string      `xml:"name,attr"`
	Port servicePort `xml:"port"`
}

// servicePort ties a binding to a network address.
type servicePort struct {
	XMLName xml.Name       `xml:"port"`
	Name    string         `xml:"name,attr"`
	Binding string         `xml:"binding,attr"`
	Address serviceAddress `xml:"address"`
}

// serviceAddress carries the endpoint location URL.
type serviceAddress struct {
	XMLName  xml.Name `xml:"address"`
	Location string   `xml:"location,attr"`
}
|
package game
import (
"github.com/golang/glog"
"log"
"qipai/dao"
"qipai/utils"
"zero"
)
// handler is the signature of a message-processing callback.
type handler func(s *zero.Session, msg *zero.Message)

// handlerWrap pairs a handler with whether it requires an
// authenticated (logged-in) session before it may run.
type handlerWrap struct {
	needAuth bool // whether the session must be authenticated first
	handler  handler
}

// handlers maps message IDs to their registered processing functions.
var handlers map[int32]handlerWrap = make(map[int32]handlerWrap)
// AddHandler registers the processing function for msgID; the handler
// runs without requiring an authenticated session. Registering the
// same ID twice is treated as a fatal programming error.
func AddHandler(msgID int32, handler handler) {
	// Abort if a handler is already registered for this message ID.
	if _, ok := handlers[msgID]; ok {
		glog.Fatalln(msgID, "消息处理函数已存在,请勿重复添加")
	}
	handlers[msgID] = handlerWrap{handler: handler}
}
// AddAuthHandler registers a processing function for msgID that may
// only run for authenticated (logged-in) sessions. Registering the
// same ID twice is treated as a fatal programming error.
func AddAuthHandler(msgID int32, handler handler) {
	// Abort if a handler is already registered for this message ID.
	if _, ok := handlers[msgID]; ok {
		glog.Fatalln(msgID, "消息处理函数已存在,请勿重复添加")
	}
	handlers[msgID] = handlerWrap{handler: handler, needAuth: true}
}
// HandleMessage dispatches an incoming message to its registered
// handler, rejecting auth-required handlers when the session is not
// logged in.
func HandleMessage(s *zero.Session, msg *zero.Message) {
	msgID := msg.GetID()
	handler, ok := handlers[msgID]
	if !ok {
		// No handler registered for this message ID.
		glog.Warning("存在未处理的消息,编号为:", msgID)
		return
	}
	// If the handler requires login and the session is not logged in,
	// answer with a permission error instead of running it.
	if handler.needAuth && !IsLogin(s) {
		utils.Msg("未登陆,无权执行该操作").Code(-1).Send(NoPermission, s)
		return
	}
	handler.handler(s, msg)
}
// HandleDisconnect handles a network disconnect: when the session's
// player is logged in, the player is removed from the online lists and
// persisted to the database.
func HandleDisconnect(s *zero.Session, err error) {
	glog.V(2).Infoln(s.GetConn().GetName() + " 掉线")
	// If the player was logged in, save the disconnected player.
	if IsLogin(s) {
		p, e := GetPlayerFromSession(s)
		if e != nil {
			glog.Errorln(e)
			// Fix: removed the dead `err = e` assignment — err is a value
			// parameter, so writing to it had no observable effect.
			return
		}
		RemovePlayer(p.Uid)
		ClubPlayers.Del(p.Uid) // remove from the club online list
		dao.Db().Save(p)
	}
}
// HandleConnect handles a new network connection.
// NOTE(review): this logs via the standard log package while the rest
// of the file uses glog — confirm whether that is intentional.
func HandleConnect(s *zero.Session) {
	log.Println(s.GetConn().GetName() + " 连接")
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package opentelekom
import (
"github.com/CS-SI/SafeScale/providers/model"
)
// CreateNetwork creates a network (i.e. a subnet in the network associated
// to the VPC in FlexibleEngine), delegating to the FlexibleEngine client.
func (client *Client) CreateNetwork(req model.NetworkRequest) (*model.Network, error) {
	return client.feclt.CreateNetwork(req)
}
// GetNetworkByName returns the network identified by name, delegating to
// the FlexibleEngine client.
func (client *Client) GetNetworkByName(name string) (*model.Network, error) {
	return client.feclt.GetNetworkByName(name)
}
// GetNetwork returns the network identified by id, delegating to the
// FlexibleEngine client.
func (client *Client) GetNetwork(id string) (*model.Network, error) {
	return client.feclt.GetNetwork(id)
}
// ListNetworks lists all networks, delegating to the FlexibleEngine client.
func (client *Client) ListNetworks() ([]*model.Network, error) {
	return client.feclt.ListNetworks()
}
// DeleteNetwork deletes the subnet identified by id in the FlexibleEngine VPC.
func (client *Client) DeleteNetwork(id string) error {
	return client.feclt.DeleteNetwork(id)
}
// CreateGateway creates a gateway for a network.
// By current implementation, only one gateway can exist per Network because
// the object is intended to contain only one hostID.
func (client *Client) CreateGateway(req model.GatewayRequest) (*model.Host, error) {
	return client.feclt.CreateGateway(req)
}
// // GetGateway returns the name of the gateway of a network
// func (client *Client) GetGateway(networkID string) (*model.Host, error) {
// return client.feclt.GetGateway(networkID)
// }
// DeleteGateway deletes the gateway associated with the network identified
// by networkID, delegating to the FlexibleEngine client.
func (client *Client) DeleteGateway(networkID string) error {
	return client.feclt.DeleteGateway(networkID)
}
|
package gate
import "github.com/mi4tin/go-chassis-gate/filehelper"
// configObj holds the process-wide gate configuration loaded in init().
var configObj *Config
//Config 是gate相关的一些配置
type Config struct {
	// IPWhiteList is the IP whitelist for the gate.
	IPWhiteList string `yaml:"ipWhiteList"`
}
// init loads the gate configuration as soon as the package is imported.
func init() {
	initConfig()
}
// initConfig loads the gate configuration from its file; the gate cannot
// run unconfigured, so any load failure panics.
func initConfig() {
	configObj = &Config{}
	if err := filehelper.GetConfig(configObj, filehelper.FileNameGate); err != nil {
		panic(err)
	}
}
// GetConfig returns the configuration loaded at package init time.
func GetConfig() *Config {
	return configObj
}
|
package handlers
import (
"net/http"
"strings"
"github.com/cloudfoundry-incubator/notifications/metrics"
"github.com/cloudfoundry-incubator/notifications/models"
"github.com/cloudfoundry-incubator/notifications/postal"
"github.com/ryanmoran/stack"
)
// NotifySpace handles notification requests addressed to every user of a
// Cloud Foundry space (POST /spaces/{guid}).
type NotifySpace struct {
	errorWriter ErrorWriterInterface
	notify NotifyInterface
	recipe postal.RecipeInterface
	database models.DatabaseInterface
}
// NewNotifySpace builds a NotifySpace handler wired with its collaborators.
func NewNotifySpace(notify NotifyInterface, errorWriter ErrorWriterInterface, recipe postal.RecipeInterface, database models.DatabaseInterface) NotifySpace {
	handler := NotifySpace{
		notify:      notify,
		errorWriter: errorWriter,
		recipe:      recipe,
		database:    database,
	}
	return handler
}
// ServeHTTP dispatches a space notification; errors are rendered through
// the errorWriter, and a counter metric is emitted on success.
func (handler NotifySpace) ServeHTTP(w http.ResponseWriter, req *http.Request, context stack.Context) {
	conn := handler.database.Connection()
	if err := handler.Execute(w, req, conn, context, handler.recipe); err != nil {
		handler.errorWriter.Write(w, err)
		return
	}
	metrics.NewMetric("counter", map[string]interface{}{
		"name": "notifications.web.spaces",
	}).Log()
}
// Execute extracts the space GUID from the URL path, runs the notification
// pipeline, and writes its output with a 200 status.
func (handler NotifySpace) Execute(w http.ResponseWriter, req *http.Request, connection models.ConnectionInterface,
	context stack.Context, recipe postal.RecipeInterface) error {
	guid := postal.SpaceGUID(strings.TrimPrefix(req.URL.Path, "/spaces/"))
	output, err := handler.notify.Execute(connection, req, context, guid, recipe)
	if err != nil {
		return err
	}
	w.WriteHeader(http.StatusOK)
	w.Write(output)
	return nil
}
|
package link
import (
"os"
"testing"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal/testutils"
)
// TestSkLookup attaches a minimal sk_lookup program to the current network
// namespace and runs the shared link test-suite against the attachment.
// Requires kernel >= 5.8.
func TestSkLookup(t *testing.T) {
	testutils.SkipOnOldKernel(t, "5.8", "sk_lookup program")
	prog := mustLoadProgram(t, ebpf.SkLookup, ebpf.AttachSkLookup, "")
	netns, err := os.Open("/proc/self/ns/net")
	if err != nil {
		t.Fatal(err)
	}
	defer netns.Close()
	link, err := AttachNetNs(int(netns.Fd()), prog)
	if err != nil {
		t.Fatal("Can't attach link:", err)
	}
	testLink(t, link, prog)
}
// createSkLookupProgram loads a minimal sk_lookup program whose body
// immediately returns 0.
func createSkLookupProgram() (*ebpf.Program, error) {
	spec := &ebpf.ProgramSpec{
		Type:       ebpf.SkLookup,
		AttachType: ebpf.AttachSkLookup,
		License:    "MIT",
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0),
			asm.Return(),
		},
	}
	return ebpf.NewProgram(spec)
}
// ExampleAttachNetNs demonstrates attaching an sk_lookup program to a
// network namespace and detaching it again.
func ExampleAttachNetNs() {
	prog, err := createSkLookupProgram()
	if err != nil {
		panic(err)
	}
	defer prog.Close()
	// This can be a path to another netns as well.
	netns, err := os.Open("/proc/self/ns/net")
	if err != nil {
		panic(err)
	}
	defer netns.Close()
	link, err := AttachNetNs(int(netns.Fd()), prog)
	if err != nil {
		panic(err)
	}
	// The socket lookup program is now active until Close().
	link.Close()
}
|
// Copyright 2016 Jacques Supcik, HEIA-FR
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package renderer
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/json"
"fmt"
"github.com/dgrijalva/jwt-go"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
"google.golang.org/appengine"
"google.golang.org/appengine/aetest"
"google.golang.org/appengine/datastore"
"net/http/httptest"
"os"
"testing"
)
// token is the datastore entity stored under the "ValidTokens" kind; its
// presence marks a token id as valid.
type token struct {
	Present bool
}
// Test fixtures initialised in init(): a UUID/JWT pair registered as valid
// in the datastore, and a second pair that is never inserted.
var validUUID, invalidUUID string
var validToken, invalidToken string
// insertToken registers uuid as a valid token in the test datastore.
func insertToken(ctx context.Context, uuid string) error {
	key := datastore.NewKey(ctx, "ValidTokens", uuid, 0, nil)
	entity := token{Present: true}
	_, err := datastore.Put(ctx, key, &entity)
	return err
}
// TestRenderText renders a small "+" glyph and checks the returned matrix
// dimensions and two sample pixels.
// Fixes: the aetest instance is now closed (it spawns a dev_appserver
// process), Encode/insertToken errors are checked, and a stale
// assert.NoError that re-checked an old err after creating the decoder
// was folded into the Decode check.
func TestRenderText(t *testing.T) {
	txt := TextMsg{
		Text:            "+",
		ForegroundColor: "#000100",
		BackgroundColor: "#010000",
		FontSize:        6,
	}
	var b bytes.Buffer
	enc := json.NewEncoder(&b)
	assert.NoError(t, enc.Encode(&txt))
	inst, err := aetest.NewInstance(nil)
	assert.NoError(t, err)
	defer inst.Close()
	req, err := inst.NewRequest("POST", "/renderText", &b)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w := httptest.NewRecorder()
	renderText(w, req)
	assert.Equal(t, 200, w.Code)
	var result Matrix
	assert.NoError(t, json.NewDecoder(w.Body).Decode(&result))
	assert.Equal(t, 8, result.Rows)
	assert.Equal(t, 6, result.Columns)
	assert.Equal(t, 8*6, len(result.Bitmap))
	assert.EqualValues(t, 1<<16, result.Bitmap[0])
	assert.EqualValues(t, 1<<8, result.Bitmap[11])
}
// TestRenderImage posts a PNG and checks the rendered matrix.
// Fixes: the opened file and the aetest instance are now closed, and the
// stale assert.NoError after creating the decoder was folded into Decode.
func TestRenderImage(t *testing.T) {
	b, err := os.Open("logo.png")
	assert.NoError(t, err)
	defer b.Close()
	inst, err := aetest.NewInstance(nil)
	assert.NoError(t, err)
	defer inst.Close()
	req, err := inst.NewRequest("POST", "/renderImage", b)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w := httptest.NewRecorder()
	renderImage(w, req)
	assert.Equal(t, 200, w.Code)
	var result Matrix
	assert.NoError(t, json.NewDecoder(w.Body).Decode(&result))
	assert.Equal(t, 8, result.Rows)
	assert.Equal(t, 8, result.Columns)
	assert.Equal(t, 8*8, len(result.Bitmap))
	assert.EqualValues(t, 0, result.Bitmap[0])
	assert.EqualValues(t, 2596560, result.Bitmap[11])
}
// TestRenderSpace renders a 13-column spacer and checks dimensions and two
// sample pixels. Fixes: aetest instance closed, Encode/insertToken errors
// checked, stale post-decoder assert removed.
func TestRenderSpace(t *testing.T) {
	txt := Space{
		Len:             13,
		BackgroundColor: "#010000",
	}
	var b bytes.Buffer
	enc := json.NewEncoder(&b)
	assert.NoError(t, enc.Encode(&txt))
	inst, err := aetest.NewInstance(nil)
	assert.NoError(t, err)
	defer inst.Close()
	req, err := inst.NewRequest("POST", "/renderSpace", &b)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w := httptest.NewRecorder()
	renderSpace(w, req)
	assert.Equal(t, 200, w.Code)
	var result Matrix
	assert.NoError(t, json.NewDecoder(w.Body).Decode(&result))
	assert.Equal(t, 8, result.Rows)
	assert.Equal(t, 13, result.Columns)
	assert.Equal(t, 8*13, len(result.Bitmap))
	assert.EqualValues(t, 1<<16, result.Bitmap[0])
	assert.EqualValues(t, 1<<16, result.Bitmap[11])
}
// TestJoin renders two glyphs of different widths, joins them, and verifies
// the combined matrix dimensions plus sample pixels from both halves.
// Fixes: aetest instance closed, Encode/insertToken errors checked, and the
// stale assert.NoError calls after creating each decoder (which re-checked
// an old err) folded into the Decode checks.
func TestJoin(t *testing.T) {
	txt1 := TextMsg{
		Text:            "+",
		ForegroundColor: "#000001",
		BackgroundColor: "#010000",
		FontSize:        8,
	}
	txt2 := TextMsg{
		Text:            "+",
		ForegroundColor: "#000010",
		BackgroundColor: "#100000",
		FontSize:        6,
	}
	var b bytes.Buffer
	enc := json.NewEncoder(&b)
	assert.NoError(t, enc.Encode(&txt1))
	inst, err := aetest.NewInstance(nil)
	assert.NoError(t, err)
	defer inst.Close()
	req, err := inst.NewRequest("POST", "/renderText", &b)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w := httptest.NewRecorder()
	renderText(w, req)
	assert.Equal(t, 200, w.Code)
	var result1 Matrix
	assert.NoError(t, json.NewDecoder(w.Body).Decode(&result1))
	assert.Equal(t, 8, result1.Rows)
	assert.Equal(t, 8, result1.Columns)
	assert.Equal(t, 8*8, len(result1.Bitmap))
	assert.EqualValues(t, 1<<16, result1.Bitmap[0])
	assert.EqualValues(t, 1<<0, result1.Bitmap[11])
	// Second glyph, narrower font.
	b.Reset()
	assert.NoError(t, enc.Encode(&txt2))
	req, err = inst.NewRequest("POST", "/renderText", &b)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w = httptest.NewRecorder()
	renderText(w, req)
	assert.Equal(t, 200, w.Code)
	var result2 Matrix
	assert.NoError(t, json.NewDecoder(w.Body).Decode(&result2))
	assert.Equal(t, 8, result2.Rows)
	assert.Equal(t, 6, result2.Columns)
	assert.Equal(t, 8*6, len(result2.Bitmap))
	assert.EqualValues(t, 1<<20, result2.Bitmap[0])
	assert.EqualValues(t, 1<<4, result2.Bitmap[11])
	// Join the two matrices side by side.
	list := []Matrix{result1, result2}
	b.Reset()
	assert.NoError(t, enc.Encode(&list))
	req, err = inst.NewRequest("POST", "/join", &b)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w = httptest.NewRecorder()
	join(w, req)
	assert.Equal(t, 200, w.Code)
	var result Matrix
	assert.NoError(t, json.NewDecoder(w.Body).Decode(&result))
	assert.Equal(t, 8, result.Rows)
	assert.Equal(t, (8 + 6), result.Columns)
	assert.Equal(t, 8*(8+6), len(result.Bitmap))
	assert.EqualValues(t, 1<<16, result.Bitmap[0])
	assert.EqualValues(t, 1<<0, result.Bitmap[11])
	assert.EqualValues(t, 1<<20, result.Bitmap[8*8+0])
	assert.EqualValues(t, 1<<4, result.Bitmap[8*8+11])
}
// TestRenderSpaceErrors checks that renderSpace rejects malformed JSON and
// an invalid background color with 400.
// Fixes: aetest instance closed; Encode/insertToken errors checked.
func TestRenderSpaceErrors(t *testing.T) {
	b := bytes.NewBufferString("GARBAGE")
	inst, err := aetest.NewInstance(nil)
	assert.NoError(t, err)
	defer inst.Close()
	req, err := inst.NewRequest("POST", "/renderSpace", b)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w := httptest.NewRecorder()
	renderSpace(w, req)
	assert.Equal(t, 400, w.Code)
	// Syntactically valid payload with an unparsable color.
	txt := Space{
		Len:             12,
		BackgroundColor: "BADCOLOR",
	}
	var b1 bytes.Buffer
	enc := json.NewEncoder(&b1)
	assert.NoError(t, enc.Encode(&txt))
	req, err = inst.NewRequest("POST", "/renderSpace", &b1)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w = httptest.NewRecorder()
	renderSpace(w, req)
	assert.Equal(t, 400, w.Code)
}
// TestRenderTextErrors checks that renderText rejects malformed JSON and
// invalid foreground/background colors with 400.
// Fixes: the original never asserted on the responses of the second and
// third requests (the test passed regardless of the outcome); 400
// assertions were added, matching the pattern of TestRenderSpaceErrors.
// Also: aetest instance closed, Encode/insertToken errors checked.
func TestRenderTextErrors(t *testing.T) {
	b := bytes.NewBufferString("GARBAGE")
	inst, err := aetest.NewInstance(nil)
	assert.NoError(t, err)
	defer inst.Close()
	req, err := inst.NewRequest("POST", "/renderText", b)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w := httptest.NewRecorder()
	renderText(w, req)
	assert.Equal(t, 400, w.Code)
	// Invalid foreground color.
	txt := TextMsg{
		Text:            "+",
		ForegroundColor: "BADCOLOR",
		BackgroundColor: "#010000",
		FontSize:        6,
	}
	var b1 bytes.Buffer
	enc := json.NewEncoder(&b1)
	assert.NoError(t, enc.Encode(&txt))
	req, err = inst.NewRequest("POST", "/renderText", &b1)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w = httptest.NewRecorder()
	renderText(w, req)
	assert.Equal(t, 400, w.Code)
	// Invalid background color.
	txt = TextMsg{
		Text:            "+",
		ForegroundColor: "#ff00CC",
		BackgroundColor: "BADCOLOR",
		FontSize:        6,
	}
	b1.Reset()
	assert.NoError(t, enc.Encode(&txt))
	req, err = inst.NewRequest("POST", "/renderText", &b1)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w = httptest.NewRecorder()
	renderText(w, req)
	assert.Equal(t, 400, w.Code)
}
// TestRenderImageErrors checks that renderImage rejects a non-image payload
// and an image of unsupported size with 400.
// Fixes: both opened files and the aetest instance are now closed (the
// original leaked the first *os.File when reassigning b); insertToken
// errors checked.
func TestRenderImageErrors(t *testing.T) {
	f1, err := os.Open("LICENSE")
	assert.NoError(t, err)
	defer f1.Close()
	inst, err := aetest.NewInstance(nil)
	assert.NoError(t, err)
	defer inst.Close()
	req, err := inst.NewRequest("POST", "/renderImage", f1)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w := httptest.NewRecorder()
	renderImage(w, req)
	assert.Equal(t, 400, w.Code)
	// A valid PNG whose dimensions are unsupported.
	f2, err := os.Open("badsize.png")
	assert.NoError(t, err)
	defer f2.Close()
	req, err = inst.NewRequest("POST", "/renderImage", f2)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w = httptest.NewRecorder()
	renderImage(w, req)
	assert.Equal(t, 400, w.Code)
}
// TestJoinError checks that join rejects malformed JSON and an empty
// matrix list with 400.
// Fixes: aetest instance closed; Encode/insertToken errors checked.
func TestJoinError(t *testing.T) {
	b := bytes.NewBufferString("GARBAGE")
	inst, err := aetest.NewInstance(nil)
	assert.NoError(t, err)
	defer inst.Close()
	req, err := inst.NewRequest("POST", "/join", b)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w := httptest.NewRecorder()
	join(w, req)
	assert.Equal(t, 400, w.Code)
	// An empty list is not joinable.
	list := []Matrix{}
	var b1 bytes.Buffer
	enc := json.NewEncoder(&b1)
	assert.NoError(t, enc.Encode(&list))
	req, err = inst.NewRequest("POST", "/join", &b1)
	assert.NoError(t, err)
	assert.NoError(t, insertToken(appengine.NewContext(req), validUUID))
	req.Header.Add("Authorization", "Bearer "+validToken)
	w = httptest.NewRecorder()
	join(w, req)
	assert.Equal(t, 400, w.Code)
}
// init builds the test fixtures: two UUIDs (only validUUID is registered in
// the datastore by the tests), an ECDSA P-256 key whose public point is
// exported via env vars for the token verifier, and a signed JWT per UUID.
// Fix: the error from ecdsa.GenerateKey was previously discarded with `_`,
// which would make every SignedString call below fail confusingly; it now
// panics like the other fixture errors.
func init() {
	var err error
	validUUID, err = uuid.GenerateUUID()
	if err != nil {
		panic(err)
	}
	invalidUUID, err = uuid.GenerateUUID()
	if err != nil {
		panic(err)
	}
	goodPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	os.Setenv("PUBLIC_KEY_X", fmt.Sprintf("%X", goodPrivateKey.X))
	os.Setenv("PUBLIC_KEY_Y", fmt.Sprintf("%X", goodPrivateKey.Y))
	validToken, err = jwt.NewWithClaims(jwt.SigningMethodES256, &jwt.StandardClaims{
		Audience: "basic",
		Subject:  "Jacques",
		Issuer:   "BlueMasters",
		Id:       validUUID,
	}).SignedString(goodPrivateKey)
	if err != nil {
		panic(err)
	}
	invalidToken, err = jwt.NewWithClaims(jwt.SigningMethodES256, &jwt.StandardClaims{
		Audience: "basic",
		Subject:  "Jacques",
		Issuer:   "BlueMasters",
		Id:       invalidUUID,
	}).SignedString(goodPrivateKey)
	if err != nil {
		panic(err)
	}
}
|
package parens_test
import (
"errors"
"testing"
"github.com/spy16/parens"
"github.com/spy16/parens/parser"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// add returns the sum of a and b; it is bound into a parens scope as a host
// function in the benchmarks below.
func add(a, b float64) float64 {
	sum := a + b
	return sum
}
// BenchmarkParens_Execute compares end-to-end Execute (parse + eval) with
// evaluating an already-parsed expression, isolating parsing overhead.
func BenchmarkParens_Execute(suite *testing.B) {
	ins := parens.New(parens.NewScope(nil))
	suite.Run("Execute", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ins.Execute("(add 1 2)")
		}
	})
	// Hand-built AST equivalent of "(add 1 2)".
	expr := parser.ListExpr{
		List: []parser.Expr{
			parser.SymbolExpr{
				Symbol: "add",
			},
			parser.NumberExpr{
				Number: 1,
			},
			parser.NumberExpr{
				Number: 2,
			},
		},
	}
	suite.Run("ExecuteExpr", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ins.ExecuteExpr(expr)
		}
	})
}
// BenchmarkParens_FunctionCall compares a direct Go call to `add` with the
// same call dispatched through an evaluated parens expression.
func BenchmarkParens_FunctionCall(suite *testing.B) {
	suite.Run("DirectCall", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			add(1, 2)
		}
	})
	expr, err := parser.Parse("<test>", "(add 1 2)")
	if err != nil {
		suite.Fatalf("failed to parse expression: %s", err)
	}
	suite.Run("CallThroughParens", func(b *testing.B) {
		scope := parens.NewScope(nil)
		scope.Bind("add", add)
		for i := 0; i < b.N; i++ {
			expr.Eval(scope)
		}
	})
}
// TestExecute_Success checks that Execute returns the value produced by the
// (mocked) parsed expression.
func TestExecute_Success(t *testing.T) {
	scope := parens.NewScope(nil)
	par := parens.New(scope)
	par.Parse = mockParseFn(mockExpr(10, nil), nil)
	res, err := par.Execute("10")
	assert.NoError(t, err)
	require.NotNil(t, res)
	assert.Equal(t, res, 10)
}
// TestExecute_EvalFailure checks that an evaluation error is propagated
// unchanged and the result is nil.
func TestExecute_EvalFailure(t *testing.T) {
	scope := parens.NewScope(nil)
	par := parens.New(scope)
	par.Parse = mockParseFn(mockExpr(nil, errors.New("failed")), nil)
	res, err := par.Execute("(hello)")
	require.Error(t, err)
	assert.Equal(t, errors.New("failed"), err)
	assert.Nil(t, res)
}
// TestExecute_ParseFailure checks that a parse error is propagated
// unchanged and the result is nil.
func TestExecute_ParseFailure(t *testing.T) {
	scope := parens.NewScope(nil)
	par := parens.New(scope)
	par.Parse = mockParseFn(nil, errors.New("failed"))
	res, err := par.Execute("(hello)")
	require.Error(t, err)
	assert.Equal(t, errors.New("failed"), err)
	assert.Nil(t, res)
}
// mockExpr builds a parser.Expr whose evaluation yields v, or err when err
// is non-nil.
func mockExpr(v interface{}, err error) parser.Expr {
	return exprMock(func(scope parser.Scope) (interface{}, error) {
		if err == nil {
			return v, nil
		}
		return nil, err
	})
}
// mockParseFn builds a ParseFn that ignores its input and returns the given
// expression, or err when err is non-nil.
func mockParseFn(expr parser.Expr, err error) parens.ParseFn {
	return func(name, src string) (parser.Expr, error) {
		if err == nil {
			return expr, nil
		}
		return nil, err
	}
}
// exprMock adapts a bare function into the parser.Expr interface.
type exprMock func(scope parser.Scope) (interface{}, error)

// Eval invokes the wrapped function.
func (sm exprMock) Eval(scope parser.Scope) (interface{}, error) {
	return sm(scope)
}
|
package main
import (
"bufio"
"os"
"fmt"
"strconv"
)
// e_ memoizes results of e: e_[n] holds every balanced-parenthesis string
// built from n bracket pairs.
var e_ map[int][]string

// e returns all balanced strings of n parenthesis pairs (Catalan(n) of
// them), via the recurrence: every such string is "(" + a + ")" + b where a
// has i pairs and b has n-1-i pairs, for i = 0..n-1.
//
// Fix: the original panicked with a nil-map write if called before main()
// initialised e_; the map is now lazily created, making e self-contained.
func e(n int) []string {
	if n == 0 {
		return []string{""}
	}
	if cached, ok := e_[n]; ok {
		return cached
	}
	if e_ == nil {
		e_ = make(map[int][]string)
	}
	e_[n] = make([]string, 0, 1)
	for i, j := 0, n-1; i < n; i, j = i+1, j-1 {
		for _, a := range e(i) {
			for _, b := range e(j) {
				e_[n] = append(e_[n], "("+a+")"+b)
			}
		}
	}
	return e_[n]
}
// main reads integers from stdin, one per line, and prints every balanced
// parenthesis string of that many pairs; it stops at the first non-integer.
func main() {
	e_ = make(map[int][]string)
	in := bufio.NewScanner(os.Stdin)
	for in.Scan() {
		n, convErr := strconv.Atoi(in.Text())
		if convErr != nil {
			break
		}
		for _, s := range e(n) {
			fmt.Printf("%s\n", s)
		}
	}
}
|
// Package docs GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// This file was generated by swaggo/swag
package docs
import (
"bytes"
"encoding/json"
"strings"
"text/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{escape .Description}}",
"title": "{{.Title}}",
"termsOfService": "http://swagger.io/terms/",
"contact": {
"name": "NGUYEN LE BAO TRUONG",
"url": "https://github.com/allvisss",
"email": "truongnlbse140940@fpt.edu.vn"
},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/v1/decrypt": {
"post": {
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Decrypt"
],
"summary": "Decrypt ciphertext into plaintext (using curve secp112r1 as demo)",
"parameters": [
{
"type": "string",
"description": "Ciphertext",
"name": "ciphertext",
"in": "query"
},
{
"description": "Keypair",
"name": "priv",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/api.Keypair"
}
}
],
"responses": {
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/response.Response"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"404": {
"description": "Not Found",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
}
}
}
},
"/v1/decrypt2": {
"post": {
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Decrypt"
],
"summary": "Decrypt ciphertext into plaintext",
"parameters": [
{
"type": "string",
"description": "Ciphertext",
"name": "ciphertext",
"in": "query"
},
{
"description": "Keypair",
"name": "priv",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/api.Keypair"
}
}
],
"responses": {
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/response.Response"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"404": {
"description": "Not Found",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
}
}
}
},
"/v1/encrypt": {
"post": {
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Encrypt"
],
"summary": "Encrypt plaintext into ciphertext (using curve secp112r1 as demo)",
"parameters": [
{
"type": "string",
"description": "Plaintext",
"name": "msg",
"in": "query"
},
{
"description": "Keypair",
"name": "priv",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/api.Keypair"
}
}
],
"responses": {
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/response.Response"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"404": {
"description": "Not Found",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
}
}
}
},
"/v1/encrypt2": {
"post": {
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Encrypt"
],
"summary": "Encrypt plaintext into ciphertext",
"parameters": [
{
"type": "string",
"description": "Plaintext",
"name": "msg",
"in": "query"
},
{
"description": "Key",
"name": "priv",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/api.Keypair"
}
}
],
"responses": {
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/response.Response"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"404": {
"description": "Not Found",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
}
}
}
},
"/v1/keygen": {
"get": {
"consumes": [
"*/*"
],
"produces": [
"application/json"
],
"tags": [
"Key"
],
"summary": "Generate key pair (using curve secp112r1 as demo)",
"responses": {
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/response.Response"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"404": {
"description": "Not Found",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
}
}
}
},
"/v1/keygen2": {
"get": {
"consumes": [
"*/*"
],
"produces": [
"application/json"
],
"tags": [
"Key"
],
"summary": "Generate key pair",
"responses": {
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/response.Response"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"404": {
"description": "Not Found",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
}
}
}
},
"/v1/testDecrypt": {
"get": {
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Test"
],
"summary": "Testing decrypt method",
"parameters": [
{
"type": "string",
"description": "Plaintext",
"name": "plaintext",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/response.Response"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"404": {
"description": "Not Found",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/response.ErrorResponse"
}
}
}
}
}
},
"definitions": {
"api.Keypair": {
"type": "object",
"properties": {
"D": {
"type": "string"
},
"X": {
"type": "string"
},
"Y": {
"type": "string"
},
"curve": {
"type": "object",
"properties": {
"B": {
"type": "string"
},
"BitSize": {
"type": "integer"
},
"Gx": {
"type": "string"
},
"Gy": {
"type": "string"
},
"N": {
"type": "string"
},
"Name": {
"type": "string"
},
"P": {
"type": "string"
}
}
}
}
},
"response.ErrorResponse": {
"type": "object",
"properties": {
"error": {
"type": "string"
}
}
},
"response.Response": {
"type": "object",
"properties": {
"code": {
"type": "string"
},
"data": {
"type": "object"
}
}
}
}
}`
type swaggerInfo struct {
Version string
Host string
BasePath string
Schemes []string
Title string
Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = swaggerInfo{
Version: "0.0",
Host: "",
BasePath: "",
Schemes: []string{},
Title: "ECC SERVICE API",
Description: "Elliptic Curve Cryptography service API for IOT system",
}
type s struct{}
func (s *s) ReadDoc() string {
sInfo := SwaggerInfo
sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
t, err := template.New("swagger_info").Funcs(template.FuncMap{
"marshal": func(v interface{}) string {
a, _ := json.Marshal(v)
return string(a)
},
"escape": func(v interface{}) string {
// escape tabs
str := strings.Replace(v.(string), "\t", "\\t", -1)
// replace " with \", and if that results in \\", replace that with \\\"
str = strings.Replace(str, "\"", "\\\"", -1)
return strings.Replace(str, "\\\\\"", "\\\\\\\"", -1)
},
}).Parse(doc)
if err != nil {
return doc
}
var tpl bytes.Buffer
if err := t.Execute(&tpl, sInfo); err != nil {
return doc
}
return tpl.String()
}
func init() {
swag.Register(swag.Name, &s{})
}
|
package startup
import (
"database/sql"
"encoding/json"
"fmt"
_ "github.com/denisenkom/go-mssqldb"
"io/ioutil"
"log"
"net/url"
"os"
"path/filepath"
)
// Parameters holds all application configuration loaded from the setup
// JSON file, including the nested database settings.
type Parameters struct {
	MaxQuestions int `json:"max_questions"`
	QuestionsPerUser int `json:"questions_per_user"`
	NoOfQuestionsForChallenger int `json:"no_of_questions_for_challenge"`
	DbParams DatabaseParams `json:"db_params"`
}
// DatabaseParams holds the database connection settings read from the
// configuration file.
type DatabaseParams struct {
	DbType string `json:"db_type"`
	DbUser string `json:"db_user"`
	DbPwd string `json:"db_pwd"`
	DbHost string `json:"db_host"`
	DbPort int `json:"db_port"`
	DbSchema string `json:"db_schema"`
}
var (
	// Db is the shared database handle opened in init().
	Db *sql.DB
	// ConfigParameters is the configuration loaded by loadParams().
	ConfigParameters Parameters
	// CurrentPath is the directory of the running executable.
	CurrentPath string
)
// init loads the configuration parameters and opens the database handle
// described by them, aborting the process on failure.
func init() {
	setCurrentPath()
	loadParams()
	log.Println("Setting up con")
	connURL := &url.URL{
		Scheme: ConfigParameters.DbParams.DbSchema,
		User:   url.UserPassword(ConfigParameters.DbParams.DbUser, ConfigParameters.DbParams.DbPwd),
		Host:   fmt.Sprintf("%s:%d", ConfigParameters.DbParams.DbHost, ConfigParameters.DbParams.DbPort),
	}
	dbCon, err := sql.Open(ConfigParameters.DbParams.DbType, connURL.String())
	if err != nil {
		log.Fatal("Could not open Database handler: ", err)
	}
	Db = dbCon
	log.Println("Successfully created Database con")
	log.Println("Db handler valid: ", IsConOk())
}
// setCurrentPath records the directory containing the running executable
// in CurrentPath, panicking if the executable path cannot be resolved.
func setCurrentPath() {
	exePath, err := os.Executable()
	if err != nil {
		panic(err)
	}
	CurrentPath = filepath.Dir(exePath)
	log.Println("Current directory path set as [" + CurrentPath + "]")
}
// IsConOk reports whether the database connection handle is still valid by
// pinging it; the failure is logged.
//
// Fix: the original called log.Fatal on a ping failure, which exits the
// process and made the `return false` unreachable — callers could never
// observe a false result. A check function must report, not terminate.
func IsConOk() bool {
	if err := Db.Ping(); err != nil {
		log.Println("Database connection is not valid, error: ", err)
		return false
	}
	return true
}
// loadParams loads the configuration parameters from the JSON setup file
// into ConfigParameters.
//
// Fixes: read and unmarshal errors were silently ignored, leaving a
// zero-valued configuration that would fail later in confusing ways; both
// are now fatal. The redundant []byte conversion was also removed.
// NOTE(review): the absolute path is machine-specific — consider deriving
// it from CurrentPath instead.
func loadParams() {
	params, err := ioutil.ReadFile("/home/paritosh/go/src/github.com/paritosh-96/RestServer/config/setupParameters.json")
	if err != nil {
		log.Fatal("Could not read config file: ", err)
	}
	ConfigParameters = Parameters{}
	if err := json.Unmarshal(params, &ConfigParameters); err != nil {
		log.Fatal("Could not parse config file: ", err)
	}
	log.Println("Config parameters loaded...")
}
|
package util
import yaml "gopkg.in/yaml.v2"
// Convert converts the old object into the new object through YAML
// serialization / deserialization: old is marshalled to YAML and the result
// unmarshalled into new, which must be a pointer.
//
// Fix: the original comment claimed JSON serialization, but the
// implementation uses YAML.
func Convert(old interface{}, new interface{}) error {
	raw, err := yaml.Marshal(old)
	if err != nil {
		return err
	}
	return yaml.Unmarshal(raw, new)
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-04 08:59
* Description:
*****************************************************************/
package main
import (
"fmt"
"github.com/go-xe2/xthrift/netstream"
"sync"
"time"
)
// tclientHandler implements netstream.ClientStreamHandler for the demo
// client, logging every callback.
type tclientHandler struct {
}

// Compile-time check that tclientHandler satisfies the interface.
var _ netstream.ClientStreamHandler = (*tclientHandler)(nil)
// OnRecv logs data pushed from the server.
func (p *tclientHandler) OnRecv(conn netstream.StreamConn, data []byte) {
	fmt.Println("recv data:", string(data))
}

// OnCall answers a server-initiated call with a fixed payload.
func (p *tclientHandler) OnCall(conn netstream.StreamConn, data []byte) (result []byte, err error) {
	fmt.Println("客户端被调用,输入数据:", string(data))
	return []byte("hello, this is client result data."), nil
}

// OnConnect is invoked once the connection is established.
func (p *tclientHandler) OnConnect(conn netstream.StreamConn) {
	fmt.Println("client onConnect.")
}

// OnReconnect is invoked after the client re-establishes a dropped connection.
func (p *tclientHandler) OnReconnect(conn netstream.StreamConn) {
	fmt.Println("client reconnect.")
}

// OnDisconnect is invoked when the connection is lost or closed.
func (p *tclientHandler) OnDisconnect(conn netstream.StreamConn) {
	fmt.Println("client:", conn.Id(), ", onDisconnect")
}

// OnRequest logs an incoming request from the peer.
func (p *tclientHandler) OnRequest(reqId string, namespace string, data []byte) {
	fmt.Println("收到请求 reqId:", reqId, ",namespace:", namespace, ", data:", string(data))
}

// OnResponse logs the reply to an earlier request.
func (p *tclientHandler) OnResponse(reqId string, data []byte) {
	fmt.Println("收到请求回复 reqId:", reqId, ", data:", string(data))
}
// startClient connects one demo client to the local stream server,
// exercises Send/Call/Request, then serves until the connection ends and
// returns the client id.
func startClient() string {
	options := netstream.NewStmClientOptions()
	options.SetWriteTimeout(3 * time.Minute)
	options.SetReadTimeout(3 * time.Minute)
	options.SetHeartbeatSpeed(1 * time.Minute)
	options.SetAllowMaxLoss(3) // tolerate up to 3 missed heartbeats
	client, err := netstream.NewStreamClient("127.0.0.1:8000", options)
	if err != nil {
		panic(err)
	}
	var handler = &tclientHandler{}
	client.SetHandler(handler)
	if e := client.Open(); e != nil {
		fmt.Println("open netClient error:", e)
	}
	fmt.Println("2秒后发送:hello, netstream.")
	time.Sleep(2 * time.Second)
	fmt.Println("开始发送")
	client.Send([]byte("hello, netstream."))
	fmt.Println("数据发送完成.")
	// NOTE(review): the message below appears to be missing a leading "2"
	// ("2秒后调用call"); left unchanged to preserve output.
	fmt.Println("秒后调用call")
	time.Sleep(2 * time.Second)
	result, e := client.Call([]byte("客户端呼叫数据"))
	fmt.Println("客户端调用返回:", string(result), ", err:", e)
	fmt.Println("准备发送请求")
	if e := client.Request(client.MakeRequestId(), "mnyun.com", []byte("我是客户端调用数据")); e != nil {
		fmt.Println("客户端调用出错:")
	}
	// Serve blocks until the connection is torn down.
	if e := client.Serve(nil); e != nil {
		fmt.Println("客户端已结束,出错:", e)
	}
	return client.Id()
}
// startClientWithCount launches n concurrent test clients, registering each
// with wg so the caller can wait for all of them to terminate.
func startClientWithCount(wg *sync.WaitGroup, n int) {
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			id := startClient()
			fmt.Println("=======>> ^^^^^ 客户端:", id, "已关闭")
		}()
	}
}
// main starts `count` concurrent demo clients and waits for all of them to
// close. (The original comment claimed 1000 clients; the actual count is 3.)
func main() {
	var wg sync.WaitGroup
	var count = 3
	startClientWithCount(&wg, count)
	wg.Wait()
	fmt.Println(count, "个客户端都已经关闭")
}
|
package Service
import (
"Work_5/DAO"
"Work_5/object"
)
// PutQuestion records a question asked by the given user. It verifies the
// user exists before creating the question record, and returns a non-zero
// ErrMessage describing any failure (zero value on success).
//
// Fix: removed the else-after-return at the tail; behavior is unchanged.
func PutQuestion(user *object.User, question *object.Question) object.ErrMessage {
	// Obtain a database connection.
	db, err := DAO.DataBaseInit()
	if err.IsErr {
		return err
	}
	// Reject questions from unknown users.
	isexist, _ := DAO.UserQuery(user, db)
	if !isexist {
		err.IsErr = true
		err.Whaterror = "user not exist"
		return err
	}
	// Tie the question to its author. The form may already carry this id,
	// but setting it here keeps the server-side value authoritative.
	question.QuestionerId = user.UserId
	// Persist the question.
	if err = DAO.CreateQuestion(question, db); err.IsErr {
		return err
	}
	return object.ErrMessage{}
}
|
package main
import (
"os"
"text/template"
)
var tmpl = template.Must(template.New("hello").Parse(`Hello from {{ . }}`))
// main renders the package-level template to stdout, panicking on a
// render/write failure.
func main() {
	if err := tmpl.Execute(os.Stdout, "go templates!"); err != nil {
		// Unreachable with this fixed template and an open stdout.
		panic(err)
	}
}
|
package orders
import (
"Pinjem/businesses/orders"
"context"
"time"
"gorm.io/gorm"
)
// OrderRepository is the gorm-backed implementation of the orders
// domain repository.
type OrderRepository struct {
	Conn *gorm.DB
}
// NewOrderRepository wires a gorm connection into an order repository
// satisfying orders.DomainRepository.
func NewOrderRepository(conn *gorm.DB) orders.DomainRepository {
	return &OrderRepository{Conn: conn}
}
// GetAll loads every order row and maps it to the business-layer
// Domain representation. ctx is accepted for interface compatibility
// but not forwarded to gorm.
func (b *OrderRepository) GetAll(ctx context.Context) ([]orders.Domain, error) {
	var rows []Orders
	err := b.Conn.Find(&rows).Error
	if err != nil {
		return nil, err
	}
	return ToListDomain(rows), nil
}
// GetOrdersByUserId returns every order belonging to userId, mapped
// to the business-layer Domain representation.
func (b *OrderRepository) GetOrdersByUserId(ctx context.Context, userId uint) ([]orders.Domain, error) {
	var rows []Orders
	query := b.Conn.Where("user_id = ?", userId)
	if err := query.Find(&rows).Error; err != nil {
		return nil, err
	}
	return ToListDomain(rows), nil
}
// GetById fetches a single order by primary key; a zero Domain and
// the gorm error (e.g. record-not-found) are returned on failure.
func (b *OrderRepository) GetById(ctx context.Context, id uint) (orders.Domain, error) {
	var row Orders
	err := b.Conn.Where("id = ?", id).First(&row).Error
	if err != nil {
		return orders.Domain{}, err
	}
	return row.ToDomain(), nil
}
// Create inserts a new order row for the given domain object and
// returns the stored row mapped back to the Domain form.
func (b *OrderRepository) Create(ctx context.Context, order orders.Domain) (orders.Domain, error) {
	row := Orders{
		UserId: order.UserId,
		Status: order.Status,
	}
	// BeforeCreate is invoked by hand here (not via a gorm hook).
	row.BeforeCreate()
	if err := b.Conn.Create(&row).Error; err != nil {
		return orders.Domain{}, err
	}
	return row.ToDomain(), nil
}
// UpdateStatus loads the order, flips its Status flag, stamps
// UpdatedAt and saves the row, returning the updated Domain value.
func (b *OrderRepository) UpdateStatus(ctx context.Context, id uint, status bool) (orders.Domain, error) {
	var row Orders
	if err := b.Conn.Where("id = ?", id).First(&row).Error; err != nil {
		return orders.Domain{}, err
	}
	row.Status = status
	row.UpdatedAt = time.Now()
	if err := b.Conn.Save(&row).Error; err != nil {
		return orders.Domain{}, err
	}
	return row.ToDomain(), nil
}
// Delete removes the order with the given id; the gorm error (nil on
// success) is returned directly.
func (b *OrderRepository) Delete(ctx context.Context, id uint) error {
	var row Orders
	return b.Conn.Where("id = ?", id).Delete(&row).Error
}
// func (b *OrderRepository) Update(user *User) error {
// return b.Conn.Save(user).Error
// }
|
package service
import (
"context"
"github.com/koind/cacher/internal/domain/repository"
)
// CacheService provides application-level access to the cache store,
// delegating all persistence to the injected repository.
type CacheService struct {
	cacheRepository repository.CacheRepositoryInterface
}
// NewCacheService creates a new cache service backed by cr.
func NewCacheService(cr repository.CacheRepositoryInterface) *CacheService {
	return &CacheService{
		cacheRepository: cr,
	}
}
// Upsert updates the record if it exists and creates it otherwise,
// returning the stored record.
func (s *CacheService) Upsert(ctx context.Context, cache repository.Cache) (*repository.Cache, error) {
	return s.cacheRepository.Upsert(ctx, cache)
}
// GetOneByKey returns the single record stored under key.
func (s *CacheService) GetOneByKey(ctx context.Context, key string) (*repository.Cache, error) {
	return s.cacheRepository.GetOneByKey(ctx, key)
}
// GetAll returns every cached record.
func (s *CacheService) GetAll(ctx context.Context) ([]*repository.Cache, error) {
	return s.cacheRepository.GetAll(ctx)
}
// Delete removes the record stored under key.
func (s *CacheService) Delete(ctx context.Context, key string) error {
	return s.cacheRepository.Delete(ctx, key)
}
|
package calculator
import (
"fmt"
"strconv"
"strings"
)
// stack is a minimal LIFO of strings used for both operand and
// operator storage during evaluation.
type stack struct{ vec []string }

// Empty reports whether the stack holds no elements.
func (s stack) Empty() bool { return len(s.vec) == 0 }

// Push appends str to the top of the stack.
func (s *stack) Push(str string) { s.vec = append(s.vec, str) }

// Pop removes and returns the top element. It panics (index out of
// range) on an empty stack, which Evaluate treats as malformed input.
func (s *stack) Pop() string {
	d := s.vec[len(s.vec)-1]
	s.vec = s.vec[:len(s.vec)-1]
	return d
}

// toInt converts s to an int, panicking on malformed input.
// (The previous version built an error with fmt.Errorf but silently
// discarded it and returned 0.)
func toInt(s string) int {
	val, err := strconv.Atoi(s)
	if err != nil {
		panic(fmt.Sprintf("Unable to handle: %v", s))
	}
	return val
}

// performOp applies the binary operator op to left and right — in
// that order — and returns the result as a string.
// BUG FIX: the operands were previously applied in reverse
// (right op left), making subtraction and division wrong.
func performOp(left, right, op string) string {
	p := toInt(left)
	q := toInt(right)
	result := 0
	switch op {
	case "+":
		result = p + q
	case "-":
		result = p - q
	case "/":
		result = p / q
	case "*":
		result = p * q
	}
	return strconv.Itoa(result)
}

// Evaluate computes a fully-parenthesized, space-separated infix
// expression (e.g. "( 1 + ( 2 * 3 ) )") using Dijkstra's two-stack
// algorithm. It panics on malformed expressions.
func Evaluate(expr string) int {
	values := stack{}
	operators := stack{}
	operands := strings.Split(expr, " ")
	for _, s := range operands {
		switch s {
		default:
			values.Push(s)
		case "(":
			continue
		case "+", "-", "/", "*":
			operators.Push(s)
		case ")":
			// The first pop yields the RIGHT operand, the second the LEFT.
			right := values.Pop()
			left := values.Pop()
			values.Push(performOp(left, right, operators.Pop()))
		}
	}
	result, err := strconv.Atoi(values.Pop())
	if err != nil {
		panic(fmt.Sprintf("Could not process, malformed expression? %v", err))
	}
	return result
}
|
package postgres
import (
"context"
core "github.com/Qalifah/aboki-africa-assessment"
)
// TransactionRepository persists point-transfer transactions through
// the shared postgres client.
type TransactionRepository struct {
	client *Client
}
// NewTransactionRepository builds a repository over client.
func NewTransactionRepository(client *Client) *TransactionRepository {
	return &TransactionRepository{
		client: client,
	}
}
// CreateTransaction inserts transaction within the current tx and
// fills transaction.ID from the RETURNING clause.
// BUG FIX: the VALUES list had four placeholders ($1..$4) for only
// three named columns, so every insert failed; the fourth argument
// (transaction.Type) now has a matching column.
// NOTE(review): the column name "type" is assumed from the struct
// field — confirm against the actual schema.
func (t *TransactionRepository) CreateTransaction(ctx context.Context, transaction *core.Transaction) error {
	tx, err := t.client.GetTx(ctx)
	if err != nil {
		return err
	}
	row := tx.QueryRow(ctx,
		"INSERT INTO transactions (sender_id, recipient_id, points, type) VALUES ($1, $2, $3, $4) RETURNING id",
		transaction.SenderID, transaction.RecipientID, transaction.Points, transaction.Type,
	)
	return row.Scan(&transaction.ID)
}
|
package main
import (
"fmt"
"testing"
)
// TestGouYouTuan exercises the full pipeline: init, crawl the
// GouYouTuan source, then read back and print the collected news.
func TestGouYouTuan(t *testing.T) {
	if err := Init(); err != nil {
		t.Fatal(err)
	}
	if err := catchGouYouTuan(); err != nil {
		t.Fatal(err)
	}
	news, err := getNews()
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(news)
}
// TestGolangTC exercises the full pipeline for the GolangTC source.
func TestGolangTC(t *testing.T) {
	if err := Init(); err != nil {
		t.Fatal(err)
	}
	if err := catchGolangTC(); err != nil {
		t.Fatal(err)
	}
	news, err := getNews()
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(news)
}
// TestStudyGolang exercises the full pipeline for the StudyGolang source.
func TestStudyGolang(t *testing.T) {
	if err := Init(); err != nil {
		t.Fatal(err)
	}
	if err := catchStudyGolang(); err != nil {
		t.Fatal(err)
	}
	news, err := getNews()
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(news)
}
|
package remark
import (
"net/http"
ctl "github.com/go-jar/gohttp/controller"
"blog/controller/api"
"blog/svc/remark"
)
// RemarkContext is the per-request action context for remark
// endpoints, extending the shared ApiContext with a remark service.
type RemarkContext struct {
	*api.ApiContext
	remarkSvc *remark.Svc
}
// BeforeAction runs the shared API pre-action hook, then binds a
// remark service tagged with this request's trace id.
func (c *RemarkContext) BeforeAction() {
	c.ApiContext.BeforeAction()
	c.remarkSvc = remark.NewSvc(c.TraceId)
}
// RemarkController serves remark API actions on top of the shared
// base controller.
type RemarkController struct {
	api.BaseController
}
// NewActionContext builds the RemarkContext for one request and seeds
// the response payload with the request's trace id.
func (rc *RemarkController) NewActionContext(req *http.Request, respWriter http.ResponseWriter) ctl.ActionContext {
	ctx := new(RemarkContext)
	ctx.ApiContext = rc.BaseController.NewActionContext(req, respWriter).(*api.ApiContext)
	ctx.ApiData.Data = map[string]interface{}{
		"RequestId": ctx.TraceId,
	}
	return ctx
}
|
package backend
import (
"net/http"
"github.com/gorilla/websocket"
)
// upgrader promotes plain HTTP requests to WebSocket connections.
// NOTE(review): CheckOrigin unconditionally returns true, which
// disables the browser Origin check and allows cross-site WebSocket
// connections from any page — confirm this is intentional before
// exposing the endpoint publicly.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return true
	}}
|
// This file was generated for SObject UserProvAccountStaging, API Version v43.0 at 2018-07-30 03:47:54.639201989 -0400 EDT m=+40.983346230
package sobjects
import (
"fmt"
"strings"
)
// UserProvAccountStaging is the generated Go mapping of the Salesforce
// UserProvAccountStaging SObject (API v43.0); field names and force
// tags mirror the Salesforce schema.
type UserProvAccountStaging struct {
	BaseSObject
	ConnectedAppId    string `force:",omitempty"`
	CreatedById       string `force:",omitempty"`
	CreatedDate       string `force:",omitempty"`
	ExternalEmail     string `force:",omitempty"`
	ExternalFirstName string `force:",omitempty"`
	ExternalLastName  string `force:",omitempty"`
	ExternalUserId    string `force:",omitempty"`
	ExternalUsername  string `force:",omitempty"`
	Id                string `force:",omitempty"`
	IsDeleted         bool   `force:",omitempty"`
	LastModifiedById  string `force:",omitempty"`
	LastModifiedDate  string `force:",omitempty"`
	LinkState         string `force:",omitempty"`
	Name              string `force:",omitempty"`
	SalesforceUserId  string `force:",omitempty"`
	Status            string `force:",omitempty"`
	SystemModstamp    string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type.
func (t *UserProvAccountStaging) ApiName() string {
	return "UserProvAccountStaging"
}
// String renders the record as a multi-line, human-readable dump:
// a header with id and name followed by one indented line per field.
func (t *UserProvAccountStaging) String() string {
	fields := []struct {
		label string
		value interface{}
	}{
		{"ConnectedAppId", t.ConnectedAppId},
		{"CreatedById", t.CreatedById},
		{"CreatedDate", t.CreatedDate},
		{"ExternalEmail", t.ExternalEmail},
		{"ExternalFirstName", t.ExternalFirstName},
		{"ExternalLastName", t.ExternalLastName},
		{"ExternalUserId", t.ExternalUserId},
		{"ExternalUsername", t.ExternalUsername},
		{"Id", t.Id},
		{"IsDeleted", t.IsDeleted},
		{"LastModifiedById", t.LastModifiedById},
		{"LastModifiedDate", t.LastModifiedDate},
		{"LinkState", t.LinkState},
		{"Name", t.Name},
		{"SalesforceUserId", t.SalesforceUserId},
		{"Status", t.Status},
		{"SystemModstamp", t.SystemModstamp},
	}
	var b strings.Builder
	b.WriteString(fmt.Sprintf("UserProvAccountStaging #%s - %s\n", t.Id, t.Name))
	for _, f := range fields {
		b.WriteString(fmt.Sprintf("\t%s: %v\n", f.label, f.value))
	}
	return b.String()
}
// UserProvAccountStagingQueryResponse wraps a SOQL query result whose
// records are UserProvAccountStaging rows.
type UserProvAccountStagingQueryResponse struct {
	BaseQuery
	Records []UserProvAccountStaging `json:"Records" force:"records"`
}
|
package main
import (
"fmt"
"errors"
)
// Add returns the sum of a and b. Both inputs must be non-negative;
// otherwise ret stays at its zero value and err describes the
// violation. (Named results are zero-initialized on entry.)
func Add(a, b int) (ret int, err error) {
	if a >= 0 && b >= 0 {
		return a + b, nil
	}
	err = errors.New("Should be non-negative nums")
	return ret, err
}
// unknownArgs demonstrates variadic parameters: the ...int arguments
// arrive as a slice, and a ...type parameter must come last. Each
// argument is printed with its position.
func unknownArgs(args ...int) {
	for i := 0; i < len(args); i++ {
		fmt.Println(i, args[i])
	}
}
// closure demonstrates closures: the inner function captures and
// mutates i (from the factory) and reads j (from this scope), and the
// captured i survives between calls.
func closure() {
	j := 5
	fmt.Println("START closure")
	// makeCounter returns a func() that increments its private i.
	makeCounter := func() func() {
		i := 10
		return func() {
			i++
			fmt.Printf("i, j: %d, %d\n", i, j)
		}
	}
	counter := makeCounter()
	counter() // i, j: 11, 5
	counter() // i, j: 12, 5
	counter() // i, j: 13, 5
}
// deferHander shows the LIFO ordering of deferred calls: both defers
// run after "fmt2", with "defer2" printed before "defer1".
func deferHander() {
	fmt.Println("fmt1")
	for _, msg := range []string{"defer1", "defer2"} {
		defer fmt.Println(msg)
	}
	fmt.Println("fmt2")
}
// errorHandler divides a by b, demonstrating panic/recover: dividing
// by zero panics, the deferred recover prints the panic value, and
// the named result c is returned at its zero value (0). Deferred
// statements still run after a panic, unwinding in LIFO order.
func errorHandler(a, b float64) (c float64) {
	defer func() {
		rec := recover()
		if rec == nil {
			return
		}
		fmt.Println("Recover get error info :", rec)
	}()
	if b != 0 {
		c = a / b
		return c
	}
	panic("b can't be 0")
}
// main drives the demos above: error returns, variadics, anonymous
// functions, closures, defer ordering and panic/recover.
func main() {
	if sum, err := Add(-1, 2); err != nil {
		fmt.Println("errors:", err)
	} else {
		fmt.Println("sum:", sum)
	}
	unknownArgs(1, 2, 3, 4, 5, 6)
	unknownArgs(5, 6, 8, 7, 9)
	// Anonymous function assigned to a variable, then invoked.
	add := func(x, y int) int { return x + y }
	fmt.Println(add(3, 4))
	closure()
	deferHander()
	fmt.Println(errorHandler(1, 0))
}
|
package main
import "fmt"
// main builds a small map and slice with literals and prints both.
func main() {
	scores := map[string]int{"a": 3, "c": 4}
	nums := []int{7, 4, 3}
	fmt.Println(scores)
	fmt.Println(nums)
}
|
/*
(Lattice Paths)
Starting in the top left corner of a 2x2 grid, there are 6 routes (without backtracking) to the bottom right corner.
How many routes are there through a 20x20 grid?
*/
package main
import (
"fmt"
)
// main prints the number of monotone lattice paths through a 20x20
// grid. The answer is C(40, 20): 40 steps total, choose which 20 go
// down. The value is now computed rather than hard-coded, so the grid
// size can be changed in one place.
func main() {
	fmt.Printf("40 choose 20: %d", binomial(40, 20))
}

// binomial returns C(n, k) using the multiplicative formula. Each
// intermediate product is a binomial coefficient, so the division is
// exact; C(40, 20) fits comfortably in a uint64.
func binomial(n, k int) uint64 {
	if k < 0 || k > n {
		return 0
	}
	if k > n-k {
		k = n - k // exploit symmetry to shorten the loop
	}
	result := uint64(1)
	for i := 1; i <= k; i++ {
		result = result * uint64(n-k+i) / uint64(i)
	}
	return result
}
|
package main
import (
"encoding/json"
"encoding/xml"
"fmt"
"log"
"net/http"
"os"
)
// RechargeSercice (name kept as-is, typo included, since other files
// may reference it) holds the URL prefix of the coupon service.
var RechargeSercice string

func init() {
	// NOTE(review): BuildServiceUrlPrefixFromEnv calls os.Getenv on the
	// names it receives, yet it is handed os.Getenv(...) VALUES here —
	// a double lookup. That is either a bug (the names should be passed
	// directly) or deliberate indirection where ENV_NAME_* variables
	// hold the real variable names — confirm which before changing.
	RechargeSercice = BuildServiceUrlPrefixFromEnv("CouponSercice", false, os.Getenv("ENV_NAME_DATAFOUNDRYCOUPON_SERVICE_HOST"), os.Getenv("ENV_NAME_DATAFOUNDRYCOUPON_SERVICE_PORT"))
}
// BuildServiceUrlPrefixFromEnv builds "http(s)://host[:port]" from the
// environment variables named addrEnv and portEnv, logging the result
// under name. A missing address only produces a warning; the (then
// host-less) prefix is still returned.
func BuildServiceUrlPrefixFromEnv(name string, isHttps bool, addrEnv string, portEnv string) string {
	addr := os.Getenv(addrEnv)
	if addr == "" {
		fmt.Printf("%s env should not be null", addrEnv)
	}
	if portEnv != "" {
		if port := os.Getenv(portEnv); port != "" {
			addr += ":" + port
		}
	}
	scheme := "http"
	if isHttps {
		scheme = "https"
	}
	prefix := fmt.Sprintf("%s://%s", scheme, addr)
	fmt.Printf("%s = %s\n", name, prefix)
	return prefix
}
// sayhelloName is a trivial handler: it parses the request form, logs
// it server-side, and greets the client.
func sayhelloName(w http.ResponseWriter, r *http.Request) {
	r.ParseForm() // forms are not parsed unless requested explicitly
	fmt.Println(r.Form) // server-side debug output
	fmt.Fprintf(w, "Hello weixin!") // this goes to the client
}
// receiveMessage mirrors the XML payload WeChat pushes to our
// endpoint; only the fields this service reads are declared.
type receiveMessage struct {
	FromUserName string `xml:"FromUserName"`
	MsgType      string `xml:"MsgType"`
	Event        string `xml:"Event"`
	CreateTime   int64  `xml:"CreateTime"`
	Content      string `xml:"Content"`
}
// follow is the WeChat callback endpoint. It echoes the "echostr"
// verification parameter, then (for POSTs) decodes the pushed XML
// event, replies to the sender, and — on a new subscription — asks the
// coupon service for a recharge coupon and messages the code to the
// user.
//
// NOTE(review): the echostr echo is written before the method check,
// so every POST response body starts with the (usually empty) echostr.
func follow(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, r.FormValue("echostr"))
	if r.Method != "POST" {
		return
	}
	r.ParseForm() // forms are not parsed unless requested explicitly
	//log.Println(r.Form) // server-side request dump, kept for debugging
	log.Println("From", r.RemoteAddr, r.Method, r.URL.RequestURI(), r.Proto)
	data, err := GetRequestData(r)
	if err != nil {
		return
	}
	//createtime
	common := &receiveMessage{}
	err = xml.Unmarshal(data, common)
	if err != nil {
		return
	}
	fmt.Println("------------>", common)
	// Always answer the sender first, regardless of event handling.
	err = replySomething(common)
	if err != nil {
		return
	}
	if common.MsgType == "event" {
		if common.Event == "subscribe" {
			// Ask the coupon service to provision one coupon for this user.
			var send = struct {
				OpenID      string `json:"openId"`
				ProvideTime int64  `json:"provideTime"`
			}{
				OpenID:      common.FromUserName,
				ProvideTime: common.CreateTime,
			}
			data, err = json.Marshal(&send)
			if err != nil {
				return
			}
			// log.Println("star", data)
			// NOTE(review): ":=" here shadows the outer data/err for the
			// rest of this block; all later reads use the shadowed copies.
			_, data, err := RemoteCallWithBody(
				"POST",
				RechargeSercice+"/charge/v1/provide/coupons?number=1",
				"",
				"",
				data,
				"application/json; charset=utf-8",
			)
			if err != nil {
				// log.Println("err", err)
				return
			}
			// log.Println("end", data)
			log.Println("pass")
			// Decode the service envelope; Data is filled into `three`.
			type two struct {
				IsProvide bool   `json:"isProvide"`
				Code      string `json:"code"`
			}
			type Result struct {
				Code uint        `json:"code"`
				Msg  string      `json:"msg"`
				Data interface{} `json:"data,omitempty"`
			}
			var three = two{}
			var card = Result{
				Data: &three,
			}
			log.Println("data:", string(data))
			err = json.Unmarshal(data, &card)
			if err != nil {
				log.Println("err2:", err)
				return
			}
			log.Println("Code", three.Code)
			log.Println("IsProvide", three.IsProvide)
			// Coupon was already handed out earlier: nothing to send.
			if three.IsProvide {
				return
			}
			// Push the coupon code to the user as a custom text message.
			type one struct {
				Content string `json:"content"`
			}
			var obj = struct {
				Touser  string `json:"touser"`
				Msgtype string `json:"msgtype"`
				Text    one    `json:"text"`
			}{
				Touser:  common.FromUserName,
				Msgtype: "text",
				Text: one{
					Content: "您的充值卡号为" + three.Code + ",有效期截止至2017年02月31日",
				},
			}
			data, err = json.Marshal(&obj)
			if err != nil {
				return
			}
			request, data, err := RemoteCallWithBody(
				"POST",
				"https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token="+gettoken(),
				"",
				"",
				data,
				"application/json; charset=utf-8",
			)
			if err != nil {
				return
			}
			log.Println("data", data)
			log.Println("request", request)
		}
	}
	// if checkSignature(r) {
	// fmt.Fprint(w, r.FormValue("echostr"))
	// } else {
	// fmt.Fprint(w, "hello wixin sb ") // written to the client
	// }
}
// sendMessage is the JSON body of a WeChat custom text message.
type sendMessage struct {
	Touser  string `json:"touser"`
	Msgtype string `json:"msgtype"`
	Text    text   `json:"text"`
}

// text is the nested content payload of a sendMessage.
type text struct {
	Content string `json:"content"`
}
// replySomething dispatches an incoming message to the matching reply
// strategy; only text messages are handled at the moment.
func replySomething(info *receiveMessage) error {
	fmt.Println("---->Into replySomething function")
	if info.MsgType != "text" {
		return nil
	}
	fmt.Println("Message type is text")
	return replyStrategyWithText(info)
}
// replyStrategyWithText answers a text message: an insult gets an
// insult back, everything else gets "Hello".
// Both branches previously built identical sendMessage values except
// for the content, so the duplication is folded into one construction.
func replyStrategyWithText(info *receiveMessage) error {
	content := "Hello"
	if info.Content == "傻逼" {
		content = "你才是傻逼"
	}
	replyInfo := &sendMessage{
		Touser:  info.FromUserName,
		Msgtype: "text",
		Text: text{
			Content: content,
		},
	}
	return replyContent(replyInfo)
}
// replyContent serializes info and posts it to the WeChat
// custom-message endpoint, logging the request metadata on success.
func replyContent(info *sendMessage) error {
	payload, err := json.Marshal(info)
	if err != nil {
		return err
	}
	request, _, err := RemoteCallWithBody(
		"POST",
		"https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token="+gettoken(),
		"",
		"",
		payload,
		"application/json; charset=utf-8",
	)
	if err != nil {
		return err
	}
	fmt.Println("request:", request)
	return nil
}
|
package auth
import (
"context"
"github.com/google/uuid"
"github.com/spf13/viper"
"net/http"
)
// userCtx is the private context-key type under which the current User is stored.
type userCtx struct{}
// GuestSession is middleware that guarantees a session for anonymous
// visitors: when the session cookie is absent it mints a guest token,
// sets the cookie, and stores an unauthorized User in the request
// context before invoking the next handler.
func GuestSession(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		tokenName := viper.GetString("oauth.sessionCookie.name")
		_, err := r.Cookie(tokenName)
		if err == http.ErrNoCookie {
			sessionId := uuid.New().String()
			token, err := GenerateTokenForGuest(sessionId)
			if err != nil {
				// todo: !!! — token generation failure is currently
				// ignored, so an empty/invalid cookie may be set.
			}
			SetSessionCookie(w, token, SessionCookieConfig{
				Name:   viper.GetString("oauth.sessionCookie.name"),
				Domain: viper.GetString("oauth.sessionCookie.domain"),
				Path:   viper.GetString("oauth.sessionCookie.path"),
				MaxAge: viper.GetInt("oauth.sessionCookie.maxAge"),
				Secure: viper.GetBool("oauth.sessionCookie.secure"),
			})
			// Record the fresh guest identity for downstream handlers.
			ctx = context.WithValue(ctx, userCtx{}, User{
				SessionId:    sessionId,
				IsAuthorized: false,
			})
			next.ServeHTTP(w, r.WithContext(ctx))
			return
		}
		// todo: add ban check
		w.Header().Set("Access-Control-Expose-Headers", "user_id")
		next.ServeHTTP(w, r.WithContext(ctx))
	}
	return http.HandlerFunc(fn)
}
// GetUser is middleware that, when a valid session cookie is present,
// extracts the User from its token and stores it in the request
// context. On a missing or invalid token it falls through and only
// exposes the cookie-name header.
func GetUser(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		tokenName := viper.GetString("oauth.sessionCookie.name")
		sessionCookie, err := r.Cookie(tokenName)
		if err == nil {
			user, _, errN := ExtractUserAndTokenFromStringToken(sessionCookie.Value)
			if errN == nil {
				ctx = context.WithValue(ctx, userCtx{}, user)
				next.ServeHTTP(w, r.WithContext(ctx))
				return
			}
		}
		// todo: add ban check
		w.Header().Set("Access-Control-Expose-Headers", tokenName)
		next.ServeHTTP(w, r.WithContext(ctx))
	}
	return http.HandlerFunc(fn)
}
|
package main
import (
"bytes"
"crypto/sha1"
"errors"
"flag"
"fmt"
"io"
"log"
"math"
"math/rand"
"net"
"os"
"strconv"
"strings"
"time"
"github.com/rakoo/rakoshare/pkg/id"
"github.com/rakoo/rakoshare/pkg/sharesession"
ed "github.com/agl/ed25519"
"github.com/nictuku/dht"
"github.com/zeebo/bencode"
)
var (
	// This error is returned when the incoming message is not of correct
	// type, ie EXTENSION (which is 20)
	errInvalidType     = errors.New("invalid message type")
	errMetadataMessage = errors.New("Couldn't create metadata message")
)

// useDHT toggles DHT-based peer discovery (on by default).
var useDHT = flag.Bool("useDHT", true, "Use DHT to get peers")
// ControlSession maintains the control-plane swarm for a share: it
// discovers peers (DHT and trackers), exchanges extension messages,
// and broadcasts signed infohash updates to connected peers.
type ControlSession struct {
	ID     id.Id
	Port   int
	PeerID string

	// A channel of all announces we get from peers.
	// If the announce is for the same torrent as the current one, then it
	// is not broadcasted in this channel.
	Torrents chan Announce

	// A channel of all new peers we acknowledge, in a ip:port format
	// The port is the one advertised
	NewPeers chan string

	// The current data torrent
	currentIH string
	// rev is the current revision string, "<counter>-<hash>".
	rev string

	ourExtensions map[int]string
	// header caches the 68-byte handshake header (see Header()).
	header []byte

	quit chan struct{}

	dht             *dht.DHT
	peers           *Peers
	peerMessageChan chan peerMessage

	trackers []string

	session *sharesession.Session
}
func NewControlSession(shareid id.Id, listenPort int, session *sharesession.Session, trackers []string) (*ControlSession, error) {
sid := "-tt" + strconv.Itoa(os.Getpid()) + "_" + strconv.FormatInt(rand.Int63(), 10)
// TODO: UPnP UDP port mapping.
cfg := dht.NewConfig()
cfg.Port = listenPort
cfg.NumTargetPeers = TARGET_NUM_PEERS
dhtNode, err := dht.New(cfg)
if err != nil {
log.Fatal("DHT node creation error", err)
}
current := session.GetCurrentIHMessage()
var currentIhMessage IHMessage
err = bencode.NewDecoder(strings.NewReader(current)).Decode(¤tIhMessage)
if err != nil {
log.Printf("Couldn't decode current message, starting from scratch: %s\n", err)
}
rev := "0-"
if currentIhMessage.Info.Rev != "" {
parts := strings.Split(currentIhMessage.Info.Rev, "-")
if len(parts) == 2 {
if _, err := strconv.Atoi(parts[0]); err == nil {
rev = currentIhMessage.Info.Rev
}
}
}
cs := &ControlSession{
Port: listenPort,
PeerID: sid[:20],
ID: shareid,
Torrents: make(chan Announce),
NewPeers: make(chan string),
dht: dhtNode,
peerMessageChan: make(chan peerMessage),
quit: make(chan struct{}),
ourExtensions: map[int]string{
1: "ut_pex",
2: "bs_metadata",
},
peers: newPeers(),
currentIH: currentIhMessage.Info.InfoHash,
rev: rev,
trackers: trackers,
session: session,
}
go cs.dht.Run()
cs.dht.PeersRequest(string(cs.ID.Infohash), true)
go cs.Run()
return cs, nil
}
// log writes message and any trailing values to the standard logger
// under the "[CONTROL]" prefix.
// FIX: the variadic values are now spread into Println — the old code
// passed the slice as one argument, printing it with brackets.
func (cs *ControlSession) log(message string, others ...interface{}) {
	args := append([]interface{}{"[CONTROL]", message}, others...)
	log.Println(args...)
}
// logf formats and logs a message under the "[CONTROL]" prefix.
// FIX: args is now spread into Sprintf — the old call passed the
// slice as a single operand, producing "%!s([]interface{}...)"-style
// output (go vet's printf check flags this).
func (cs *ControlSession) logf(format string, args ...interface{}) {
	log.Println("[CONTROL]", fmt.Sprintf(format, args...))
}
// Header lazily builds and caches the 68-byte BitTorrent handshake
// header for this session: protocol preamble, reserved feature bits,
// infohash (bytes 28..47) and peer id (bytes 48..67).
func (cs *ControlSession) Header() (header []byte) {
	if len(cs.header) > 0 {
		return cs.header
	}

	header = make([]byte, 68)
	copy(header, kBitTorrentHeader[0:])
	// Low bit of the last reserved byte — presumably the DHT flag
	// (BEP-5); AddPeer tests the same bit on incoming headers.
	header[27] = header[27] | 0x01
	// Support Extension Protocol (BEP-0010)
	header[25] |= 0x10

	copy(header[28:48], cs.ID.Infohash)
	copy(header[48:68], []byte(cs.PeerID))

	cs.header = header
	return
}
// deadlockDetector panics the process when Run stops sending
// heartbeats for ~15s, so the resulting stack dump shows where the
// main loop is stuck. A signal on quit stops the watchdog cleanly.
func (cs *ControlSession) deadlockDetector(heartbeat, quit chan struct{}) {
	lastHeartbeat := time.Now()

deadlockLoop:
	for {
		select {
		case <-quit:
			break deadlockLoop
		case <-heartbeat:
			lastHeartbeat = time.Now()
		case <-time.After(15 * time.Second):
			age := time.Now().Sub(lastHeartbeat)
			cs.log("Starvation or deadlock of main thread detected. Look in the stack dump for what Run() is currently doing.")
			cs.log("Last heartbeat", age.Seconds(), "seconds ago")
			panic("Killed by deadlock detector")
		}
	}
}
// Run is the session's main event loop: it polls the tracker, feeds
// DHT/tracker peer results into connection attempts, dispatches peer
// messages, and periodically rechokes and keep-alives until Quit.
func (cs *ControlSession) Run() {
	// Deadlock watchdog: Run proves liveness by sending heartbeats.
	heartbeat := make(chan struct{}, 1)
	quitDeadlock := make(chan struct{})
	go cs.deadlockDetector(heartbeat, quitDeadlock)

	rechokeChan := time.Tick(10 * time.Second)
	verboseChan := time.Tick(10 * time.Minute)
	keepAliveChan := time.Tick(60 * time.Second)
	// Start out polling tracker every 20 seconds until we get a response.
	// Maybe be exponential backoff here?
	retrackerChan := time.Tick(20 * time.Second)

	// NOTE(review): trackerInfoChan is local and never handed to the
	// tracker client, so the "<-trackerInfoChan" branch below looks
	// unreachable — confirm against NewTrackerClient's contract.
	trackerInfoChan := make(chan *TrackerResponse)
	trackerClient := NewTrackerClient("", [][]string{cs.trackers})
	trackerClient.Announce(cs.makeClientStatusReport("started"))

	for {
		select {
		case <-retrackerChan:
			trackerClient.Announce(cs.makeClientStatusReport(""))
		case dhtInfoHashPeers := <-cs.dht.PeersRequestResults:
			newPeerCount := 0
			// key = infoHash. The torrent client currently only
			// supports one download at a time, so let's assume
			// it's the case.
			for _, peers := range dhtInfoHashPeers {
				for _, peer := range peers {
					peer = dht.DecodePeerAddress(peer)
					if cs.hintNewPeer(peer) {
						newPeerCount++
					}
				}
			}
		case ti := <-trackerInfoChan:
			cs.logf("Got response from tracker: %#v\n", ti)
			newPeerCount := 0
			for _, peer := range ti.Peers {
				if cs.hintNewPeer(peer) {
					newPeerCount++
				}
			}
			for _, peer6 := range ti.Peers6 {
				if cs.hintNewPeer(peer6) {
					newPeerCount++
				}
			}
			cs.log("Contacting", newPeerCount, "new peers")
			// Clamp the tracker-suggested poll interval to [2min, 24h].
			interval := ti.Interval
			if interval < 120 {
				interval = 120
			} else if interval > 24*3600 {
				interval = 24 * 3600
			}
			cs.log("..checking again in", interval, "seconds.")
			// NOTE(review): if ti.Interval is a time.Duration this
			// over-multiplies by time.Second; correct only if it is an
			// integer count of seconds — confirm the field's type.
			retrackerChan = time.Tick(interval * time.Second)
			cs.log("Contacting", newPeerCount, "new peers")
		case pm := <-cs.peerMessageChan:
			peer, message := pm.peer, pm.message
			peer.lastReadTime = time.Now()
			err2 := cs.DoMessage(peer, message)
			if err2 != nil {
				if err2 != io.EOF {
					cs.log("Closing peer", peer.address, "because", err2)
				}
				cs.ClosePeer(peer)
			}
		case <-rechokeChan:
			// TODO: recalculate who to choke / unchoke
			heartbeat <- struct{}{}
			if cs.peers.Len() < TARGET_NUM_PEERS {
				go cs.dht.PeersRequest(string(cs.ID.Infohash), true)
			}
		case <-verboseChan:
			cs.log("Peers:", cs.peers.Len())
		case <-keepAliveChan:
			// Drop peers silent for over 3 minutes; ping the rest.
			now := time.Now()
			for _, peer := range cs.peers.All() {
				if peer.lastReadTime.Second() != 0 && now.Sub(peer.lastReadTime) > 3*time.Minute {
					// log.Println("Closing peer", peer.address, "because timed out.")
					cs.ClosePeer(peer)
					continue
				}
				go peer.keepAlive(now)
			}
		case <-cs.quit:
			cs.log("Quitting torrent session")
			quitDeadlock <- struct{}{}
			return
		}
	}
}
// Quit stops the Run loop, closes every connected peer and shuts the
// DHT node down. The initial send blocks until Run acknowledges it.
func (cs *ControlSession) Quit() error {
	cs.quit <- struct{}{}
	for _, peer := range cs.peers.All() {
		cs.ClosePeer(peer)
	}
	if cs.dht != nil {
		cs.dht.Stop()
	}
	return nil
}
// makeClientStatusReport assembles a tracker status report for this
// session; event may be "started", "" (regular poll), etc.
func (cs *ControlSession) makeClientStatusReport(event string) ClientStatusReport {
	return ClientStatusReport{
		Event:    event,
		InfoHash: string(cs.ID.Infohash),
		PeerId:   cs.PeerID,
		Port:     cs.Port,
	}
}
// connectToPeer dials peer ("ip:port") over the share's encrypted
// transport, exchanges handshake headers, and hands the connection to
// AddPeer. Self-connections are dropped.
// FIX: the connection is now closed on the header-write and
// header-read failure paths, which previously leaked it.
func (cs *ControlSession) connectToPeer(peer string) {
	conn, err := NewTCPConn([]byte(cs.ID.Psk[:]), peer)
	if err != nil {
		// log.Println("Failed to connect to", peer, err)
		return
	}

	header := cs.Header()
	_, err = conn.Write(header)
	if err != nil {
		cs.log("Failed to send header to", peer, err)
		conn.Close()
		return
	}

	theirheader, err := readHeader(conn)
	if err != nil {
		// log.Printf("Failed to read header from %s: %s\n", peer, err)
		conn.Close()
		return
	}

	peersInfoHash := string(theirheader[8:28])
	id := string(theirheader[28:48])

	// If it's us, we don't need to continue
	if id == cs.PeerID {
		conn.Close()
		return
	}

	btconn := &btConn{
		header:   theirheader,
		infohash: peersInfoHash,
		id:       id,
		conn:     conn,
	}
	cs.session.SavePeer(conn.RemoteAddr().String(), cs.peers.HasPeer)
	cs.AddPeer(btconn)
}
// backoffHintNewPeer retries hintNewPeer for peer in a background
// goroutine with exponentially growing pauses (20s, 40s, 80s, 160s).
func (cs *ControlSession) backoffHintNewPeer(peer string) {
	go func() {
		for attempt := 1; attempt < 5; attempt++ {
			cs.hintNewPeer(peer)
			pause := 10 * int(math.Pow(2, float64(attempt)))
			// cs.logf("backoff for %s: %d", peer, pause)
			time.Sleep(time.Duration(pause) * time.Second)
		}
	}()
}
// hintNewPeer starts an async connection attempt to peer ("ip:port")
// unless it is already known; reports whether the peer was new.
func (cs *ControlSession) hintNewPeer(peer string) (isnew bool) {
	if cs.peers.Know(peer, "") {
		return false
	}

	go cs.connectToPeer(peer)
	return true
}
// AcceptNewPeer finalizes an inbound handshake: drops
// self-connections, answers with our own header, then registers the
// peer via AddPeer.
func (cs *ControlSession) AcceptNewPeer(btconn *btConn) {
	// If it's us, we don't need to continue
	if btconn.id == cs.PeerID {
		btconn.conn.Close()
		return
	}
	_, err := btconn.conn.Write(cs.Header())
	if err != nil {
		cs.logf("Error writing header: %s\n", err)
		btconn.conn.Close()
		return
	}
	cs.AddPeer(btconn)
}
// AddPeer registers a handshaked connection as an active peer, spins
// up its reader/writer goroutines, advertises our extensions, and
// publishes the address on NewPeers. Over-capacity peers are rejected.
func (cs *ControlSession) AddPeer(btconn *btConn) {
	theirheader := btconn.header

	peer := btconn.conn.RemoteAddr().String()
	if cs.peers.Len() >= MAX_NUM_PEERS {
		cs.log("We have enough peers. Rejecting additional peer", peer)
		btconn.conn.Close()
		return
	}
	ps := NewPeerState(btconn.conn)
	ps.address = peer
	ps.id = btconn.id

	if keep := cs.peers.Add(ps); !keep {
		return
	}

	// Low bit of reserved byte 7 set → the peer advertises DHT support.
	// (The old comment said "if 128", but 0x01 is what is tested.)
	if int(theirheader[7])&0x01 == 0x01 {
		// It's OK if we know this node already. The DHT engine will
		// ignore it accordingly.
		go cs.dht.AddNode(ps.address)
	}

	go ps.peerWriter(cs.peerMessageChan)
	go ps.peerReader(cs.peerMessageChan)

	// Reserved bit 0x10 in byte 5 → extension protocol (BEP-10).
	if int(theirheader[5])&0x10 == 0x10 {
		ps.SendExtensions(cs.ourExtensions, 0)
	}

	go func() {
		cs.NewPeers <- peer
	}()
	cs.logf("AddPeer: added %s", btconn.conn.RemoteAddr().String())
}
// ClosePeer removes and closes peer, then keeps retrying its address
// in the background in case it comes back.
func (cs *ControlSession) ClosePeer(peer *peerState) {
	cs.peers.Delete(peer)
	peer.Close()
	cs.backoffHintNewPeer(peer.address)
}
// DoMessage validates and dispatches one raw peer message: nil means
// the peer's reader/writer goroutine exited (treated as EOF), an
// empty message is a keep-alive, and anything but an EXTENSION
// message is refused.
func (cs *ControlSession) DoMessage(p *peerState, message []byte) (err error) {
	if message == nil {
		return io.EOF // The reader or writer goroutine has exited
	}
	if len(message) == 0 { // keep alive
		return
	}

	if message[0] != EXTENSION {
		cs.logf("Wrong message type: %d\n", message[0])
		return errInvalidType
	}
	switch message[1] {
	case EXTENSION_HANDSHAKE:
		err = cs.DoHandshake(message[1:], p)
	default:
		err = cs.DoOther(message[1:], p)
	}
	return
}
func (cs *ControlSession) DoHandshake(msg []byte, p *peerState) (err error) {
var h ExtensionHandshake
err = bencode.NewDecoder(bytes.NewReader(msg[1:])).Decode(&h)
if err != nil {
cs.log("Error when unmarshaling extension handshake")
return err
}
p.theirExtensions = make(map[string]int)
for name, code := range h.M {
p.theirExtensions[name] = code
}
// Now that handshake is done and we know their extension, send the
// current ih message, if we have one
//
// We need to de-serialize the current ih message saved in db before
// passing it to the sender otherwise it is serialized into a string
var currentIHMessage IHMessage
currentFromSession := cs.session.GetCurrentIHMessage()
if len(currentFromSession) > 0 {
err = bencode.NewDecoder(strings.NewReader(currentFromSession)).Decode(¤tIHMessage)
if err != nil {
cs.log("Error deserializing current ih message to be resent", err)
} else {
p.sendExtensionMessage("bs_metadata", currentIHMessage)
}
}
return nil
}
// DoOther dispatches a non-handshake extension message by the
// extension id we advertised; unknown ids produce an error.
// errors.New(fmt.Sprintf(...)) replaced with the equivalent
// fmt.Errorf (staticcheck S1028); message texts are unchanged.
func (cs *ControlSession) DoOther(msg []byte, p *peerState) (err error) {
	if ext, ok := cs.ourExtensions[int(msg[0])]; ok {
		switch ext {
		case "bs_metadata":
			err = cs.DoMetadata(msg[1:], p)
		case "ut_pex":
			err = cs.DoPex(msg[1:], p)
		default:
			err = fmt.Errorf("unknown extension: %s", ext)
		}
	} else {
		err = fmt.Errorf("Unknown extension: %d", int(msg[0]))
	}

	return
}
// IHMessage is the signed "bs_metadata" extension payload announcing
// the share's current infohash.
type IHMessage struct {
	Info NewInfo `bencode:"info"`

	// The port we are listening on
	Port int64 `bencode:"port"`

	// The signature of the info dict
	Sig string `bencode:"sig"`
}

// NewInfo is the signed body of an IHMessage.
type NewInfo struct {
	InfoHash string `bencode:"infohash"`

	// The revision, ala CouchDB
	// ie <counter>-<hash>
	Rev string `bencode:"rev"`
}
// NewIHMessage assembles an IHMessage for infohash ih at revision rev
// and signs the bencoded info dict with priv (ed25519).
func NewIHMessage(port int64, ih, rev string, priv id.PrivKey) (mm IHMessage, err error) {
	info := NewInfo{
		InfoHash: ih,
		Rev:      rev,
	}

	var buf bytes.Buffer
	err = bencode.NewEncoder(&buf).Encode(info)
	if err != nil {
		log.Println("[CONTROL] Couldn't encode ih message, returning now")
		return mm, err
	}

	// The signature covers exactly the bencoded info dict.
	var privarg [ed.PrivateKeySize]byte
	copy(privarg[:], priv[:])
	sig := ed.Sign(&privarg, buf.Bytes())

	return IHMessage{
		Info: info,
		Port: port,
		Sig:  string(sig[:]),
	}, nil
}
// DoMetadata handles a peer's "bs_metadata" announce: it verifies the
// ed25519 signature over the re-bencoded info dict, persists the raw
// message, and republishes the announce (with the peer's advertised
// port) on cs.Torrents — unless our own revision is already newer.
func (cs *ControlSession) DoMetadata(msg []byte, p *peerState) (err error) {
	var message IHMessage
	err = bencode.NewDecoder(bytes.NewReader(msg)).Decode(&message)
	if err != nil {
		cs.log("Couldn't decode metadata message: ", err)
		return
	}
	if message.Port == 0 {
		return
	}

	// take his IP addr, use the advertised port
	ip := p.conn.RemoteAddr().(*net.TCPAddr).IP.String()
	port := strconv.Itoa(int(message.Port))
	peer := ip + ":" + port

	if cs.isNewerThan(message.Info.Rev) {
		return
	}

	// Re-encode only the info dict: that is the payload the signature covers.
	var tmpInfoBuf bytes.Buffer
	err = bencode.NewEncoder(&tmpInfoBuf).Encode(message.Info)
	if err != nil {
		cs.log("Couldn't encode ih message, returning now")
		return err
	}
	rawInfo := tmpInfoBuf.Bytes()

	pub := [ed.PublicKeySize]byte(cs.ID.Pub)
	var sig [ed.SignatureSize]byte
	copy(sig[0:ed.SignatureSize], message.Sig)
	ok := ed.Verify(&pub, rawInfo, &sig)
	if !ok {
		return errors.New("Bad Signature")
	}

	// NOTE(review): this second decode duplicates the one above, its
	// result is unused, and its error becomes the function's return
	// value; the SaveIHMessage error is discarded. Cleanup candidates.
	var test IHMessage
	err = bencode.NewDecoder(bytes.NewReader(msg)).Decode(&test)

	cs.session.SaveIHMessage(msg)

	cs.Torrents <- Announce{
		infohash: message.Info.InfoHash,
		peer:     peer,
	}

	return
}
// isNewerThan reports whether our local revision ("<counter>-<hash>")
// is at least as recent as the remote revision string rev. Any
// unparsable revision, on either side, counts as "we are newer".
func (cs *ControlSession) isNewerThan(rev string) bool {
	parseCounter := func(r string) (int, bool) {
		parts := strings.Split(r, "-")
		if len(parts) != 2 {
			return 0, false
		}
		n, convErr := strconv.Atoi(parts[0])
		if convErr != nil {
			return 0, false
		}
		return n, true
	}

	remoteCounter, ok := parseCounter(rev)
	if !ok {
		return true
	}
	localCounter, ok := parseCounter(cs.rev)
	if !ok {
		return true
	}
	return localCounter >= remoteCounter
}
// DoPex is a stub: ut_pex (peer exchange) messages are accepted but
// currently ignored.
func (cs *ControlSession) DoPex(msg []byte, p *peerState) (err error) {
	return
}
// Matches reports whether ih is this session's own infohash.
func (cs *ControlSession) Matches(ih string) bool {
	return string(cs.ID.Infohash) == ih
}
// SetCurrent publishes ih as the share's new current infohash: it
// bumps the revision counter, chains the revision hash, signs and
// persists the new message, then broadcasts it to connected peers.
// It is a no-op when ih is already current.
func (cs *ControlSession) SetCurrent(ih string) error {
	if cs.currentIH == ih {
		return nil
	}

	parts := strings.Split(cs.rev, "-")
	if len(parts) != 2 {
		cs.logf("Invalid rev: %s\n", cs.rev)
		parts = []string{"0", ""}
	}
	counter, err := strconv.Atoi(parts[0])
	if err != nil {
		counter = 0
	}
	newCounter := strconv.Itoa(counter + 1)
	cs.logf("Updating rev with ih %x", ih)
	// The new hash chains the previous one: sha1(ih + previous hash).
	newRev := newCounter + "-" + fmt.Sprintf("%x", sha1.Sum([]byte(ih+parts[1])))

	mess, err := NewIHMessage(int64(cs.Port), ih, newRev, cs.ID.Priv)
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	err = bencode.NewEncoder(&buf).Encode(mess)
	if err != nil {
		return err
	}
	err = cs.session.SaveIHMessage(buf.Bytes())
	if err != nil {
		return err
	}

	cs.currentIH = ih
	cs.rev = newRev

	cs.broadcast(mess)
	return nil
}
// broadcast sends message to every connected peer that advertised the
// bs_metadata extension during its handshake.
func (cs *ControlSession) broadcast(message IHMessage) {
	for _, ps := range cs.peers.All() {
		if _, ok := ps.theirExtensions["bs_metadata"]; !ok {
			continue
		}
		ps.sendExtensionMessage("bs_metadata", message)
	}
}
|
package api
import (
"time"
"github.com/google/uuid"
)
// NewBusinessRequest is the request body for creating a business.
type NewBusinessRequest struct{
	BusinessName string `json:"business_name" binding:"required"`
	BusinessURI string `json:"business_uri" binding:"required"`
	Metadata map[string]interface{} `json:"metadata"` // free-form metadata, optional
}
// BusinessUpdateRequest is the request body for updating a business URI.
type BusinessUpdateRequest struct{
	BusinessURI string `json:"business_uri" binding:"required"`
}
// BusinessMetaPatchRequest carries a list of patch operations applied to a
// business's metadata. The operation entry shape is not constrained here.
type BusinessMetaPatchRequest struct{
	Operation []map[string]interface{} `json:"operation" binding:"required"`
}
// BusinessInfo is the API representation of a stored business.
type BusinessInfo struct{
	BusinessName string `json:"business_name"`
	BusinessId uuid.UUID `json:"business_id"`
	BusinessURI string `json:"business_uri"`
	LastUpdate time.Time `json:"last_update"`
	Added time.Time `json:"added"`
	Metadata map[string]interface{} `json:"metadata"`
}
// Notification is an event notification delivered to clients.
type Notification struct{
	NotificationId uuid.UUID `json:"notification_id"`
	EventTimestamp time.Time `json:"event_timestamp"`
	Notification map[string]interface{} `json:"notification"` // raw event payload
	Hash string `json:"hash"` // integrity hash of the payload; algorithm not visible here
}
|
package main
import (
"fmt"
)
// channels block i.e. any send(or receive) in the channel blocks the further execution of
// go-routine until another go-routing is receiving(or sending) data from the channel
// like here, main1() will not work, as on channel, 42 is being written(send), but there's
// no go-routine pulling data from the channel, so deadlock
// main1 intentionally deadlocks: it sends on an unbuffered channel with no
// other goroutine ready to receive, so the send blocks forever. It exists
// only to demonstrate the failure mode described in the comments above;
// it is never called.
func main1() {
	// create a channel of type int
	// means this channel can be used to transfer single int value between go-routines
	c := make(chan int)
	c <- 42          // blocks forever: no concurrent receiver (fatal deadlock)
	fmt.Println(<-c) // never reached
}
// in main(), there are two go-routines. One is putting data in the channel, and another is reading
// from the channel
// main demonstrates a working unbuffered-channel handoff: a separate
// goroutine performs the send while main blocks on the receive.
func main() {
	ch := make(chan int)
	// The sender runs concurrently, so the blocking send pairs up with the
	// blocking receive below instead of deadlocking.
	go func() { ch <- 42 }()
	fmt.Println(<-ch)
}
|
package main
import (
"gopkg.in/jdkato/prose.v2"
"fmt"
"math/rand"
"time"
)
var dataFileLoc string = "data/dataset.json"
var typeFileLoc string = "data/types.json"
var submissionFileLoc string = "output.json"
// main reads the dataset and type lists, runs NER over each entry, picks a
// replacement entity/type per entry, and writes the results to output.json.
func main() {
	start := time.Now()
	// Bug fix: defer evaluates its arguments immediately, so the original
	// `defer fmt.Println(time.Since(start))` always printed ~0s. Wrapping in
	// a closure defers the time.Since call itself.
	defer func() { fmt.Println(time.Since(start)) }()
	fmt.Println("Process begins\n")
	defer fmt.Println("\nProcess Terminated")
	datasetJson := ReadJson(dataFileLoc)
	typesJson := ReadJson(typeFileLoc)
	var s []Output
	// Placeholder patterns matched against recognized entity text.
	// NOTE(review): the trailing apostrophe in "XXXX XXXX'" looks accidental —
	// confirm against the dataset's actual placeholder format.
	patterns := []string{
		"XXXX XXXX XXXX XXXX",
		"XXXX XXXX XXXX",
		"XXXX XXXX'",
		"XXXX",
		"XX/XX/XXXX",
		"XX/XX/"}
	for i := 0; i < len(datasetJson.Data); i++ {
		var obj Output
		var entity string
		var types string
		found := false
		entry := datasetJson.Data[i]
		fmt.Println("Entry No: ", i)
		doc, _ := prose.NewDocument(entry)
		for _, ent := range doc.Entities() {
			if InArray(ent.Text, patterns) {
				// Map the NER label to a type from types.json.
				// NOTE(review): indices 5/19/20 are magic — confirm they match
				// the layout of data/types.json.
				switch ent.Label {
				case "PERSON":
					types = typesJson.Data[5]
					entity = RandomStr(typesJson.Data[5])
				case "ORGANIZATION":
					types = typesJson.Data[19] + " " + typesJson.Data[20]
					entity = RandomStr(typesJson.Data[19])
				case "GPE":
					types = typesJson.Data[19] + " " + typesJson.Data[20]
					entity = RandomStr(typesJson.Data[19])
				}
				found = true
			}
		}
		if !found {
			// No recognized placeholder entity: fall back to a random type.
			ran := rand.Intn(len(typesJson.Data))
			types = typesJson.Data[ran]
			entity = RandomStr(types)
		}
		obj.Text = Replace(entry, entity)
		obj.Entity = entity
		obj.Types = types
		s = append(s, obj)
	}
	CreateJson(s)
}
|
package bridge
import (
"errors"
"go.uber.org/zap"
"rocketmqtt/conf"
"strings"
)
const (
Kafka = iota
Rocketmq
)
var Delivers deliver
type PublishMessage interface {
publish(topics map[string]bool, key string, msg *Elements) error
}
type deliver struct {
rocketMQClients map[string]*rocketMQ
kafkaClients map[string]*kafka
}
var targets targetMemPool
// ExistTargets reports whether at least one deliver backend (kafka or
// rocketmq) has been configured.
func (d *deliver) ExistTargets() bool {
	return d.kafkaClients != nil || d.rocketMQClients != nil
}
// GetrocketMQClients returns the configured rocketMQ clients keyed by target
// name. May be nil when no rocketmq target is configured.
// NOTE(review): Go convention would name this RocketMQClients (no Get prefix,
// consistent initialism), but renaming would break external callers.
func (d *deliver) GetrocketMQClients() map[string]*rocketMQ {
	return d.rocketMQClients
}
// Publish routes an mqtt event to the configured deliver backends.
// For Publish actions it computes (and caches per client+topic) a bitmask of
// which conf.RunConfig.DeliverMap entries match the topic, then walks the
// mask and forwards the message to the matching kafka/rocketmq clients.
// Only the error of the last failing backend write is returned.
func (d *deliver) Publish(e *Elements) error {
	var bitMark int64
	switch e.Action {
	case Connect:
		//log.Debug("Connect", zap.String(e.ClientID, e.Action))
		//if config.ConnectTopic != "" {
		//	topics[config.ConnectTopic] = true
		//}
	case Publish:
		// foreach regexp map config
		if v, ok := targets.getMatch(e.Topic); ok {
			bitMark = v
		} else {
			bitMark = 0
			// Build the mask: a set bit then a left shift per target, so
			// DeliverMap[0] ends up in the highest used bit and bit 0 is
			// always zero (one trailing shift).
			for _, target := range conf.RunConfig.DeliverMap {
				match := matchTopicSplit(target.NameSplit, e.Topic)
				if match {
					bitMark = bitMark | 1
				}
				bitMark = bitMark << 1
			}
			targets.storeClientTopicMatch(e.ClientID, e.Topic, bitMark)
		}
	case Subscribe:
		//log.Debug("Connect", zap.String(e.ClientID, e.Action))
		//if config.SubscribeTopic != "" {
		//	topics[config.SubscribeTopic] = true
		//}
	case Unsubscribe:
		//log.Debug("Connect", zap.String(e.ClientID, e.Action))
		//if config.UnsubscribeTopic != "" {
		//	topics[config.UnsubscribeTopic] = true
		//}
	case Disconnect:
		//log.Debug("Connect", zap.String(e.ClientID, e.Action))
		//if config.DisconnectTopic != "" {
		//	topics[config.DisconnectTopic] = true
		//}
		// Drop this client's cached topic matches on disconnect.
		targets.deleteClient(e.ClientID)
	default:
		return errors.New("error action: " + e.Topic)
	}
	var err error
	if bitMark == 0 {
		log.Warn("No match deliver rule", zap.String("ClientID", e.ClientID), zap.String("Topic", e.Topic))
		return nil
	}
	// Consume the mask from the low bit upward. The first iteration (i ==
	// len(DeliverMap)) always sees the trailing zero bit from the build loop
	// above, so DeliverMap[i] is never indexed out of range; from then on
	// bit i corresponds to DeliverMap[i].
	for i := len(conf.RunConfig.DeliverMap); i >= 0; i-- {
		bit := bitMark & 1
		if bit == 1 {
			dm := conf.RunConfig.DeliverMap[i]
			switch dm.Plugin {
			case "kafka":
				err = Delivers.kafkaClients[dm.Target].publish(dm.Topic, e.ClientID, e)
			case "rocketmq":
				err = Delivers.rocketMQClients[dm.Target].publish(dm.Topic, e.ClientID, e, dm.Tag)
			}
			// NOTE(review): err is overwritten each iteration, so earlier
			// backend failures are silently dropped — confirm intended.
		}
		bitMark = bitMark >> 1
	}
	return err
}
// match reports whether an mqtt subscription filter (already split into
// levels) matches a topic (also split). "#" matches any remaining levels,
// "+" matches exactly one level, anything else must match literally.
func match(subTopic []string, topic []string) bool {
	for {
		if len(subTopic) == 0 {
			// Filter exhausted: only an exactly-exhausted topic matches.
			return len(topic) == 0
		}
		if subTopic[0] == "#" {
			// Multi-level wildcard swallows everything that remains,
			// including the empty remainder.
			return true
		}
		if len(topic) == 0 {
			return false
		}
		if subTopic[0] != "+" && subTopic[0] != topic[0] {
			return false
		}
		subTopic, topic = subTopic[1:], topic[1:]
	}
}
// matchTopic reports whether the "/"-separated subscription filter subTopic
// matches topic, splitting both before delegating to match.
func matchTopic(subTopic string, topic string) bool {
	return match(strings.Split(subTopic, "/"), strings.Split(topic, "/"))
}
// matchTopicSplit is matchTopic for a filter that is already split into
// levels (kept pre-split in config to avoid re-splitting per message).
func matchTopicSplit(subTopic *[]string, topic string) bool {
	return match(*subTopic, strings.Split(topic, "/"))
}
|
package config
import (
"reflect"
"github.com/bonjourmalware/melody/internal/tagparser"
)
// LoadYAMLTagsOf loads the yaml tags of a struct
// LoadYAMLTagsOf loads the yaml tags of a struct.
// what must be a struct value; its fields' yaml tag values are collected in
// declaration order and fields without a yaml tag are skipped. The error (if
// any) comes from parsing a malformed tag value.
func LoadYAMLTagsOf(what interface{}) ([]string, error) {
	var tags []string
	// Hoisted: reflect.TypeOf(what) was previously re-evaluated twice per
	// loop iteration.
	t := reflect.TypeOf(what)
	for i := 0; i < t.NumField(); i++ {
		ruleTag := t.Field(i).Tag
		if _, exists := ruleTag.Lookup("yaml"); !exists {
			continue
		}
		tagValue, err := tagparser.ParseYamlTagValue(ruleTag)
		if err != nil {
			return tags, err
		}
		tags = append(tags, tagValue)
	}
	return tags, nil
}
// LoadValidConfigKeysMap returns a map of the json keys present in the Config struct
func LoadValidConfigKeysMap() map[string]interface{} {
configKeysMap := make(map[string]interface{})
tags, err := LoadYAMLTagsOf(Config{})
if err != nil {
panic(err)
}
for _, tag := range tags {
configKeysMap[tag] = new(interface{})
}
return configKeysMap
}
|
package main
import (
"fmt"
"time"
)
// go routineによるセマフォの実装パターンを試す
// 最大入室5人までチャットルームでログイン中ユーザと総数を出す
const (
maxConcurrency int = 5
jobsize int = 500
)
var (
doneIds chan string = make(chan string, jobsize)
nowloginIDs chan string = make(chan string, jobsize)
sem chan struct{} = make(chan struct{}, maxConcurrency)
)
// main launches jobsize concurrent logins (bounded to maxConcurrency by the
// sem channel inside login) and waits until every one has reported done.
func main() {
	// Issue jobsize zero-padded 20-digit IDs.
	ids := make([]string, 0, jobsize)
	for i := 1; i <= jobsize; i++ {
		ids = append(ids, fmt.Sprintf("%020d", i))
	}
	// Fire off every login; concurrency is limited inside login via sem.
	for _, id := range ids {
		go login(id)
	}
	// Wait for all jobs: one receive per job (doneIds has capacity jobsize).
	for i := 0; i < cap(doneIds); i++ {
		<-doneIds
	}
	println("done!!!")
}
// login simulates one user session: it takes a semaphore slot (blocking when
// maxConcurrency sessions are active), "works" for 1s, reports itself via
// nowloginIDs, then releases the slot and signals completion on doneIds.
func login(id string) {
	sem <- struct{}{} // acquire: at most maxConcurrency goroutines proceed
	// この中の処理はmaxConcurrencyの数しか同時実行できない.
	// (Only maxConcurrency goroutines can run this section concurrently.)
	time.Sleep(time.Millisecond * 1000)
	nowloginIDs <- id
	// len(nowloginIDs) approximates the number of currently logged-in users.
	fmt.Printf("logined.id=%s counter=%d\n", id, len(nowloginIDs))
	// NOTE(review): this receive removes *some* id from nowloginIDs (FIFO),
	// not necessarily our own — fine for counting, confirm if ids matter.
	<-nowloginIDs
	<-sem // release the semaphore slot
	doneIds <- id
}
// enque returns ss with s appended at the back.
func enque(ss []string, s string) []string {
	return append(ss, s)
}
// deque returns ss without its front element; an empty slice is returned
// unchanged.
func deque(ss []string) []string {
	if len(ss) > 0 {
		return ss[1:]
	}
	return ss
}
|
package model
import (
"html/template"
"time"
)
// HomePageData is the view model for the home page: the list of articles to
// render.
type HomePageData struct {
	Articles []Article `json:"articles"`
}
type Article struct {
tableName struct{} `sql:"articles,alias:article"`
ID int64 `json:"id" sql:",pk"`
Title string `json:"title" sql:",notnull"`
Author string `json:"author" sql:",notnull"`
Date time.Time `json:"date" sql:",notnull" time_format:"2066-02-01"`
CommentsCount int `json:"comments_count" sql:",notnull"`
Text template.HTML `json:"text" sql:",notnull"`
}
|
package databroker_test
import (
"context"
"errors"
"sync/atomic"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/pomerium/pomerium/pkg/grpc/databroker"
"github.com/pomerium/pomerium/pkg/grpc/databroker/mock_databroker"
)
// TestLeaser exercises databroker.Leaser behavior against a mocked
// DataBrokerServiceClient: acquiring a lease, retrying acquisition on
// transient Unavailable errors, and renewing while the leased function runs.
func TestLeaser(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// Sentinel returned by the handler so Run's exit can be asserted.
	exitErr := errors.New("EXIT")
	// Happy path: one acquire, handler runs once, one release.
	t.Run("acquires lease", func(t *testing.T) {
		client := mock_databroker.NewMockDataBrokerServiceClient(ctrl)
		client.EXPECT().
			AcquireLease(gomock.Any(), &databroker.AcquireLeaseRequest{
				Name: "TEST",
				Duration: durationpb.New(time.Second * 30),
			}).
			Return(&databroker.AcquireLeaseResponse{
				Id: "lease1",
			}, nil).
			Times(1)
		client.EXPECT().
			ReleaseLease(gomock.Any(), &databroker.ReleaseLeaseRequest{
				Name: "TEST",
				Id: "lease1",
			}).
			Times(1)
		handler := mock_databroker.NewMockLeaserHandler(ctrl)
		handler.EXPECT().
			GetDataBrokerServiceClient().
			Return(client).
			AnyTimes()
		handler.EXPECT().
			RunLeased(gomock.Any()).
			Return(exitErr).
			Times(1)
		leaser := databroker.NewLeaser("TEST", time.Second*30, handler)
		err := leaser.Run(context.Background())
		assert.Equal(t, exitErr, err)
	})
	// Two Unavailable responses, then success: Run must keep retrying.
	t.Run("retries acquire", func(t *testing.T) {
		client := mock_databroker.NewMockDataBrokerServiceClient(ctrl)
		client.EXPECT().
			AcquireLease(gomock.Any(), &databroker.AcquireLeaseRequest{
				Name: "TEST",
				Duration: durationpb.New(time.Second * 30),
			}).
			Return(nil, status.Error(codes.Unavailable, "UNAVAILABLE")).
			Times(2)
		client.EXPECT().
			AcquireLease(gomock.Any(), &databroker.AcquireLeaseRequest{
				Name: "TEST",
				Duration: durationpb.New(time.Second * 30),
			}).
			Return(&databroker.AcquireLeaseResponse{
				Id: "lease1",
			}, nil).
			Times(1)
		client.EXPECT().
			ReleaseLease(gomock.Any(), &databroker.ReleaseLeaseRequest{
				Name: "TEST",
				Id: "lease1",
			}).
			Times(1)
		handler := mock_databroker.NewMockLeaserHandler(ctrl)
		handler.EXPECT().
			GetDataBrokerServiceClient().
			Return(client).
			AnyTimes()
		handler.EXPECT().
			RunLeased(gomock.Any()).
			Return(exitErr).
			Times(1)
		leaser := databroker.NewLeaser("TEST", time.Second*30, handler)
		err := leaser.Run(context.Background())
		assert.Equal(t, exitErr, err)
	})
	// A 1ms lease with a 20ms handler forces at least one RenewLease call.
	t.Run("renews", func(t *testing.T) {
		client := mock_databroker.NewMockDataBrokerServiceClient(ctrl)
		client.EXPECT().
			AcquireLease(gomock.Any(), &databroker.AcquireLeaseRequest{
				Name: "TEST",
				Duration: durationpb.New(time.Millisecond),
			}).
			Return(&databroker.AcquireLeaseResponse{
				Id: "lease1",
			}, nil).
			Times(1)
		client.EXPECT().
			RenewLease(gomock.Any(), &databroker.RenewLeaseRequest{
				Name: "TEST",
				Id: "lease1",
				Duration: durationpb.New(time.Millisecond),
			}).
			MinTimes(1)
		client.EXPECT().
			ReleaseLease(gomock.Any(), &databroker.ReleaseLeaseRequest{
				Name: "TEST",
				Id: "lease1",
			}).
			Times(1)
		handler := mock_databroker.NewMockLeaserHandler(ctrl)
		handler.EXPECT().
			GetDataBrokerServiceClient().
			Return(client).
			AnyTimes()
		handler.EXPECT().
			RunLeased(gomock.Any()).
			DoAndReturn(func(ctx context.Context) error {
				time.Sleep(time.Millisecond * 20)
				return exitErr
			}).
			Times(1)
		leaser := databroker.NewLeaser("TEST", time.Millisecond, handler)
		err := leaser.Run(context.Background())
		assert.Equal(t, exitErr, err)
	})
}
// TestLeasers verifies that NewLeasers runs all leased functions under a
// single lease: fn1 exits with an error, which must cancel fn2's context and
// propagate as Run's result; the counter proves both functions ran.
func TestLeasers(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	exitErr := errors.New("EXIT")
	client := mock_databroker.NewMockDataBrokerServiceClient(ctrl)
	client.EXPECT().
		AcquireLease(gomock.Any(), &databroker.AcquireLeaseRequest{
			Name: "TEST",
			Duration: durationpb.New(time.Second * 30),
		}).
		Return(&databroker.AcquireLeaseResponse{
			Id: "lease1",
		}, nil).
		Times(1)
	client.EXPECT().
		ReleaseLease(gomock.Any(), &databroker.ReleaseLeaseRequest{
			Name: "TEST",
			Id: "lease1",
		}).
		Times(1)
	var counter int64
	// fn1 exits immediately with the sentinel error.
	fn1 := func(ctx context.Context) error {
		atomic.AddInt64(&counter, 1)
		return exitErr
	}
	// fn2 blocks until fn1's failure cancels the shared context.
	fn2 := func(ctx context.Context) error {
		atomic.AddInt64(&counter, 10)
		<-ctx.Done()
		return ctx.Err()
	}
	leaser := databroker.NewLeasers("TEST", time.Second*30, client, fn1, fn2)
	err := leaser.Run(context.Background())
	assert.Equal(t, exitErr, err)
	// 1 + 10: both functions must have started exactly once.
	assert.EqualValues(t, 11, counter)
}
|
/**
* @Author: XGH
* @Email: 55821284@qq.com
* @Date: 2020/5/14 14:00
*/
package main
import "testing"
// TestAli runs table-driven cases against Ali.
func TestAli(t *testing.T) {
	cases := []struct {
		name string
		str  string
		want bool
	}{
		{name: "支付宝测试", str: "", want: true},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := Ali(tc.str); got != tc.want {
				t.Errorf("Ali() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestWeChat runs table-driven cases against WeChat.
func TestWeChat(t *testing.T) {
	cases := []struct {
		name string
		str  string
		want bool
	}{
		{name: "微信测试", str: "", want: true},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := WeChat(tc.str); got != tc.want {
				t.Errorf("WeChat() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package etcd
import (
"fmt"
"testing"
"golang.org/x/net/context"
"github.com/zdao-pro/sky_blue/pkg/naming"
"go.etcd.io/etcd/clientv3"
)
// TestRegister registers an instance against a local etcd (127.0.0.1:2379).
// It requires a running etcd; connection failures now fail the test instead
// of being printed and ignored.
func TestRegister(t *testing.T) {
	c := clientv3.Config{
		Endpoints: []string{"127.0.0.1:2379"},
	}
	b, err := New(&c)
	if nil != err {
		// Bug fix: the original only printed the error and then called
		// b.Register on a possibly-nil value, panicking the test run.
		t.Fatalf("New: %v", err)
	}
	in := naming.Instance{
		AppID: "jim",
		Hostname: "hh",
	}
	if _, err = b.Register(context.Background(), &in); err != nil {
		t.Fatalf("Register: %v", err)
	}
}
|
// Package main applies a transform.Transformation to a geometry.Point with more
// documentation from the help flag.
package main
import (
"errors"
"fmt"
"os"
"github.com/jwowillo/viztransform/cmd"
"github.com/jwowillo/viztransform/parse"
"github.com/jwowillo/viztransform/transform"
)
// main applies the transform.Transformation read from STDIN to the
// geometry.Point in the args.
func main() {
if len(os.Args) != 2 {
cmd.Fail(errArgs)
}
arg := os.Args[1]
p, err := parse.Point(arg)
if err != nil {
cmd.Fail(err)
}
t, err := parse.Transformation(os.Stdin)
if err != nil {
cmd.Fail(err)
}
fmt.Println(transform.Apply(t, p))
}
// errArgs is the error when not exactly a single geometry.Point is passed.
var errArgs = errors.New("must pass point to transform")
// init registers the usage text with the cmd package before main runs.
func init() {
	cmd.Init(usage)
}
// usage is the help text printed by the cmd package.
const usage = `viztransform_apply usage:
	viztransform_apply '(x y)'
The passed point will be transformed by a transformation read from
STDIN as a newline-separated and EOF-terminated list of transformations
to be composed.`
|
package main
import "fmt"
// main reads two integers separated by a dot from STDIN and prints them in
// swapped order, dot-separated.
func main() {
	var left, right int
	// Errors are deliberately ignored: unset values stay zero.
	fmt.Scanf("%d.%d", &left, &right)
	fmt.Printf("%d.%d\n", right, left)
}
|
package config
import (
"time"
"github.com/kelseyhightower/envconfig"
)
// Config represents the configuration required for florence
type Config struct {
BindAddr string `envconfig:"BIND_ADDR"`
APIRouterURL string `envconfig:"API_ROUTER_URL"`
APIRouterVersion string `envconfig:"API_ROUTER_VERSION"`
FrontendRouterURL string `envconfig:"ROUTER_URL"`
DatasetControllerURL string `envconfig:"DATASET_CONTROLLER_URL"`
TableRendererURL string `envconfig:"TABLE_RENDERER_URL"`
GracefulShutdownTimeout time.Duration `envconfig:"GRACEFUL_SHUTDOWN_TIMEOUT"`
HealthCheckInterval time.Duration `envconfig:"HEALTHCHECK_INTERVAL"`
HealthCheckCriticalTimeout time.Duration `envconfig:"HEALTHCHECK_CRITICAL_TIMEOUT"`
SharedConfig SharedConfig
}
// SharedConfig represents the configuration made available to the client-side application from the server.
// Each flag toggles a feature; json tags define the shape sent to the client.
type SharedConfig struct {
	EnableDatasetImport bool `envconfig:"ENABLE_DATASET_IMPORT" json:"enableDatasetImport"`
	EnableNewSignIn bool `envconfig:"ENABLE_NEW_SIGN_IN" json:"enableNewSignIn"`
	EnableNewUpload bool `envconfig:"ENABLE_NEW_UPLOAD" json:"enableNewUpload"`
	EnableNewInteractives bool `envconfig:"ENABLE_NEW_INTERACTIVES" json:"enableNewInteractives"`
	// NOTE(review): env var is ENABLE_PERMISSION_API (singular) while the
	// field and json key say Permissions — confirm which is canonical.
	EnablePermissionsAPI bool `envconfig:"ENABLE_PERMISSION_API" json:"enablePermissionsAPI"`
	EnableCantabularJourney bool `envconfig:"ENABLE_CANTABULAR_JOURNEY" json:"enableCantabularJourney"`
}
// cfg caches the loaded configuration; populated on the first successful Get.
var cfg *Config
// Get retrieves the config from the environment for florence
func Get() (*Config, error) {
if cfg != nil {
return cfg, nil
}
cfg = &Config{
BindAddr: ":8080",
APIRouterURL: "http://localhost:23200",
APIRouterVersion: "v1",
FrontendRouterURL: "http://localhost:20000",
DatasetControllerURL: "http://localhost:24000",
TableRendererURL: "http://localhost:23300",
SharedConfig: SharedConfig{EnableDatasetImport: true, EnableNewSignIn: false, EnableNewUpload: false, EnableNewInteractives: false, EnablePermissionsAPI: false, EnableCantabularJourney: false},
GracefulShutdownTimeout: 10 * time.Second,
HealthCheckInterval: 30 * time.Second,
HealthCheckCriticalTimeout: 90 * time.Second,
}
return cfg, envconfig.Process("", cfg)
}
|
package main
import "fmt"
// 647. 回文子串
// 给定一个字符串,你的任务是计算这个字符串中有多少个回文子串。
// 具有不同开始位置或结束位置的子串,即使是由相同的字符组成,也会被计为是不同的子串。
// 注意:
// 输入的字符串长度不会超过1000。
// https://leetcode-cn.com/problems/palindromic-substrings/
// main prints the palindromic-substring counts for a few sample inputs.
// Expected outputs: 6, 3, 5, 6.
func main() {
	for _, s := range []string{"aaa", "abc", "abac", "fdsklf"} {
		fmt.Println(countSubstrings(s))
	}
}
// 法一:暴力,best
// 以s中的每个字符为回文串中点,检查计算
// countSubstrings returns the number of palindromic substrings of s, where
// substrings with different start or end positions count separately.
// Strategy: expand around each possible palindrome center (each character
// for odd lengths, each adjacent pair for even lengths).
func countSubstrings(s string) (count int) {
	n := len(s)
	if n == 0 {
		return 0
	}
	if n == 1 {
		return 1
	}
	// expand counts palindromes growing outward from the given bounds.
	expand := func(l, r int) (c int) {
		for l >= 0 && r < n && s[l] == s[r] {
			c++
			l--
			r++
		}
		return c
	}
	for i := 0; i < n; i++ {
		count++                    // the single character s[i]
		count += expand(i-1, i+1) // odd-length palindromes centered at i
		count += expand(i, i+1)   // even-length palindromes centered at i|i+1
	}
	return count
}
// extendSubString counts how many palindromes are found by expanding
// outward from s[left..right] while the characters at both ends match.
// n is len(s), passed in to avoid recomputing it per call.
func extendSubString(s string, n, left, right int) (count int) {
	for ; left >= 0 && right < n; left, right = left-1, right+1 {
		if s[left] != s[right] {
			break
		}
		count++
	}
	return count
}
// 法二:动态规划
// 长度更长的回文串总是在长度稍短的回文串的基础上形成
// dp[i][j]表示 s[i:j]是否为回文子串
// dp[i][j]在 dp[i+1][j-1]的基础上判断扩展
// countSubstrings2 counts palindromic substrings with dynamic programming:
// dp[i][j] records whether s[i..j] is a palindrome, built up from shorter
// substrings (s[i..j] is one iff s[i] == s[j] and the interior is one).
func countSubstrings2(s string) (count int) {
	n := len(s)
	if n == 0 {
		return 0
	}
	if n == 1 {
		return 1
	}
	dp := make([][]bool, n)
	for i := range dp {
		dp[i] = make([]bool, n)
	}
	// Iterate by substring length so dp[i+1][j-1] (length-2) is always ready.
	for length := 1; length <= n; length++ {
		for i, j := 0, length-1; j < n; i, j = i+1, j+1 {
			if s[i] != s[j] {
				continue
			}
			// Length 1 or 2 with equal ends is trivially a palindrome.
			if length <= 2 || dp[i+1][j-1] {
				dp[i][j] = true
				count++
			}
		}
	}
	return count
}
|
package main
import (
"fmt"
"log"
"golang.org/x/sys/windows/registry"
)
// main resolves default asset paths (from the Windows install directory when
// available, otherwise relative paths), merges them with CLI arguments, and
// runs the viewer.
func main() {
	instDir := getInstallationDir()
	// Fallbacks for a non-installed (run-from-source) setup.
	// fontName is defined elsewhere in this package.
	defaultFontPath := fontName + ".ttf"
	defaultOverviewDirectory := "./overviews/"
	if instDir != "" {
		defaultFontPath = fmt.Sprintf("%v\\%v.ttf", instDir, fontName)
		defaultOverviewDirectory = fmt.Sprintf("%v\\assets\\maps\\", instDir)
	}
	// CLI flags may override these defaults.
	conf := parseArgs(&Config{
		FontPath: defaultFontPath,
		OverviewDir: defaultOverviewDirectory,
	})
	err := run(conf)
	if err != nil {
		log.Fatalln(err)
	}
}
// getInstallationDir reads the install location from the Windows registry
// (HKLM\Software\csgoverview, value InstallLocation). A missing key is
// treated as "not an installed copy" and returns ""; a key that exists but
// whose value cannot be read terminates the process.
func getInstallationDir() string {
	instDirKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `Software\csgoverview`, registry.QUERY_VALUE)
	if err != nil {
		log.Println("Probably not an installation. Failed to open csgoverview registry key:", err)
		return ""
	}
	defer instDirKey.Close()
	instDir, _, err := instDirKey.GetStringValue("InstallLocation")
	if err != nil {
		// NOTE(review): Fatalln here kills the whole app for a readable-key /
		// unreadable-value edge case — confirm that's intended.
		log.Fatalln("Failed to get install directory from registry key:", err)
	}
	return instDir
}
|
package timecop_test
import (
"github.com/bluele/go-timecop"
"testing"
"time"
)
// TestFreeze checks that Freeze pins timecop.Now to a fixed instant and that
// Return resumes normal clock behavior.
func TestFreeze(t *testing.T) {
	now := timecop.Now()
	timecop.Freeze(now)
	if timecop.Now() != now {
		t.Errorf("Expected time is not %v.", now)
	}
	timecop.Return()
	if !timecop.Now().Before(time.Now()) {
		// Bug fix: message previously read "should be reolve freezing".
		t.Error("timecop should resolve freezing.")
	}
}
// TestTravel checks that Travel shifts timecop's clock into the future:
// Now must no longer equal the pre-travel time, and must lie past the
// travel target (the clock keeps ticking after traveling).
func TestTravel(t *testing.T) {
	now := timecop.Now()
	future := now.AddDate(1, 0, 0) // one year ahead
	timecop.Travel(future)
	if timecop.Now() == now {
		t.Errorf("Expected time is not %v.", now)
	}
	if !timecop.Now().After(future) {
		t.Errorf("Expected time should be greater than %v.", future)
	}
}
|
package main
import (
"LogDemo/Utils"
"LogDemo/conf"
"LogDemo/etcd"
"LogDemo/kafka"
"LogDemo/taillog"
"fmt"
"gopkg.in/ini.v1"
"sync"
"time"
)
var (
cfg = new(conf.AppConf)
)
// main wires up the log agent: load the ini config, initialise the kafka
// producer and the etcd client, fetch this host's log-collection entries from
// etcd, start tailing them, then watch etcd for config changes forever.
func main() {
	// Load the configuration file.
	// NOTE(review): cfg is already a *conf.AppConf, so &cfg is a **struct;
	// go-ini appears to tolerate it, but confirm before simplifying.
	err := ini.MapTo(&cfg, "./conf/config.ini")
	if err != nil {
		fmt.Printf("load ini failed, err: %v \n", err)
		return
	}
	fmt.Println("读取到的配置信息", cfg)
	// 1. Initialise the kafka connection.
	address := []string{cfg.KafkaConf.Address}
	topic := cfg.Topic
	err = kafka.Init(address, topic, cfg.ChanMaxSize)
	if err != nil {
		fmt.Printf("init Kafka failed, err:%v \n", err)
		return
	}
	fmt.Println("init kafka success.")
	// Initialise etcd.
	err = etcd.Init(cfg.EtcdConf.Address, time.Duration(cfg.EtcdConf.Timeout) * time.Second)
	if err != nil {
		// Bug fix: was fmt.Println with a %v verb, which printed the format
		// string literally instead of formatting the error.
		fmt.Printf("init etcd failed, err:%v \n", err)
		return
	}
	// Each log agent pulls its own config, keyed by this machine's IP.
	ipStr, err := Utils.GetOutboundIP()
	if err != nil {
		panic(err)
	}
	etcdConfKey := fmt.Sprintf(cfg.EtcdConf.Key, ipStr)
	// Fetch the log-collection entries for this host from etcd.
	logEntryConf, err := etcd.GetConf(etcdConfKey)
	if err != nil {
		// Bug fix: same Println/Printf mix-up as above.
		fmt.Printf("etcd.GetConf failed, err:%v \n", err)
		return
	}
	fmt.Printf("get conf from etcd success, %v \n", logEntryConf)
	// Print the fetched entries for visibility.
	for index, value := range logEntryConf {
		fmt.Printf("index:%v, value:%v \n", index, value)
	}
	// Start tailing the configured logs and shipping them to kafka.
	taillog.Init(logEntryConf)
	// Channel on which taillog receives hot-reloaded config updates.
	newConfChan := taillog.NewConf()
	// Block forever: the WaitGroup is Add(1) with no Done, keeping the
	// watcher goroutine alive for the lifetime of the process.
	var wg sync.WaitGroup
	wg.Add(1)
	// Watch etcd for config changes and feed them to taillog.
	go etcd.WatchConf(etcdConfKey, newConfChan)
	wg.Wait()
}
|
package kafka
import (
"context"
"crypto/tls"
"fmt"
"github.com/Shopify/sarama"
"github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/pkg/errors"
skafka "github.com/segmentio/kafka-go"
"github.com/batchcorp/plumber-schemas/build/go/protos/args"
"github.com/batchcorp/plumber-schemas/build/go/protos/encoding"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/util"
"github.com/batchcorp/plumber/validate"
)
// Write is the entry point function for performing write operations in Kafka.
//
// This is where we verify that the passed args and flags combo makes sense,
// attempt to establish a connection, parse protobuf before finally attempting
// to perform the write.
//
// Every message is written to every configured topic; individual message
// failures are reported on errorCh and do not abort the remaining writes.
func (k *Kafka) Write(ctx context.Context, writeOpts *opts.WriteOptions, errorCh chan<- *records.ErrorRecord, messages ...*records.WriteRecord) error {
	if err := validateWriteOptions(writeOpts); err != nil {
		return errors.Wrap(err, "unable to verify write options")
	}
	// CloudEvents encoding takes a separate sarama-based path.
	if writeOpts.EncodeOptions != nil && writeOpts.EncodeOptions.EncodeType == encoding.EncodeType_ENCODE_TYPE_CLOUDEVENT {
		return k.writeCloudEvents(ctx, writeOpts, errorCh, messages...)
	}
	writer, err := NewWriter(k.dialer, k.connArgs, writeOpts.Kafka.Args.Topics...)
	if err != nil {
		return errors.Wrap(err, "unable to create new writer")
	}
	defer writer.Close()
	for _, topic := range writeOpts.Kafka.Args.Topics {
		for _, msg := range messages {
			// All messages share the same configured key.
			if err := k.write(ctx, writer, writeOpts.Kafka.Args, topic, []byte(writeOpts.Kafka.Args.Key), []byte(msg.Input)); err != nil {
				util.WriteError(k.log, errorCh, fmt.Errorf("unable to write message to topic '%s': %s", topic, err))
			}
		}
	}
	return nil
}
// getSaramaConfig builds a sarama configuration from the stored connection
// options, wiring up TLS and SASL (plaintext or SCRAM-SHA-512) as requested.
func (k *Kafka) getSaramaConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_6_0_0 // Need this in order for offset bits to work
	connOpts := k.connOpts.GetKafka()
	if connOpts.UseTls {
		cfg.Net.TLS.Enable = true
		cfg.Net.TLS.Config = &tls.Config{
			InsecureSkipVerify: connOpts.TlsSkipVerify,
		}
	}
	switch connOpts.SaslType {
	case args.SASLType_NONE:
		// SASL disabled; leave cfg.Net.SASL at its zero value.
	case args.SASLType_SCRAM:
		cfg.Net.SASL.Enable = true
		cfg.Net.SASL.User = connOpts.SaslUsername
		cfg.Net.SASL.Password = connOpts.SaslPassword
		cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
	default:
		// Any other SASL type falls back to plaintext, as before.
		cfg.Net.SASL.Enable = true
		cfg.Net.SASL.User = connOpts.SaslUsername
		cfg.Net.SASL.Password = connOpts.SaslPassword
		cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext
	}
	cfg.Producer.Return.Successes = true
	return cfg
}
// writeCloudEvents writes each message as a CloudEvent to every configured
// topic using a sarama-backed cloudevents sender. Per-topic and per-message
// failures are reported on errorCh; only connection/sender setup errors
// abort the whole operation.
func (k *Kafka) writeCloudEvents(ctx context.Context, writeOpts *opts.WriteOptions, errorCh chan<- *records.ErrorRecord, messages ...*records.WriteRecord) error {
	client, err := sarama.NewClient(k.connOpts.GetKafka().Address, k.getSaramaConfig())
	if err != nil {
		err = errors.Wrap(err, "unable to initiate kafka connection")
		util.WriteError(k.log, errorCh, err)
		return err
	}
	defer client.Close()
	for _, topic := range writeOpts.Kafka.Args.Topics {
		// One sender per topic, closed at the end of the iteration below.
		sender, err := kafka_sarama.NewSenderFromClient(client, topic)
		if err != nil {
			err = errors.Wrap(err, "unable to create new cloudevents sender")
			util.WriteError(k.log, errorCh, err)
			return err
		}
		c, err := cloudevents.NewClient(sender, cloudevents.WithTimeNow(), cloudevents.WithUUIDs())
		if err != nil {
			util.WriteError(k.log, errorCh, errors.Wrap(err, "failed to create cloudevents client"))
			continue
		}
		for i, msg := range messages {
			e, err := util.GenCloudEvent(writeOpts.EncodeOptions.CloudeventSettings, msg)
			if err != nil {
				util.WriteError(k.log, errorCh, errors.Wrap(err, "unable to generate cloudevents event"))
				continue
			}
			// The event ID doubles as the kafka partition key.
			result := c.Send(kafka_sarama.WithMessageKey(ctx, sarama.StringEncoder(e.ID())), *e)
			if cloudevents.IsUndelivered(result) {
				util.WriteError(k.log, errorCh, fmt.Errorf("unable to write cloudevents message to topic '%s': %s", topic, result))
			}
			k.log.Debugf("sent: %d, accepted: %t", i, cloudevents.IsACK(result))
		}
		sender.Close(ctx)
	}
	return nil
}
// validateWriteOptions checks that the write options carry everything a
// Kafka write needs: non-nil options, backend group, args, and at least one
// topic.
func validateWriteOptions(opts *opts.WriteOptions) error {
	switch {
	case opts == nil:
		return validate.ErrEmptyWriteOpts
	case opts.Kafka == nil:
		return validate.ErrEmptyBackendGroup
	case opts.Kafka.Args == nil:
		return validate.ErrEmptyBackendArgs
	case len(opts.Kafka.Args.Topics) == 0:
		return errors.New("at least one topic must be defined")
	}
	return nil
}
// Write writes a message to a kafka topic. It is a wrapper for WriteMessages.
func (k *Kafka) write(ctx context.Context, writer *skafka.Writer, writeArgs *args.KafkaWriteArgs, topic string, key, value []byte) error {
msg := skafka.Message{
Topic: topic,
Key: key,
Value: value,
}
headers := make([]skafka.Header, 0)
for headerName, headerValue := range writeArgs.Headers {
headers = append(headers, skafka.Header{
Key: headerName,
Value: []byte(headerValue),
})
}
if len(headers) != 0 {
msg.Headers = headers
}
if err := writer.WriteMessages(ctx, msg); err != nil {
return errors.Wrap(err, "unable to publish message(s)")
}
k.log.Infof("Successfully wrote message to topic '%s'", topic)
return nil
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"go/ast"
"go/token"
"os"
"reflect"
"strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/loader"
"honnef.co/go/tools/analysis/report"
)
// skipType classifies what a lint directive suppresses.
type skipType int
const (
	skipNone skipType = iota // not a recognized directive
	skipLinter               // suppress named linters at one position
	skipFile                 // suppress for the whole file
)
// Directive is a comment of the form '//lint:<command> [arguments...]' and `//nolint:<command>`.
// It represents instructions to the static analysis tool.
type Directive struct {
	Command skipType     // what kind of suppression this is
	Linters []string     // linter names the directive applies to
	Directive *ast.Comment // the comment carrying the directive
	Node ast.Node         // the AST node the comment is attached to
}
// parseDirective decodes one directive comment. "//lint:ignore ..." and
// "//lint:file-ignore ..." yield skipLinter/skipFile with the remaining
// space-separated fields; any other "//lint:" form is skipNone. Everything
// else is treated as a "//nolint:<linters>" directive (callers only pass
// strings with one of the two prefixes).
func parseDirective(s string) (cmd skipType, args []string) {
	if !strings.HasPrefix(s, "//lint:") {
		return skipLinter, []string{strings.TrimPrefix(s, "//nolint:")}
	}
	fields := strings.Split(strings.TrimPrefix(s, "//lint:"), " ")
	switch fields[0] {
	case "ignore":
		return skipLinter, fields[1:]
	case "file-ignore":
		return skipFile, fields[1:]
	}
	return skipNone, nil
}
// ParseDirectives extracts all lint directives ("//lint:..." and
// "//nolint:...") from a list of Go files, recording for each the parsed
// command, its linter arguments, the comment itself, and the AST node the
// comment map associates it with.
func ParseDirectives(files []*ast.File, fset *token.FileSet) []Directive {
	var dirs []Directive
	for _, f := range files {
		for node, groups := range ast.NewCommentMap(fset, f, f.Comments) {
			for _, group := range groups {
				for _, comment := range group.List {
					isLint := strings.HasPrefix(comment.Text, "//lint:")
					isNolint := strings.HasPrefix(comment.Text, "//nolint:")
					if !isLint && !isNolint {
						continue
					}
					cmd, args := parseDirective(comment.Text)
					dirs = append(dirs, Directive{
						Command: cmd,
						Linters: args,
						Directive: comment,
						Node: node,
					})
				}
			}
		}
	}
	return dirs
}
// doDirectives is the Run function of the Directives analyzer: it returns
// the directives parsed from the files of the current pass.
func doDirectives(pass *analysis.Pass) (interface{}, error) {
	return ParseDirectives(pass.Files, pass.Fset), nil
}
// Directives is an analyzer whose result is the list of lint directives in
// the package; other analyzers depend on it (via Requires) to honor
// lint:ignore / nolint suppressions.
var Directives = &analysis.Analyzer{
	Name: "directives",
	Doc: "extracts linter directives",
	Run: doDirectives,
	RunDespiteErrors: true, // directives matter even in packages that fail type-checking
	ResultType: reflect.TypeOf([]Directive{}),
}
// SkipAnalyzer updates an analyzer from `staticcheck` and `golangci-linter` to make it work on nogo.
// They have "lint:ignore" or "nolint" to make the analyzer ignore the code.
// It wraps the analyzer's Run so that diagnostics are dropped when a
// directive on the same line (skipLinter) or anywhere in the same file
// (skipFile) names this analyzer.
func SkipAnalyzer(analyzer *analysis.Analyzer) {
	analyzer.Requires = append(analyzer.Requires, Directives)
	oldRun := analyzer.Run
	analyzer.Run = func(p *analysis.Pass) (interface{}, error) {
		// Shallow-copy the pass so only our wrapper sees the filtered Report.
		pass := *p
		oldReport := p.Report
		pass.Report = func(diag analysis.Diagnostic) {
			dirs := pass.ResultOf[Directives].([]Directive)
			for _, dir := range dirs {
				cmd := dir.Command
				linters := dir.Linters
				switch cmd {
				case skipLinter:
					// Suppress only when the directive sits on the same
					// file+line as the diagnostic and names this analyzer.
					ignorePos := report.DisplayPosition(pass.Fset, dir.Node.Pos())
					nodePos := report.DisplayPosition(pass.Fset, diag.Pos)
					if ignorePos.Filename != nodePos.Filename || ignorePos.Line != nodePos.Line {
						continue
					}
					for _, check := range strings.Split(linters[0], ",") {
						if strings.TrimSpace(check) == analyzer.Name {
							return
						}
					}
				case skipFile:
					// File-wide suppression: any diagnostic in the same file
					// is dropped, regardless of the linter list.
					ignorePos := report.DisplayPosition(pass.Fset, dir.Node.Pos())
					nodePos := report.DisplayPosition(pass.Fset, diag.Pos)
					if ignorePos.Filename == nodePos.Filename {
						return
					}
				default:
					continue
				}
			}
			oldReport(diag)
		}
		return oldRun(&pass)
	}
}
// FormatCode wraps a code snippet in backquotes for nogo output. Snippets
// that already contain a backquote are returned untouched, since they cannot
// be wrapped without escaping.
func FormatCode(code string) string {
	if strings.ContainsRune(code, '`') {
		return code // TODO: properly escape or remove
	}
	return "`" + code + "`"
}
// MakeFakeLoaderPackageInfo creates a fake loader.PackageInfo for a given package.
// It adapts an analysis.Pass to the legacy loader API expected by some
// linters, reusing the pass's type info and compiled files.
func MakeFakeLoaderPackageInfo(pass *analysis.Pass) *loader.PackageInfo {
	var errs []error // always empty: the pass would not run with load errors
	typeInfo := pass.TypesInfo
	return &loader.PackageInfo{
		Pkg: pass.Pkg,
		Importable: true, // not used
		TransitivelyErrorFree: true, // not used
		// use compiled (preprocessed) go files AST;
		// AST linters use not preprocessed go files AST
		Files: pass.Files,
		Errors: errs,
		Info: *typeInfo,
	}
}
// ReadFile reads a file and adds it to the FileSet
// so that we can report errors against it using lineStart.
func ReadFile(fset *token.FileSet, filename string) ([]byte, *token.File, error) {
//nolint: gosec
content, err := os.ReadFile(filename)
if err != nil {
return nil, nil, err
}
tf := fset.AddFile(filename, -1, len(content))
tf.SetLinesForContent(content)
return content, tf, nil
}
// FindOffset returns the byte offset of the given 1-based (line, column)
// position in fileText, or -1 when the position does not exist. Columns
// count decoded runes per line (the scan advances one column per rune).
func FindOffset(fileText string, line, column int) int {
	curLine, curCol := 1, 1
	for offset, ch := range fileText {
		if curLine == line && curCol == column {
			return offset
		}
		if ch == '\n' {
			curLine, curCol = curLine+1, 1
			continue
		}
		curCol++
	}
	return -1 // position past the end of the text
}
// GetPackageName returns the package name used in this file.
func GetPackageName(imports []*ast.ImportSpec, path, defaultName string) string {
quoted := `"` + path + `"`
for _, imp := range imports {
if imp.Path.Value == quoted {
if imp.Name != nil {
return imp.Name.Name
}
return defaultName
}
}
return ""
}
|
package shell
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestHelpNameShouldReturnHelp verifies the help command's name is "help".
func TestHelpNameShouldReturnHelp(t *testing.T) {
	assert.Equal(t, "help", help(0).name())
}
// TestHelpsDescriptionShouldNotBeEmpty verifies that the help command
// provides a non-empty description string.
func TestHelpsDescriptionShouldNotBeEmpty(t *testing.T) {
	assert.NotEqual(t, "", help(0).description())
}
// TestHelpUsageShouldNotBeEmpty verifies that the help command provides a
// non-empty usage string.
func TestHelpUsageShouldNotBeEmpty(t *testing.T) {
	assert.NotEqual(t, "", help(0).usage())
}
// TestHelpRunShouldReturnNil verifies that running the help command with
// nil arguments succeeds (returns no error).
func TestHelpRunShouldReturnNil(t *testing.T) {
	assert.Nil(t, help(0).run(nil, nil))
}
// TestHelpShouldRegisterItself verifies that the help command appears in
// the package-level allCommands registry under its own name.
func TestHelpShouldRegisterItself(t *testing.T) {
	_, ok := allCommands[help(0).name()]
	assert.True(t, ok)
}
|
package datastructures
// Stack describes the operations of a last-in-first-out stack.
// NOTE(review): behavior of Peek/Pop on an empty stack is implementation
// defined — not specified here; check implementations before relying on it.
type Stack interface {
	// IsEmpty reports whether the stack contains no elements.
	IsEmpty() bool
	// Peek returns the top element without removing it.
	Peek() interface{}
	// Push places value on top of the stack.
	Push(value interface{})
	// Pop removes and returns the top element.
	Pop() interface{}
	// String returns a printable representation of the stack.
	String() string
}
|
package venti
import (
"errors"
venti "sigint.ca/venti2"
)
// Backend provides block-level storage addressed by venti score.
type Backend interface {
	// ReadBlock copies the block identified by the score into the supplied
	// buffer and returns the number of bytes copied.
	ReadBlock(venti.Score, []byte) (int, error)
	// WriteBlock stores data (typ is the venti block type) and returns the
	// score under which it was stored.
	WriteBlock(typ uint8, data []byte) (venti.Score, error)
}
var (
	// ENotFound is returned by ReadBlock when no block with the requested
	// score exists.
	// NOTE(review): Go convention would name this ErrNotFound; left as-is
	// to avoid breaking existing callers.
	ENotFound = errors.New("block not found")
)
// MemBackend is an in-memory Backend implementation mapping scores to block data.
type MemBackend map[venti.Score][]byte
// ReadBlock copies the stored block for score s into p and returns the
// number of bytes copied, or ENotFound when the score is unknown.
func (b MemBackend) ReadBlock(s venti.Score, p []byte) (int, error) {
	if blk, ok := b[s]; ok {
		return copy(p, blk), nil
	}
	return 0, ENotFound
}
// WriteBlock stores data under its fingerprint score and returns that
// score. The typ parameter is accepted for interface compatibility but is
// not recorded by this in-memory implementation.
func (b MemBackend) WriteBlock(typ uint8, data []byte) (venti.Score, error) {
	score := venti.Fingerprint(data)
	b[score] = data
	return score, nil
}
|
package cli
import (
"io/ioutil"
"os"
"path/filepath"
"gopkg.in/yaml.v2"
"github.com/ch3lo/overlord/configuration"
"github.com/ch3lo/overlord/logger"
"github.com/ch3lo/overlord/version"
"github.com/codegangsta/cli"
)
// config holds the application configuration; it is populated by
// setupApplication before any command runs.
var config *configuration.Configuration
// globalFlags defines the command-line flags shared by every overlord
// command: debugging, log configuration, and the config-file path.
func globalFlags() []cli.Flag {
	return []cli.Flag{
		cli.BoolFlag{
			Name:  "debug",
			Usage: "Debug de la app",
		},
		cli.StringFlag{
			Name:   "log-level",
			Value:  "info",
			Usage:  "Nivel de verbosidad de log",
			EnvVar: "OVERLORD_LOG_LEVEL",
		},
		cli.StringFlag{
			Name:   "log-formatter",
			Value:  "text",
			Usage:  "Formato de log",
			EnvVar: "OVERLORD_LOG_FORMATTER",
		},
		cli.BoolFlag{
			Name:   "log-colored",
			Usage:  "Coloreo de log :D",
			EnvVar: "OVERLORD_LOG_COLORED",
		},
		cli.StringFlag{
			Name:   "log-output",
			Value:  "console",
			Usage:  "Output de los logs. console | file",
			EnvVar: "OVERLORD_LOG_OUTPUT",
		},
		cli.StringFlag{
			Name:  "config",
			Value: "overlord.yaml",
			Usage: "Ruta del archivo de configuración",
		},
	}
}
// setupConfiguration loads and parses the YAML configuration file at
// configFile, returning the decoded configuration. It fails fast when the
// file does not exist, cannot be resolved to an absolute path, cannot be
// read, or contains invalid YAML.
func setupConfiguration(configFile string) (*configuration.Configuration, error) {
	if _, err := os.Stat(configFile); os.IsNotExist(err) {
		return nil, err
	}
	absPath, err := filepath.Abs(configFile)
	if err != nil {
		return nil, err
	}
	data, err := ioutil.ReadFile(absPath)
	if err != nil {
		return nil, err
	}
	cfg := new(configuration.Configuration)
	if err := yaml.Unmarshal(data, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// setupApplication configures the logger from the CLI flags and then loads
// the global configuration. It is intended to run before any command.
func setupApplication(c *cli.Context) error {
	if err := logger.Configure(logger.Config{
		Level:     c.String("log-level"),
		Formatter: c.String("log-formatter"),
		Colored:   c.Bool("log-colored"),
		Output:    c.String("log-output"),
		Debug:     c.Bool("debug"),
	}); err != nil {
		return err
	}
	var err error
	// assign to the package-level config so commands can read it later
	if config, err = setupConfiguration(c.String("config")); err != nil {
		return err
	}
	return nil
}
// RunApp builds the overlord CLI application and runs it with os.Args.
// Any setup or command error terminates the process via the logger.
func RunApp() {
	app := cli.NewApp()
	app.Name = "overlord"
	app.Usage = "Monitor de contenedores"
	app.Version = version.VERSION + " (" + version.GITCOMMIT + ")"
	app.Flags = globalFlags()
	// setupApplication already has the func(*cli.Context) error signature,
	// so it can be assigned directly as the Before hook.
	app.Before = setupApplication
	app.Commands = commands
	if err := app.Run(os.Args); err != nil {
		logger.Instance().Fatalln(err)
	}
}
|
package main
import "fmt"
// 1. What does the following code print?
func main() {
	var a []int = nil
	//a = []int{1, 2}
	// Deliberate quiz bug: a is still nil when a[0] is assigned, so this
	// panics at run time (see the answer comment below).
	a, a[0] = []int{1,2}, 9
	fmt.Println(a)
}
//参考答案及解析:运行时错误。知识点:多重赋值。
//
//多重赋值分为两个步骤,有先后顺序:先求值等号右边的表达式,再从左到右依次赋值;此处 a 仍为 nil,对 a[0] 赋值时下标越界,因此发生运行时 panic。
//2.下面代码中的指针 p 为野指针,因为返回的栈内存在函数结束时会被释放?
//type TimesMatcher struct {
// base int
//}
//
//func NewTimesMatcher(base int) *TimesMatcher {
// return &TimesMatcher{base:base}
//}
//
//func main() {
// p := NewTimesMatcher(3)
// fmt.Println(p)
//}
//A. false
//
//B. true
//参考答案及解析:
//A。Go语言的内存回收机制规定,只要有一个指针指向引用一个变量,那么这个变量就不会被释放(内存逃逸),因此在 Go 语言中返回函数参数或临时变量是安全的。
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.