text stringlengths 11 4.05M |
|---|
package adapter
import (
"context"
"github.com/kyleterry/tenyks/pkg/message"
)
// Adapter is the contract for a chat-protocol connection (e.g. IRC) that the
// bot can dial, send messages through, and receive messages from.
type Adapter interface {
	// GetName returns the configured name of this adapter instance.
	GetName() string
	// GetType returns the protocol type of this adapter (see AdapterType).
	GetType() AdapterType
	// Dial establishes the underlying connection.
	Dial(ctx context.Context) error
	// Close tears the connection down.
	Close(ctx context.Context) error
	// SendAsync submits msg for delivery.
	// NOTE(review): async (non-blocking) semantics are inferred from the
	// name only — confirm against the implementations.
	SendAsync(ctx context.Context, msg message.Message) error
	// RegisterMessageHandler installs a callback for inbound messages.
	RegisterMessageHandler(message.HandlerFunc)
}
// AdapterType enumerates the supported chat protocols.
type AdapterType int

const (
	// AdapterTypeIRC identifies the IRC protocol adapter.
	AdapterTypeIRC AdapterType = iota
)

// AdapterTypeMapping resolves a lowercase protocol name (as found in
// configuration) to its AdapterType value.
var AdapterTypeMapping = map[string]AdapterType{
	"irc": AdapterTypeIRC,
}
|
package vmm
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/768bit/vutils"
"github.com/cloudius-systems/capstan/cmd"
"github.com/cloudius-systems/capstan/core"
"github.com/cloudius-systems/capstan/util"
)
// BuildBaseCapstanImage composes a Capstan (OSv) unikernel image named name
// from the package contents at entryPoint, with boot command cmdPath and a
// ZFS user partition of imageSize bytes. It returns the image descriptor,
// the repository it was composed into, the on-disk image path, and any
// compose error.
//
// NOTE(review): imgPath is computed even when ComposePackage fails, so the
// returned path may not exist on disk — callers must check err first.
func BuildBaseCapstanImage(name string, cmdPath string, entryPoint string, imageSize int64) (*core.Image, *util.Repo, string, error) {
	//template := makeCapstanTemplate("cloudius/osv", cmdPath, files)
	repo := util.NewRepo(util.DefaultRepositoryUrl)
	image := &core.Image{
		Name:       name,
		Hypervisor: "qemu", // images here are always built for QEMU
	}
	bootOpts := cmd.BootOptions{
		Cmd:        cmdPath,
		Boot:       []string{},
		EnvList:    []string{},
		PackageDir: entryPoint,
	}
	err := cmd.ComposePackage(repo, []string{}, imageSize, true, true, true, entryPoint, name, &bootOpts, "zfs", "")
	//err := cmd.Compose(repo, "", 64000000, entryPoint, name, cmdPath, true)
	//err := cmd.Build(repo, image, template, true, "512M")
	imgPath := repo.ImagePath(image.Hypervisor, image.Name)
	return image, repo, imgPath, err
}
// GetCapstanImagePath resolves the repository location of the QEMU image
// registered under the given name, returning the image descriptor, the
// repository handle, and the image path on disk.
func GetCapstanImagePath(name string) (*core.Image, *util.Repo, string) {
	repository := util.NewRepo(util.DefaultRepositoryUrl)
	img := &core.Image{
		Name:       name,
		Hypervisor: "qemu",
	}
	return img, repository, repository.ImagePath(img.Hypervisor, img.Name)
}
// capstanPkgCompose builds or updates a Capstan application image named
// appName from the files under packageDir and then stamps the boot command
// line (derived from bootOpts) onto the image.
//
// Only filesystem == "zfs" triggers the image-initialization/upload path;
// any other value skips straight to setting the command line on whatever
// image already exists at imagePath.
//
// NOTE(review): the error from imageCache.WriteToFile is ignored, so a
// failed cache write silently forces full re-uploads next time — confirm
// whether that is intentional best-effort behavior.
func capstanPkgCompose(repo *util.Repo, imageSize int64, updatePackage, verbose, pullMissing bool,
	packageDir, appName string, bootOpts *cmd.BootOptions, filesystem string) error {
	// Package content should be collected in a subdirectory called mpm-pkg.
	//targetPath := filepath.Join(packageDir, "mpm-pkg")
	//vutils.Files.CreateDirIfNotExist(targetPath)
	// Remove collected directory afterwards.
	//defer os.RemoveAll(targetPath)
	// Construct final bootcmd for the image.
	commandLine, err := bootOpts.GetCmd()
	if err != nil {
		return err
	}
	// First, collect the contents of the package.
	//if err := cmd.CollectPackage(repo, packageDir, pullMissing, false, verbose); err != nil {
	//	return err
	//}
	// If all is well, we have to start preparing the files for upload.
	paths, err := cmd.CollectDirectoryContents(packageDir)
	if err != nil {
		return err
	}
	// Get the path of imported image.
	imagePath := repo.ImagePath("qemu", appName)
	// Check whether the image already exists.
	imageExists := false
	if _, err = os.Stat(imagePath); !os.IsNotExist(err) {
		imageExists = true
	}
	if filesystem == "zfs" {
		imageCachePath := repo.ImageCachePath("qemu", appName)
		var imageCache core.HashCache
		// If the user requested new image or requested to update a non-existent image,
		// initialize it first.
		if !updatePackage || !imageExists {
			// Initialize an empty image based on the provided loader image. imageSize is used to
			// determine the size of the user partition. Use default loader image.
			if err := repo.InitializeZfsImage("", appName, imageSize); err != nil {
				return fmt.Errorf("Failed to initialize empty image named %s.\nError was: %s", appName, err)
			}
		} else {
			// We are updating an existing image so try to parse the cache
			// config file. Note that we are not interested in any errors as
			// no-cache or invalid cache means that all files will be uploaded.
			imageCache, _ = core.ParseHashCache(imageCachePath)
		}
		// Upload the specified path onto virtual image.
		imageCache, err = cmd.UploadPackageContents(repo, imagePath, paths, imageCache, verbose)
		if err != nil {
			return err
		}
		// Save the new image cache (error intentionally? ignored — see note above).
		imageCache.WriteToFile(imageCachePath)
	}
	if err = util.SetCmdLine(imagePath, commandLine); err != nil {
		return err
	}
	fmt.Printf("Command line set to: '%s'\n", commandLine)
	return nil
}
// makeCapstanTemplate assembles a Capstan build template from the base image
// name, the boot command line, and the map of files to bundle into the image.
func makeCapstanTemplate(base string, cmd string, files map[string]string) *core.Template {
	tpl := &core.Template{}
	tpl.Base = base
	tpl.Cmdline = cmd
	tpl.Files = files
	tpl.Rootfs = "ROOTFS"
	return tpl
}
// getCapstanDevPath derives the OSv development build directory
// (../workspace/osv/build/release) relative to this source file's location
// and verifies it exists on disk.
//
// BUG FIX: the original discarded runtime.Caller's ok flag; if the caller
// information were unavailable it would silently build a path from an empty
// string. That case now returns an explicit error.
func getCapstanDevPath() (string, error) {
	_, callerFile, _, ok := runtime.Caller(0)
	if !ok {
		return "", errors.New("unable to determine caller file for OSV dev path lookup")
	}
	executablePath := filepath.Join(filepath.Dir(callerFile), "..", "workspace", "osv", "build", "release")
	if !vutils.Files.CheckPathExists(executablePath) {
		return executablePath, errors.New("OSV Dev Path missing")
	}
	return executablePath, nil
}
|
package maccount
import (
"time"
"webserver/models"
)
// UserReport is the GORM model for a report filed by one user, presumably
// against another user or an article (field semantics inferred from names —
// confirm against the schema).
type UserReport struct {
	Id        int // primary key
	UserId    int // presumably the reporting user's id
	ForUserId int // presumably the reported user's id
	ArticleId int // presumably the reported article's id, when applicable
	From      int // report source/category; maps to the reserved `from` column
	Reason    string
	Remark    string
	Extra     string
	Status    int // report state; FindUserReportByUserId treats 1 as active
	CreatedAt time.Time
	UpdatedAt time.Time
}
// FindUserReportByUserId returns the active (status = 1) reports filed by
// the given user for the given `from` source.
func FindUserReportByUserId(userId, from interface{}) ([]UserReport, error) {
	var reports []UserReport
	err := models.GetDb().
		Where("user_id = ? and `from` = ? and status = 1", userId, from).
		Find(&reports).Error
	return reports, err
}
// FindUserReportList pages through the reports matching the given source and
// status, ordered by ascending id.
func FindUserReportList(from, status interface{}, offset, limit int) ([]UserReport, error) {
	var reports []UserReport
	query := models.GetDb().
		Where("`from` = ? and status = ?", from, status).
		Order("id asc").
		Offset(offset).
		Limit(limit)
	err := query.Find(&reports).Error
	return reports, err
}
// FindReportCount returns the number of user_reports rows matching the given
// source and status.
//
// NOTE(review): the query error is silently discarded, so a failed query
// returns 0 — indistinguishable from "no reports". Consider surfacing it.
func FindReportCount(from, status interface{}) int {
	var count int
	models.GetDb().Table("user_reports").Where("`from` = ? and status = ?", from, status).Count(&count)
	return count
}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package mockhttp
import (
"crypto/tls"
"encoding/base64"
"fmt"
"log"
"net/http"
"net/http/httptest"
"strings"
"sync"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pivotal-cf/on-demand-service-broker/loggerfactory"
)
// Server is a scripted HTTP test double: mocked handlers are consumed
// strictly in the order they were registered, and any unexpected or
// unconsumed request fails the enclosing Ginkgo test.
type Server struct {
	name, expectedAuthorizationHeader string
	// excludedAuthPaths lists request paths exempt from the Authorization check.
	excludedAuthPaths map[string]bool
	*httptest.Server
	*sync.Mutex
	// mockHandlers is the script of responses; currentHandler indexes the
	// next one to serve.
	mockHandlers   []MockedResponseBuilder
	currentHandler int
	logger         *log.Logger
}
// StartServer creates and starts a plain-HTTP mock server identified by
// name. Log output goes to the Ginkgo writer, prefixed with the server name.
func StartServer(name string) *Server {
	server := &Server{
		name:              name,
		excludedAuthPaths: map[string]bool{},
		Mutex:             new(sync.Mutex),
	}
	server.logger = log.New(GinkgoWriter, "["+name+"] ", log.LstdFlags)
	server.Server = httptest.NewServer(server)
	return server
}
// StartTLSServer creates and starts a mock HTTPS server identified by name,
// serving the certificate/key pair loaded from the given file paths. A
// certificate load failure fails the current Ginkgo test.
func StartTLSServer(name, certPath, keyPath string) *Server {
	s := &Server{
		name:              name,
		Mutex:             new(sync.Mutex),
		excludedAuthPaths: make(map[string]bool),
	}
	cer, err := tls.LoadX509KeyPair(certPath, keyPath)
	Expect(err).NotTo(HaveOccurred())
	config := &tls.Config{Certificates: []tls.Certificate{cer}}
	sslServer := httptest.NewUnstartedServer(s)
	sslServer.TLS = config
	// Route the HTTP server's internal error log through the logger factory
	// so TLS/handshake errors surface in Ginkgo output.
	sslServer.Config.ErrorLog = loggerfactory.New(GinkgoWriter, "server", loggerfactory.Flags).New()
	s.Server = sslServer
	s.Server.StartTLS()
	s.logger = log.New(GinkgoWriter, "["+name+"] ", log.LstdFlags)
	return s
}
// ExpectedAuthorizationHeader makes the server assert that every request
// (outside excluded paths) carries exactly this Authorization header value.
func (s *Server) ExpectedAuthorizationHeader(header string) {
	s.expectedAuthorizationHeader = header
}
// ExpectedBasicAuth is a convenience wrapper that sets the expected
// Authorization header to the Basic scheme for the given credentials.
func (s *Server) ExpectedBasicAuth(username, password string) {
	s.ExpectedAuthorizationHeader(basicAuth(username, password))
}
// ExcludeAuthorizationCheck disables the Authorization header assertion for
// requests to the given path.
func (s *Server) ExcludeAuthorizationCheck(path string) {
	s.excludedAuthPaths[path] = true
}
// checkAuthHeaderForPath reports whether the Authorization header should be
// verified for the given request path.
func (s *Server) checkAuthHeaderForPath(path string) bool {
	return !s.excludedAuthPaths[path]
}
// verifyCommonServerExpectations applies the assertions shared by every
// mocked request; currently that is only the Authorization header check.
func (s *Server) verifyCommonServerExpectations(r *http.Request) {
	if s.expectedAuthorizationHeader != "" && s.checkAuthHeaderForPath(r.URL.Path) {
		Expect(r.Header.Get("Authorization")).To(Equal(s.expectedAuthorizationHeader), "Expected 'Authorization' header to be equal to:\n %+v\n", s.expectedAuthorizationHeader)
	}
}
// ServeHTTP dispatches each incoming request to the next scripted mock in
// order. A request arriving after every mock has been consumed fails the
// test with a summary of completed and pending mocks.
func (s *Server) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
	s.Lock()
	defer s.Unlock()
	defer GinkgoRecover()
	if s.currentHandler >= len(s.mockHandlers) {
		summary := fmt.Sprintf(
			"Unmocked request:\n\t%s\nReceived by:\n\t%s\nCompleted:\n%s\nPending:\n%s\n",
			req.Method+" "+req.URL.String(),
			s.name,
			strings.Join(s.completedMocks(), "\n"),
			strings.Join(s.pendingMocks(), "\n"),
		)
		Fail(summary)
	}
	s.logger.Printf("%s %s\n", req.Method, req.URL.String())
	handler := s.mockHandlers[s.currentHandler]
	s.currentHandler++
	handler.Verify(req, s)
	handler.Respond(writer, s.logger)
}
// completedMocks lists (tab-indented) the URLs of mocks already served.
func (s *Server) completedMocks() []string {
	var done []string
	for _, handler := range s.mockHandlers[:s.currentHandler] {
		done = append(done, "\t"+handler.Url())
	}
	return done
}
// pendingMocks lists (tab-indented) the URLs of mocks not yet served.
func (s *Server) pendingMocks() []string {
	var pending []string
	for _, handler := range s.mockHandlers[s.currentHandler:] {
		pending = append(pending, "\t"+handler.Url())
	}
	return pending
}
// VerifyAndMock asserts that all previously configured mocks were consumed,
// then replaces the script with a fresh set of mocked responses.
func (s *Server) VerifyAndMock(mockedResponses ...MockedResponseBuilder) {
	s.Lock()
	defer s.Unlock()
	s.VerifyMocks()
	s.currentHandler = 0
	s.mockHandlers = mockedResponses
}
// AppendMocks adds more scripted responses to the end of the current script.
func (s *Server) AppendMocks(mockedResponses ...MockedResponseBuilder) {
	s.Lock()
	defer s.Unlock()
	s.mockHandlers = append(s.mockHandlers, mockedResponses...)
}
// VerifyMocks fails the test if any scripted responses were never requested.
// Note: callers are expected to hold the lock or be otherwise synchronized
// (VerifyAndMock calls this with the lock held).
func (s *Server) VerifyMocks() {
	if len(s.mockHandlers) != s.currentHandler {
		completedMocks := strings.Join(s.completedMocks(), "\n")
		pendingMocks := strings.Join(s.pendingMocks(), "\n")
		Fail(fmt.Sprintf("Uninvoked mocks for:\n\t%s\nCompleted:\n%s\nPending:\n%s\n", s.name, completedMocks, pendingMocks))
	}
}
// MockedResponseBuilder is one scripted exchange: Verify asserts on the
// incoming request, Respond writes the canned response, and Url identifies
// the mock in failure summaries.
type MockedResponseBuilder interface {
	Verify(req *http.Request, d *Server)
	Respond(writer http.ResponseWriter, logger *log.Logger)
	Url() string
}
// basicAuth builds an RFC 7617 Basic Authorization header value from the
// given credentials.
func basicAuth(username, password string) string {
	credentials := []byte(username + ":" + password)
	return "Basic " + base64.StdEncoding.EncodeToString(credentials)
}
|
package main
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
)
// Scroll-export configuration: initurl opens a scroll context on the
// fazhi_company index with a 1-minute keep-alive, url is the endpoint for
// fetching subsequent scroll pages, and query requests 1000-document pages
// matching everything.
const (
	initurl = "http://localhost:9200/fazhi_company/_search?scroll=1m"
	url     = "http://localhost:9200/_search/scroll"
	query   = `{"size":1000,"query": {"match_all" : {}}}`
)
// scroll carries one page of scroll results: the id needed to request the
// next page and this page's _source documents.
type scroll struct {
	scrollId string
	datas    []interface{}
}
// getScroll POSTs query to the given Elasticsearch endpoint, forwards every
// hit's _source onto ch, and returns the scroll id plus the page of
// documents so the caller can decide whether to keep scrolling.
//
// Errors terminate the process via log.Fatalf, matching this exporter's
// fail-fast style. BUG FIXES vs the original: the http.NewRequest and
// ReadAll errors were unchecked (a bad URL would nil-deref on req.Header),
// and the _scroll_id/hits type assertions could panic on an unexpected
// response body; both now fail with a diagnostic instead.
func getScroll(url string, query []byte, ch chan interface{}) scroll {
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(query))
	if err != nil {
		log.Fatalf("Error building request: %s", err)
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatalf("Error getting response: %s", err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("Error reading response body: %s", err)
	}
	log.Printf("请求返回%s", body)
	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		log.Fatalf("Error decoding response JSON: %s", err)
	}
	scrollId, ok := result["_scroll_id"].(string)
	if !ok {
		log.Fatalf("Response missing _scroll_id: %s", body)
	}
	hitsWrapper, ok := result["hits"].(map[string]interface{})
	if !ok {
		log.Fatalf("Response missing hits object: %s", body)
	}
	// An absent/empty hits array simply yields an empty page, which ends
	// the caller's scroll loop.
	hits, _ := hitsWrapper["hits"].([]interface{})
	datas := make([]interface{}, 0, len(hits))
	for _, value := range hits {
		if v, ok := value.(map[string]interface{}); ok {
			source := v["_source"]
			ch <- source
			datas = append(datas, source)
		}
	}
	return scroll{
		scrollId: scrollId,
		datas:    datas,
	}
}
// Query is the request body for the scroll-continuation endpoint.
type Query struct {
	Scroll   string `json:"scroll"`    // keep-alive window, e.g. "1m"
	ScrollId string `json:"scroll_id"` // id returned by the previous page
}
func SaveJsonFile(datas []interface{},sequence int){
log.Println("开始保存文件........")
filename := "./data2/data"+strconv.Itoa(sequence)+".json"
f,err := os.Create(filename)
defer f.Close()
if err != nil {
fmt.Println("os Create error: ", err)
return
}
bw := bufio.NewWriter(f)
for _,v := range datas {
r, _ := json.Marshal(v)
bw.WriteString(string(r) + "\n")
}
bw.Flush()
}
func AppendToFile(filename string,ch chan interface{}){
f,err := os.OpenFile(filename,os.O_RDWR|os.O_CREATE|os.O_APPEND,0777)
if err != nil {
fmt.Println("os OpenFile error: ", err)
return
}
defer f.Close()
for data := range ch {
log.Printf("收到数据:%s",data)
r,err := json.Marshal(data)
if err!=nil {
log.Fatalf("转json字符串出错:%s",err)
}
f.WriteString(string(r)+"\n")
}
}
func getData(ch chan interface{}){
var jsonStr = []byte(query)
reslut := getScroll(initurl,jsonStr,ch)
var sequence = 0
for ;len(reslut.datas) > 0 ;{
scrollQuery := Query{
Scroll:"1m",
ScrollId:reslut.scrollId,
}
log.Println(reslut.scrollId)
sequence += 1
/*for index,value := range reslut.datas {
log.Printf("index=%d,value=%s",index,value)
}*/
q,err := json.Marshal(scrollQuery)
if err != nil{
log.Fatalf("Error %s",err)
}
log.Println("请求参数:"+ string(q))
reslut = getScroll(url,q,ch)
}
close(ch)
}
func main() {
ch := make(chan interface{},1000)
go getData(ch)
AppendToFile("./datas/data2.json",ch)
//var sequence = 0
/*for data := range ch {
sequence += 1
log.Printf("收到数据:%s",data)
SaveJsonFile(data,sequence)
}*/
} |
package elering
// URI is the base endpoint of the public Elering dashboard API.
const URI = "https://dashboard.elering.ee/api"

// NpsPrice mirrors the Elering Nord Pool spot-price response: Success flags
// the request outcome and Data maps an area key to its list of prices.
// NOTE(review): key semantics (country/area codes) inferred — confirm
// against the Elering API documentation.
type NpsPrice struct {
	Success bool
	Data    map[string][]Price
}

// Price is a single spot-price point: a Unix timestamp and the price value.
type Price struct {
	Timestamp int64
	Price     float64
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collectcmd
import (
"context"
"fmt"
"os"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/gardener/test-infra/pkg/logger"
"github.com/gardener/test-infra/pkg/testmachinery"
"github.com/gardener/test-infra/pkg/testmachinery/metadata"
"github.com/gardener/test-infra/pkg/testrunner/result"
kutil "github.com/gardener/test-infra/pkg/util/kubernetes"
"github.com/spf13/cobra"
"github.com/gardener/test-infra/pkg/testrunner"
"github.com/gardener/test-infra/pkg/util"
tmv1beta1 "github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1"
)
// CLI flag targets for the collect command, bound in init below.
var (
	tmKubeconfigPath string // path to the testmachinery cluster kubeconfig
	namespace        string // namespace the testrun lives in
	testrunName      string // name of the testrun whose results are collected
)

// collectConfig aggregates all result-collection settings bound in init.
var collectConfig = result.Config{}
// AddCommand registers the collect subcommand on the given parent command.
func AddCommand(cmd *cobra.Command) {
	cmd.AddCommand(collectCmd)
}
// collectCmd fetches a completed testrun from the cluster and runs the
// result collector over it (whatever outputs collectConfig enables). Any
// failure is logged and exits the process with status 1.
var collectCmd = &cobra.Command{
	Use:   "collect",
	Short: "Collects results from a completed testrun.",
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()
		defer ctx.Done()
		logger.Log.Info("Start testmachinery testrunner")
		logger.Log.V(3).Info(util.PrettyPrintStruct(collectConfig))
		tmClient, err := kutil.NewClientFromFile(tmKubeconfigPath, client.Options{
			Scheme: testmachinery.TestMachineryScheme,
		})
		if err != nil {
			logger.Log.Error(err, fmt.Sprintf("Cannot build kubernetes client from %s", tmKubeconfigPath))
			os.Exit(1)
		}
		tr := &tmv1beta1.Testrun{}
		err = tmClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: testrunName}, tr)
		if err != nil {
			// BUG FIX: the message contained a printf verb ("testrun %s") that
			// structured logging never expands; the name is already attached
			// as the "testrun" key/value pair.
			logger.Log.Error(err, "unable to fetch testrun from cluster", "testrun", testrunName)
			os.Exit(1)
		}
		run := &testrunner.Run{
			Testrun:  tr,
			Metadata: metadata.FromTestrun(tr),
		}
		collector, err := result.New(logger.Log.WithName("collector"), collectConfig, tmKubeconfigPath)
		if err != nil {
			logger.Log.Error(err, "unable to initialize collector")
			os.Exit(1)
		}
		_, err = collector.Collect(ctx, logger.Log.WithName("Collect"), tmClient, namespace, []*testrunner.Run{run})
		if err != nil {
			logger.Log.Error(err, "unable to collect result", "testrun", testrunName)
			os.Exit(1)
		}
		logger.Log.Info("finished collecting testrun results.")
	},
}
// init binds all collect-command flags to collectConfig and the
// package-level flag variables.
func init() {
	// configuration flags
	collectCmd.Flags().StringVar(&tmKubeconfigPath, "tm-kubeconfig-path", os.Getenv("KUBECONFIG"), "Path to the testmachinery cluster kubeconfig")
	if err := collectCmd.MarkFlagFilename("tm-kubeconfig-path"); err != nil {
		logger.Log.Error(err, "mark flag filename", "flag", "tm-kubeconfig-path")
	}
	collectCmd.Flags().StringVarP(&namespace, "namespace", "n", "default", "Namespace where the testrun should be deployed.")
	collectCmd.Flags().StringVarP(&testrunName, "tr-name", "t", "", "Name of the testrun to collect results.")
	if err := collectCmd.MarkFlagRequired("tr-name"); err != nil {
		logger.Log.Error(err, "mark flag required", "flag", "tr-name")
	}
	collectCmd.Flags().StringVar(&collectConfig.ComponentDescriptorPath, "component-descriptor-path", "", "Path to the component descriptor (BOM) of the current landscape.")
	// parameter flags
	collectCmd.Flags().StringVar(&collectConfig.ConcourseOnErrorDir, "concourse-onError-dir", os.Getenv("ON_ERROR_DIR"), "On error dir which is used by Concourse.")
	// asset upload
	collectCmd.Flags().BoolVar(&collectConfig.UploadStatusAsset, "upload-status-asset", false, "Upload testrun status as a github release asset.")
	// BUG FIX: help text was copy-pasted from concourse-onError-dir.
	collectCmd.Flags().StringVar(&collectConfig.GithubUser, "github-user", os.Getenv("GITHUB_USER"), "Github user.")
	collectCmd.Flags().StringVar(&collectConfig.GithubPassword, "github-password", os.Getenv("GITHUB_PASSWORD"), "Github password.")
	collectCmd.Flags().StringArrayVar(&collectConfig.AssetComponents, "asset-component", []string{}, "The github components to which the testrun status shall be attached as an asset.")
	collectCmd.Flags().StringVar(&collectConfig.AssetPrefix, "asset-prefix", "", "Prefix of the asset name.")
	// slack notification
	collectCmd.Flags().StringVar(&collectConfig.SlackToken, "slack-token", "", "Client token to authenticate")
	collectCmd.Flags().StringVar(&collectConfig.SlackChannel, "slack-channel", "", "Client channel id to send the message to.")
	collectCmd.Flags().StringVar(&collectConfig.ConcourseURL, "concourse-url", "", "Concourse job URL.")
	collectCmd.Flags().BoolVar(&collectConfig.PostSummaryInSlack, "post-summary-in-slack", false, "Post testruns summary in slack.")
	// DEPRECATED FLAGS — kept registered so existing invocations do not break.
	collectCmd.Flags().StringP("output-dir-path", "o", "./testout", "The filepath where the summary should be written to.")
	collectCmd.Flags().String("es-config-name", "sap_internal", "DEPRECATED: The elasticsearch secret-server config name.")
	collectCmd.Flags().String("es-endpoint", "", "endpoint of the elasticsearch instance")
	collectCmd.Flags().String("es-username", "", "username to authenticate against a elasticsearch instance")
	collectCmd.Flags().String("es-password", "", "password to authenticate against a elasticsearch instance")
	collectCmd.Flags().String("s3-endpoint", os.Getenv("S3_ENDPOINT"), "S3 endpoint of the testmachinery cluster.")
	collectCmd.Flags().Bool("s3-ssl", false, "S3 has SSL enabled.")
	// TYPO FIX: "will not we used" -> "will not be used".
	_ = collectCmd.Flags().MarkDeprecated("output-dir-path", "DEPRECATED: will not be used anymore")
	_ = collectCmd.Flags().MarkDeprecated("es-config-name", "DEPRECATED: will not be used anymore")
	_ = collectCmd.Flags().MarkDeprecated("es-endpoint", "DEPRECATED: will not be used anymore")
	_ = collectCmd.Flags().MarkDeprecated("es-username", "DEPRECATED: will not be used anymore")
	_ = collectCmd.Flags().MarkDeprecated("es-password", "DEPRECATED: will not be used anymore")
	_ = collectCmd.Flags().MarkDeprecated("s3-endpoint", "DEPRECATED: will not be used anymore")
	_ = collectCmd.Flags().MarkDeprecated("s3-ssl", "DEPRECATED: will not be used anymore")
}
|
package main
import (
"fmt"
"unsafe"
)
// main demonstrates unsafe.Sizeof, which reports the in-memory byte size of
// its operand's type without evaluating the expression: int is 8 bytes on a
// 64-bit platform, while int8 is always 1 byte.
func main() {
	a := 12
	var b int = 12
	var c int8 = 12
	fmt.Println("length of a:", unsafe.Sizeof(a))
	fmt.Println("length of b(int):", unsafe.Sizeof(b))
	fmt.Println("length of c(int8)", unsafe.Sizeof(c))
}
|
package delta
import (
"io/ioutil"
"encoding/json"
)
// Triggers is the root object of triggers.json: the configured trigger list.
type Triggers struct {
	Triggers []Trigger `json:"triggers"`
}

// Trigger binds an event type to the subscriber that should be notified
// when the event fires.
type Trigger struct {
	EventType  string `json:"eventtype"`
	Subscriber string `json:"subscriber"`
}
// LoadTriggers reads and parses triggers.d/triggers.json relative to the
// current working directory.
//
// NOTE(review): this panics on a missing/unreadable file or malformed JSON
// instead of returning an error — acceptable only if callers treat a bad
// trigger config as fatal at startup; confirm. ioutil.ReadFile is also
// deprecated in favor of os.ReadFile since Go 1.16.
func LoadTriggers() *Triggers {
	content, err := ioutil.ReadFile("triggers.d/triggers.json")
	if err != nil {
		panic(err)
	}
	triggers := Triggers{}
	err = json.Unmarshal(content, &triggers)
	if err != nil {
		//return errors.Wrap(err, "SetRules failed")
		panic(err)
	}
	return &triggers
}
|
package main
import "fmt"
// chap3 상수
func main() {
// 상수 선언 방법
// "const" 를 사용한다.
// example1
const i int = 0
fmt.Println(i)
// example2
const j float32 = 0.3
fmt.Println(j)
// example3
const k, l = 3, "상수!"
fmt.Println(k, l)
// example4
const (
z = "i am the king!"
x = 13124
)
fmt.Println(z, x)
// example5
// ※ Go 에서는 iota (identifier) 를 이용해 변수, 상수의 값을 자동을 1씩 증가 시킬 수 있다!
const (
a = iota
b
c
)
fmt.Println(a, b, c)
}
|
package main
import (
"floqars/models"
"floqars/shared"
"encoding/json"
"fmt"
"os"
"strconv"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/mmcloughlin/geohash"
)
// GetPeople is the Lambda handler returning every person in the "people"
// table as a JSON array. The lat/lng query parameters are parsed and
// geohashed, but the hash is currently only printed — results are NOT
// filtered by location. NOTE(review): confirm whether proximity filtering
// was intended here.
//
// Responses: 400 for missing/malformed lat or lng (BUG FIX: previously
// reported as 500 server errors), 500 for storage or serialization
// failures, 200 with the encoded people list otherwise.
func GetPeople(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	latStr, lngStr := req.QueryStringParameters["lat"], req.QueryStringParameters["lng"]
	lat, err := strconv.ParseFloat(latStr, 64)
	if err != nil {
		// Malformed client input is a client error, not a server fault.
		return events.APIGatewayProxyResponse{
			StatusCode: 400,
			Body:       "invalid lat parameter",
		}, err
	}
	lng, err := strconv.ParseFloat(lngStr, 64)
	if err != nil {
		return events.APIGatewayProxyResponse{
			StatusCode: 400,
			Body:       "invalid lng parameter",
		}, err
	}
	// lat/lng are already float64; the original float64(...) conversions
	// were redundant.
	hash := geohash.EncodeInt(lat, lng)
	fmt.Println(hash)
	people := []models.Person{}
	res, err := shared.DAL.Scan("people").Execute()
	if err != nil {
		return events.APIGatewayProxyResponse{
			StatusCode: 500,
		}, err
	}
	for _, p := range res.Items {
		person := models.Person{}
		person.FromDocument(p)
		people = append(people, person)
	}
	b, err := json.Marshal(&people)
	if err != nil {
		return events.APIGatewayProxyResponse{
			StatusCode: 500,
			Body:       err.Error(),
		}, err
	}
	return events.APIGatewayProxyResponse{
		StatusCode: 200,
		Body:       string(b),
		Headers: map[string]string{
			"floqars-region":              os.Getenv("AWS_REGION"),
			"Content-Type":                "application/json",
			"Access-Control-Allow-Origin": "*",
		},
	}, nil
}
// main connects the shared data-access layer and hands control to the AWS
// Lambda runtime with GetPeople as the handler.
func main() {
	shared.Connect()
	lambda.Start(GetPeople)
}
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"os"
"strings"
)
// main reads a name and an address from stdin, stores them in a map, and
// prints the map serialized as a JSON object.
//
// BUG FIXES vs the original: the read-error check used && (reporting only
// when BOTH reads failed) and then carried on with bad data — it is now ||
// and aborts; trimming now also removes the "\r" that Windows line endings
// leave behind, which ReplaceAll(…, "\n", "") missed.
func main() {
	// Get user input for name and address.
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("Please enter your name: ")
	inputName, err1 := reader.ReadString('\n')
	fmt.Println("Please enter your address: ")
	inputAddr, err2 := reader.ReadString('\n')
	if err1 != nil || err2 != nil {
		fmt.Println("There was an error - please try again.")
		return
	}
	// Create map for storing the user's details.
	personMap := make(map[string]string)
	personMap["name"] = strings.TrimSpace(inputName)
	personMap["address"] = strings.TrimSpace(inputAddr)
	// Convert the map to a JSON object.
	bObject, err := json.Marshal(personMap)
	if err != nil {
		fmt.Println("There was an error converting map to JSON object.")
		return
	}
	fmt.Printf("Here is the JSON object: \n %v", string(bObject))
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcutil
import (
"context"
"math"
"net"
"github.com/google/gapid/core/log"
"google.golang.org/grpc"
)
// PrepareTask is called to add the services to a grpc server before it starts running.
type PrepareTask func(context.Context, net.Listener, *grpc.Server) error

// Serve prepares and runs a grpc server on the specified address.
// It also installs the standard options we normally use.
//
// NOTE(review): log.F appears to be a fatal-level log call; if it does not
// terminate the process, execution would continue into ServeWithListener
// with a nil listener after a net.Listen failure — confirm log.F semantics.
func Serve(ctx context.Context, address string, prepare PrepareTask, options ...grpc.ServerOption) error {
	listener, err := net.Listen("tcp", address)
	if err != nil {
		log.F(ctx, true, "Could not start grpc server. Error: %v", err)
	}
	return ServeWithListener(ctx, listener, prepare, options...)
}
// ServeWithListener prepares and runs a grpc server using the specified net.Listener.
// It also installs the standard options we normally use: gzip compression in
// both directions and an effectively unlimited receive size. The listener is
// closed when the function returns.
//
// NOTE(review): grpc.RPCCompressor/RPCDecompressor are deprecated in newer
// grpc-go releases in favor of encoding.RegisterCompressor — worth migrating
// when the dependency is updated.
func ServeWithListener(ctx context.Context, listener net.Listener, prepare PrepareTask, options ...grpc.ServerOption) error {
	// Default options are prepended; caller-supplied options follow them.
	options = append([]grpc.ServerOption{
		grpc.RPCCompressor(grpc.NewGZIPCompressor()),
		grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
		grpc.MaxRecvMsgSize(math.MaxInt32),
	}, options...)
	defer listener.Close()
	grpcServer := grpc.NewServer(options...)
	if err := prepare(ctx, listener, grpcServer); err != nil {
		return err
	}
	log.I(ctx, "Starting grpc server")
	if err := grpcServer.Serve(listener); err != nil {
		return log.Errf(ctx, err, "Abort running grpc server: %v", listener.Addr())
	}
	log.I(ctx, "Shutting down grpc server")
	return nil
}
|
package decimalUtils
import (
"fmt"
"github.com/shopspring/decimal"
"math/big"
"strconv"
)
func Pow(a *big.Float, e int64) *big.Float {
result := Zero().Copy(a)
for i := int64(0); i < e-1; i++ {
result = Mul(result, a)
}
return result
}
// Root computes the n-th root of a by Newton's iteration at 256-bit
// precision: x <- (1/n) * ((n-1)*x + a / x^(n-1)). The inner loop evaluates
// a / x^(n-1) via square-and-multiply on 1/x. Iteration stops once the
// change between successive estimates, scaled by 2^256, drops below x.
//
// NOTE(review): assumes a > 0 and n >= 1; behavior for other inputs is
// unverified here.
func Root(a *big.Float, n uint64) *big.Float {
	limit := Pow(NewFloat(2), 256)
	n1 := n - 1
	n1f, rn := NewFloat(float64(n1)), Div(NewFloat(1.0), NewFloat(float64(n)))
	x, x0 := NewFloat(1.0), Zero()
	_ = x0
	for {
		// potx starts as 1/x; t2 accumulates a * (1/x)^(n-1).
		potx, t2 := Div(NewFloat(1.0), x), a
		for b := n1; b > 0; b >>= 1 {
			if b&1 == 1 {
				t2 = Mul(t2, potx)
			}
			potx = Mul(potx, potx)
		}
		// Newton step.
		x0, x = x, Mul(rn, Add(Mul(n1f, x), t2))
		// Converged when |x - x0| * 2^256 < x.
		if Lesser(Mul(Abs(Sub(x, x0)), limit), x) {
			break
		}
	}
	return x
}
// Abs returns |a| as a fresh 256-bit-precision value.
func Abs(a *big.Float) *big.Float {
	result := Zero()
	return result.Abs(a)
}
func NewFloat(f float64) *big.Float {
r := big.NewFloat(f)
r.SetPrec(256)
return r
}
// Div returns a/b as a fresh 256-bit-precision value.
func Div(a, b *big.Float) *big.Float {
	return Zero().Quo(a, b)
}
// Zero returns a fresh big.Float equal to 0 with the package-wide 256-bit
// precision.
func Zero() *big.Float {
	return big.NewFloat(0).SetPrec(256)
}
// Mul returns a*b as a fresh 256-bit-precision value.
func Mul(a, b *big.Float) *big.Float {
	return Zero().Mul(a, b)
}
// Add returns a+b as a fresh 256-bit-precision value.
func Add(a, b *big.Float) *big.Float {
	return Zero().Add(a, b)
}
// Sub returns a-b as a fresh 256-bit-precision value.
func Sub(a, b *big.Float) *big.Float {
	return Zero().Sub(a, b)
}
func Lesser(x, y *big.Float) bool {
return x.Cmp(y) == -1
}
// FromString parses a decimal string into a 256-bit-precision big.Float.
// The bool mirrors big.Float.SetString's success flag (false, with a nil
// result, on parse failure).
func FromString(x string) (*big.Float, bool) {
	return Zero().SetString(x)
}
// ApplyDecimals scales x by 10^y (via Pow) and truncates the product to a
// big.Int, returning the truncation accuracy — e.g. converting a token
// amount to its smallest-unit integer representation.
func ApplyDecimals(x *big.Float, y int64) (*big.Int, big.Accuracy) {
	return Mul(x, Pow(NewFloat(10), y)).Int(nil)
}
// RemoveDecimals divides x by 10^y, converting a smallest-unit integer back
// to a decimal value. y == 0 is special-cased to divide by 1 rather than
// going through Pow(10, 0).
func RemoveDecimals(x *big.Int, y int64) *big.Float {
	if y == 0 {
		return Div(new(big.Float).SetInt(x), NewFloat(1))
	}
	return Div(new(big.Float).SetInt(x), Pow(NewFloat(10), y))
}
// GetDecimals returns the scaling factor for an ERC-20 token's precision:
// e.g. a decimals value of 18 yields 1000000000000000000 (10^18).
func GetDecimals(tokenDecimals *big.Int) decimal.Decimal {
	return decimal.New(1, int32(tokenDecimals.Int64()))
}
// Decimal rounds value to two decimal places by formatting with %.2f and
// parsing the result back.
func Decimal(value float64) float64 {
	formatted := fmt.Sprintf("%.2f", value)
	rounded, _ := strconv.ParseFloat(formatted, 64)
	return rounded
}
|
package main
import (
"math/rand"
"fmt"
"time"
)
// bubble sorts the ten-element array in place using bubble sort: each pass
// floats the largest remaining value to the end of the unsorted prefix.
func bubble(tab *[10]int) {
	for pass := 0; pass < 10; pass++ {
		for idx := 1; idx < 10-pass; idx++ {
			if tab[idx-1] > tab[idx] {
				tab[idx-1], tab[idx] = tab[idx], tab[idx-1]
			}
		}
	}
}
// main fills a ten-element array with random digits, prints it, bubble-sorts
// it, and prints the sorted result.
func main() {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	var tab [10]int
	for i := range tab {
		tab[i] = rng.Intn(10)
	}
	fmt.Println("Before")
	fmt.Println(tab)
	bubble(&tab)
	fmt.Println("After")
	fmt.Println(tab)
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tcp
import (
"time"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/seqnum"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
// Tuning constants for the RACK loss-detection implementation below.
const (
	// wcDelayedACKTimeout is the recommended maximum delayed ACK timer
	// value as defined in the RFC. It stands for worst case delayed ACK
	// timer (WCDelAckT). When FlightSize is 1, PTO is inflated by
	// WCDelAckT time to compensate for a potential long delayed ACK timer
	// at the receiver.
	// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.
	wcDelayedACKTimeout = 200 * time.Millisecond

	// tcpRACKRecoveryThreshold is the number of loss recoveries for which
	// the reorder window is inflated and after that the reorder window is
	// reset to its initial value of minRTT/4.
	// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2.
	tcpRACKRecoveryThreshold = 16
)
// RACK is a loss detection algorithm used in TCP to detect packet loss and
// reordering using transmission timestamp of the packets instead of packet or
// sequence counts. To use RACK, SACK should be enabled on the connection.

// rackControl stores the rack related fields.
// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-6.1
//
// +stateify savable
type rackControl struct {
	stack.TCPRACKState

	// exitedRecovery indicates if the connection is exiting loss recovery.
	// This flag is set if the sender is leaving the recovery after
	// receiving an ACK and is reset during updating of reorder window.
	exitedRecovery bool

	// minRTT is the estimated minimum RTT of the connection.
	minRTT time.Duration

	// tlpRxtOut indicates whether there is an unacknowledged
	// TLP retransmission.
	tlpRxtOut bool

	// tlpHighRxt is the value of sender.sndNxt at the time of sending
	// a TLP retransmission.
	tlpHighRxt seqnum.Value

	// snd is a reference to the sender.
	snd *sender
}
// init initializes RACK specific fields: the forward-most acknowledged
// sequence (FACK) starts at the initial send sequence iss, and the
// reorder-window increment starts at 1.
func (rc *rackControl) init(snd *sender, iss seqnum.Value) {
	rc.FACK = iss
	rc.ReoWndIncr = 1
	rc.snd = snd
}
// update will update the RACK related fields when an ACK has been received.
// It records the RTT of the acknowledged segment, maintains the connection's
// minimum RTT, and advances the most-recently-delivered transmit
// time/sequence (XmitTime/EndSequence).
// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-09#section-6.2
func (rc *rackControl) update(seg *segment, ackSeg *segment) {
	rtt := rc.snd.ep.stack.Clock().NowMonotonic().Sub(seg.xmitTime)

	// If the ACK is for a retransmitted packet, do not update if it is a
	// spurious inference which is determined by below checks:
	// 1. When Timestamping option is available, if the TSVal is less than
	//    the transmit time of the most recent retransmitted packet.
	// 2. When RTT calculated for the packet is less than the smoothed RTT
	//    for the connection.
	// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2
	// step 2
	if seg.xmitCount > 1 {
		if ackSeg.parsedOptions.TS && ackSeg.parsedOptions.TSEcr != 0 {
			if ackSeg.parsedOptions.TSEcr < rc.snd.ep.tsVal(seg.xmitTime) {
				return
			}
		}
		if rtt < rc.minRTT {
			return
		}
	}

	rc.RTT = rtt

	// The sender can either track a simple global minimum of all RTT
	// measurements from the connection, or a windowed min-filtered value
	// of recent RTT measurements. This implementation keeps track of the
	// simple global minimum of all RTTs for the connection.
	if rtt < rc.minRTT || rc.minRTT == 0 {
		rc.minRTT = rtt
	}

	// Update rc.xmitTime and rc.endSequence to the transmit time and
	// ending sequence number of the packet which has been acknowledged
	// most recently.
	endSeq := seg.sequenceNumber.Add(seqnum.Size(seg.payloadSize()))
	if rc.XmitTime.Before(seg.xmitTime) || (seg.xmitTime == rc.XmitTime && rc.EndSequence.LessThan(endSeq)) {
		rc.XmitTime = seg.xmitTime
		rc.EndSequence = endSeq
	}
}
// detectReorder detects if packet reordering has been observed.
// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2
// (step 3). The sender tracks the highest sequence ever selectively or
// cumulatively acknowledged in rc.FACK ("Forward ACK", a term adopted from
// [FACK]). If a never-retransmitted segment ending below rc.FACK is
// acknowledged, it was delivered out of order and rc.Reord latches to true.
func (rc *rackControl) detectReorder(seg *segment) {
	endSeq := seg.sequenceNumber.Add(seqnum.Size(seg.payloadSize()))
	switch {
	case rc.FACK.LessThan(endSeq):
		// The segment advances the forward-most ACK point: no reordering.
		rc.FACK = endSeq
	case endSeq.LessThan(rc.FACK) && seg.xmitCount == 1:
		// An original (never retransmitted) segment below FACK arrived late.
		rc.Reord = true
	}
}
// setDSACKSeen records whether a DSACK block was observed in the most
// recently processed ACK; updateRACKReorderWindow consumes this flag.
func (rc *rackControl) setDSACKSeen(dsackSeen bool) {
	rc.DSACKSeen = dsackSeen
}
// shouldSchedulePTO dictates whether we should schedule a PTO or not.
// All four conditions below must hold simultaneously.
// See https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.1.
func (s *sender) shouldSchedulePTO() bool {
	// Schedule PTO only if RACK loss detection is enabled.
	return s.ep.tcpRecovery&tcpip.TCPRACKLossDetection != 0 &&
		// The connection supports SACK.
		s.ep.SACKPermitted &&
		// The connection is not in loss recovery.
		(s.state != tcpip.RTORecovery && s.state != tcpip.SACKRecovery) &&
		// The connection has no SACKed sequences in the SACK scoreboard.
		s.ep.scoreboard.Sacked() == 0
}
// schedulePTO schedules the probe timeout as defined in
// https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.1.
func (s *sender) schedulePTO() {
	// Default PTO of 1s, used until an SRTT estimate exists.
	pto := time.Second
	s.rtt.Lock()
	if s.rtt.TCPRTTState.SRTTInited && s.rtt.TCPRTTState.SRTT > 0 {
		pto = s.rtt.TCPRTTState.SRTT * 2
		// With a single outstanding segment, allow for the receiver's
		// delayed ACK before probing.
		if s.Outstanding == 1 {
			pto += wcDelayedACKTimeout
		}
	}
	s.rtt.Unlock()
	now := s.ep.stack.Clock().NowMonotonic()
	// The probe must not fire later than the pending RTO would have;
	// the probe timer temporarily replaces the resend timer.
	if s.resendTimer.enabled() {
		if now.Add(pto).After(s.resendTimer.target) {
			pto = s.resendTimer.target.Sub(now)
		}
		s.resendTimer.disable()
	}
	s.probeTimer.enable(pto)
}
// probeTimerExpired is the same as TLP_send_probe() as defined in
// https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.2.
//
// It transmits previously-unsent data if the congestion window allows,
// otherwise retransmits the highest-sequence segment sent so far, and
// records TLP state so a later ACK can be classified by
// detectTLPRecovery.
//
// +checklocks:s.ep.mu
func (s *sender) probeTimerExpired() {
	if s.probeTimer.isZero() || !s.probeTimer.checkExpiration() {
		return
	}
	var dataSent bool
	// Prefer sending a never-transmitted segment when cwnd allows it.
	if s.writeNext != nil && s.writeNext.xmitCount == 0 && s.Outstanding < s.SndCwnd {
		dataSent = s.maybeSendSegment(s.writeNext, int(s.ep.scoreboard.SMSS()), s.SndUna.Add(s.SndWnd))
		if dataSent {
			s.Outstanding += s.pCount(s.writeNext, s.MaxPayloadSize)
			s.updateWriteNext(s.writeNext.Next())
		}
	}
	// Otherwise retransmit the highest-sequence transmitted segment,
	// but only if no TLP retransmission is already outstanding.
	if !dataSent && !s.rc.tlpRxtOut {
		var highestSeqXmit *segment
		for highestSeqXmit = s.writeList.Front(); highestSeqXmit != nil; highestSeqXmit = highestSeqXmit.Next() {
			if highestSeqXmit.xmitCount == 0 {
				// Nothing in writeList is transmitted, no need to send a probe.
				highestSeqXmit = nil
				break
			}
			if highestSeqXmit.Next() == nil || highestSeqXmit.Next().xmitCount == 0 {
				// Either everything in writeList has been transmitted or the next
				// sequence has not been transmitted. Either way this is the highest
				// sequence segment that was transmitted.
				break
			}
		}
		if highestSeqXmit != nil {
			dataSent = s.maybeSendSegment(highestSeqXmit, int(s.ep.scoreboard.SMSS()), s.SndUna.Add(s.SndWnd))
			if dataSent {
				s.rc.tlpRxtOut = true
				s.rc.tlpHighRxt = s.SndNxt
			}
		}
	}
	// Whether or not the probe was sent, the sender must arm the resend timer,
	// not the probe timer. This ensures that the sender does not send repeated,
	// back-to-back tail loss probes.
	// (The redundant trailing `return` was removed — staticcheck S1023.)
	s.postXmit(dataSent, false /* shouldScheduleProbe */)
}
// detectTLPRecovery detects if recovery was accomplished by the loss probes
// and updates TLP state accordingly.
// See https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.6.3.
func (s *sender) detectTLPRecovery(ack seqnum.Value, rcvdSeg *segment) {
	// TLP bookkeeping only applies when SACK is on and a TLP
	// retransmission is outstanding.
	if !(s.ep.SACKPermitted && s.rc.tlpRxtOut) {
		return
	}
	// Step 1.
	if s.isDupAck(rcvdSeg) && ack == s.rc.tlpHighRxt {
		var sbAboveTLPHighRxt bool
		// Look for any SACK block beyond the TLP retransmission point.
		for _, sb := range rcvdSeg.parsedOptions.SACKBlocks {
			if s.rc.tlpHighRxt.LessThan(sb.End) {
				sbAboveTLPHighRxt = true
				break
			}
		}
		if !sbAboveTLPHighRxt {
			// TLP episode is complete.
			s.rc.tlpRxtOut = false
		}
	}
	if s.rc.tlpRxtOut && s.rc.tlpHighRxt.LessThanEq(ack) {
		// TLP episode is complete.
		s.rc.tlpRxtOut = false
		if !checkDSACK(rcvdSeg) {
			// Step 2. Either the original packet or the retransmission (in the
			// form of a probe) was lost. Invoke a congestion control response
			// equivalent to fast recovery.
			s.cc.HandleLossDetected()
			s.enterRecovery()
			s.leaveRecovery()
		}
	}
}
// updateRACKReorderWindow updates the reorder window.
// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2
//   - Step 4: Update RACK reordering window
//     To handle the prevalent small degree of reordering, RACK.reo_wnd serves as
//     an allowance for settling time before marking a packet lost. RACK starts
//     initially with a conservative window of min_RTT/4. If no reordering has
//     been observed RACK uses reo_wnd of zero during loss recovery, in order to
//     retransmit quickly, or when the number of DUPACKs exceeds the classic
//     DUPACKthreshold.
func (rc *rackControl) updateRACKReorderWindow() {
	dsackSeen := rc.DSACKSeen
	snd := rc.snd
	// React to DSACK once per round trip.
	// If SND.UNA < RACK.rtt_seq:
	//   RACK.dsack = false
	if snd.SndUna.LessThan(rc.RTTSeq) {
		dsackSeen = false
	}
	// If RACK.dsack:
	//   RACK.reo_wnd_incr += 1
	//   RACK.dsack = false
	//   RACK.rtt_seq = SND.NXT
	//   RACK.reo_wnd_persist = 16
	if dsackSeen {
		rc.ReoWndIncr++
		// NOTE(review): this local store mirrors the spec's
		// "RACK.dsack = false" but the local is not read again; the
		// field itself is reset via setDSACKSeen elsewhere — confirm.
		dsackSeen = false
		rc.RTTSeq = snd.SndNxt
		rc.ReoWndPersist = tcpRACKRecoveryThreshold
	} else if rc.exitedRecovery {
		// Else if exiting loss recovery:
		//   RACK.reo_wnd_persist -= 1
		//   If RACK.reo_wnd_persist <= 0:
		//     RACK.reo_wnd_incr = 1
		rc.ReoWndPersist--
		if rc.ReoWndPersist <= 0 {
			rc.ReoWndIncr = 1
		}
		rc.exitedRecovery = false
	}
	// Reorder window is zero during loss recovery, or when the number of
	// DUPACKs exceeds the classic DUPACKthreshold.
	// If RACK.reord is FALSE:
	//   If in loss recovery: (If in fast or timeout recovery)
	//     RACK.reo_wnd = 0
	//     Return
	//   Else if RACK.pkts_sacked >= RACK.dupthresh:
	//     RACK.reo_wnd = 0
	//     return
	if !rc.Reord {
		if snd.state == tcpip.RTORecovery || snd.state == tcpip.SACKRecovery {
			rc.ReoWnd = 0
			return
		}
		if snd.SackedOut >= nDupAckThreshold {
			rc.ReoWnd = 0
			return
		}
	}
	// Calculate reorder window.
	// RACK.reo_wnd = RACK.min_RTT / 4 * RACK.reo_wnd_incr
	// RACK.reo_wnd = min(RACK.reo_wnd, SRTT)
	snd.rtt.Lock()
	srtt := snd.rtt.TCPRTTState.SRTT
	snd.rtt.Unlock()
	rc.ReoWnd = time.Duration((int64(rc.minRTT) / 4) * int64(rc.ReoWndIncr))
	if srtt < rc.ReoWnd {
		rc.ReoWnd = srtt
	}
}
// exitRecovery notes that the connection has left loss recovery so the
// next updateRACKReorderWindow call can decay ReoWndPersist.
func (rc *rackControl) exitRecovery() {
	rc.exitedRecovery = true
}
// detectLoss marks the segment as lost if the reordering window has elapsed
// and the ACK is not received. It will also arm the reorder timer.
// Returns the number of segments considered lost.
// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2 Step 5.
func (rc *rackControl) detectLoss(rcvTime tcpip.MonotonicTime) int {
	var timeout time.Duration
	numLost := 0
	// Only transmitted segments can be lost; the writeList prefix with
	// xmitCount != 0 covers exactly those.
	for seg := rc.snd.writeList.Front(); seg != nil && seg.xmitCount != 0; seg = seg.Next() {
		if rc.snd.ep.scoreboard.IsSACKED(seg.sackBlock()) {
			continue
		}
		// Already marked lost (and never retransmitted): count it again.
		if seg.lost && seg.xmitCount == 1 {
			numLost++
			continue
		}
		endSeq := seg.sequenceNumber.Add(seqnum.Size(seg.payloadSize()))
		// NOTE(review): the tie-break for equal transmit times uses
		// rc.EndSequence < endSeq; confirm against the spec's
		// "seg.end_seq <= RACK.end_seq" ordering for step 5.
		if seg.xmitTime.Before(rc.XmitTime) || (seg.xmitTime == rc.XmitTime && rc.EndSequence.LessThan(endSeq)) {
			timeRemaining := seg.xmitTime.Sub(rcvTime) + rc.RTT + rc.ReoWnd
			if timeRemaining <= 0 {
				seg.lost = true
				numLost++
			} else if timeRemaining > timeout {
				timeout = timeRemaining
			}
		}
	}
	// Arm the reorder timer for the furthest-out still-pending segment.
	if timeout != 0 && !rc.snd.reorderTimer.enabled() {
		rc.snd.reorderTimer.enable(timeout)
	}
	return numLost
}
// reorderTimerExpired will retransmit the segments which have not been acked
// before the reorder timer expired.
//
// +checklocks:rc.snd.ep.mu
func (rc *rackControl) reorderTimerExpired() {
	if rc.snd.reorderTimer.isZero() || !rc.snd.reorderTimer.checkExpiration() {
		return
	}
	numLost := rc.detectLoss(rc.snd.ep.stack.Clock().NowMonotonic())
	if numLost == 0 {
		return
	}
	// Enter recovery (and notify congestion control) only if we are not
	// already in fast recovery.
	fastRetransmit := false
	if !rc.snd.FastRecovery.Active {
		rc.snd.cc.HandleLossDetected()
		rc.snd.enterRecovery()
		fastRetransmit = true
	}
	// (Redundant trailing `return` removed — staticcheck S1023.)
	rc.DoRecovery(nil, fastRetransmit)
}
// DoRecovery implements lossRecovery.DoRecovery.
// It optionally fast-retransmits the head segment and then retransmits
// every segment RACK has marked lost, subject to the congestion window.
//
// +checklocks:rc.snd.ep.mu
func (rc *rackControl) DoRecovery(_ *segment, fastRetransmit bool) {
	snd := rc.snd
	if fastRetransmit {
		snd.resendSegment()
	}
	var dataSent bool
	// Iterate the writeList and retransmit the segments which are marked
	// as lost by RACK.
	for seg := snd.writeList.Front(); seg != nil && seg.xmitCount > 0; seg = seg.Next() {
		// Stop at the first never-sent segment.
		if seg == snd.writeNext {
			break
		}
		if !seg.lost {
			continue
		}
		// Reset seg.lost as it is already SACKed.
		if snd.ep.scoreboard.IsSACKED(seg.sackBlock()) {
			seg.lost = false
			continue
		}
		// Check the congestion window after entering recovery.
		if snd.Outstanding >= snd.SndCwnd {
			break
		}
		if sent := snd.maybeSendSegment(seg, int(snd.ep.scoreboard.SMSS()), snd.SndUna.Add(snd.SndWnd)); !sent {
			break
		}
		dataSent = true
		snd.Outstanding += snd.pCount(seg, snd.MaxPayloadSize)
	}
	snd.postXmit(dataSent, true /* shouldScheduleProbe */)
}
|
package main
import (
"fmt"
"os"
"sync"
)
//START1 OMIT
// Repo is a concurrency-safe string->int store; the embedded RWMutex
// guards items.
type Repo struct {
	sync.RWMutex
	items map[string]int
}
var (
	r    *Repo
	once sync.Once
)

// GetInstance returns the process-wide Repo singleton, building it on
// first use. sync.Once guarantees the initializer runs exactly once
// even under concurrent callers.
func GetInstance() *Repo {
	once.Do(func() {
		r = &Repo{items: map[string]int{}}
	})
	return r
}
// END1 OMIT
//START2 OMIT
// Set stores val under key while holding the write lock.
func (r *Repo) Set(key string, val int) {
	r.Lock()
	r.items[key] = val
	r.Unlock()
}
// Get looks up key under the read lock, returning an error when the
// key is absent.
func (r *Repo) Get(key string) (int, error) {
	r.RLock()
	v, ok := r.items[key]
	r.RUnlock()
	if !ok {
		return 0, fmt.Errorf("The key %s is not present", key)
	}
	return v, nil
}
// END2 OMIT
//START3 OMIT
// main demonstrates the singleton: store one value, read it back, and
// exit non-zero on lookup failure.
func main() {
	repo := GetInstance()
	repo.Set("jessy", 1)
	v, err := repo.Get("jessy")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Printf("jessy: %d\n", v)
}
//END3 OMIT
|
package cornercase
import "fmt"
/*
Given a sorted integer array without duplicates, return the summary of its ranges.
Example 1:
Input: [0,1,2,4,5,7]
Output: ["0->2","4->5","7"]
Explanation: 0,1,2 form a continuous range; 4,5 form a continuous range.
Example 2:
Input: [0,2,3,4,6,8,9]
Output: ["0","2->4","6","8->9"]
Explanation: 2,3,4 form a continuous range; 8,9 form a continuous range.
*/
// summaryRanges compresses a sorted, duplicate-free int slice into
// range strings: a consecutive run becomes "lo->hi", a singleton "lo".
func summaryRanges(nums []int) []string {
	ret := make([]string, 0)
	for lo := 0; lo < len(nums); {
		// Extend hi while the next value continues the run.
		hi := lo
		for hi+1 < len(nums) && nums[hi+1] == nums[hi]+1 {
			hi++
		}
		if lo == hi {
			ret = append(ret, fmt.Sprintf("%d", nums[lo]))
		} else {
			ret = append(ret, fmt.Sprintf("%d->%d", nums[lo], nums[hi]))
		}
		lo = hi + 1
	}
	return ret
}
// main exercises summaryRanges with the documented examples plus two
// edge cases.
func main() {
	inputs := [][]int{
		{0, 1, 2, 4, 5, 7},
		{0, 2, 3, 4, 6, 8, 9},
		{0, 1, 2, 3},
		{},
	}
	for _, in := range inputs {
		fmt.Println(summaryRanges(in))
	}
}
|
package icalendar
import (
"bufio"
"fmt"
"io"
"reflect"
"sort"
"strings"
"time"
)
// VTIMEZONE models an iCalendar VTIMEZONE component (RFC 5545 3.6.5).
type VTIMEZONE struct {
	// TZID is REQUIRED, but MUST NOT occur more than once.
	TZID string
	// 'last-mod' and 'tzurl' are OPTIONAL, and MAY occur more than once.
	LASTMODIFIED string
	TZURL string
	// One of 'standardc' or 'daylightc' MUST occur and each MAY occur more than once.
	STANDARD TZPROP
	DAYLIGHT TZPROP
	// The following are OPTIONAL, and MAY occur more than once.
	XPROP string
	IANAPROP string
}
// TZPROP holds the properties shared by the STANDARD and DAYLIGHT
// sub-components of a VTIMEZONE (RFC 5545 3.6.5 tzprop).
type TZPROP struct {
	// The following are REQUIRED, but MUST NOT occur more than once.
	DTSTART time.Time
	TZOFFSETTO string
	TZOFFSETFROM string
	// The following is OPTIONAL, but SHOULD NOT occur more than once.
	RRULE string
	// The following are OPTIONAL, and MAY occur more than once.
	COMMENT string
	RDATE time.Time
	TZNAME string
	XPROP string
	IANAPROP string
}
// NewTimezone builds a VTIMEZONE with only its TZID set.
func NewTimezone(tzid string) *VTIMEZONE {
	tz := VTIMEZONE{TZID: tzid}
	return &tz
}
// Write serializes the component to w in BEGIN/END iCalendar form with
// CRLF line endings. Only field names in the key list below are
// emitted (currently just TZID); the reflection switch is written so
// further names can be added to the list.
//
// BUG FIX: the bufio.Writer was never flushed, so nothing at all was
// written to w. Flush now runs at the end and its error is returned.
func (c *VTIMEZONE) Write(w io.Writer) error {
	t := reflect.TypeOf(c).Elem()
	v := reflect.ValueOf(c).Elem()
	bw := bufio.NewWriter(w)

	props := map[string]interface{}{}
	for _, k := range []string{
		"TZID",
	} {
		if _, has := t.FieldByName(k); has {
			fv := v.FieldByName(k)
			val := fv.Interface()
			switch fv.Type() {
			case reflect.TypeOf(time.Time{}):
				t := val.(time.Time)
				// NOTE(review): this DTEND special case is unreachable
				// while only "TZID" is listed above — confirm whether
				// it is intended for a future key list.
				if k == "DTEND" && t.Unix() <= 0 {
					start, _ := time.Parse("20060102T150405Z", props["DTSTART"].(string))
					delete(props, "DTSTART")
					props["DTSTART;VALUE=DATE"] = start.UTC().Format("20060102")
				} else {
					props[k] = t.UTC().Format("20060102T150405Z")
				}
			case reflect.TypeOf(""):
				if val != "" {
					props[k] = val
				}
			case reflect.TypeOf([]string{}):
				if s := val.([]string); len(s) > 0 {
					props[k] = strings.Join(s, ",")
				}
			default:
				props[k] = val
			}
		}
	}

	// Emit properties in sorted order for deterministic output.
	var keys []string
	for k := range props {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	fmt.Fprintf(bw, "BEGIN:%s\r\n", t.Name())
	for _, k := range keys {
		fmt.Fprintf(bw, "%s:%s\r\n", k, props[k])
	}
	fmt.Fprintf(bw, "END:%s\r\n", t.Name())
	return bw.Flush()
}
|
package types
import (
"fmt"
"strings"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// Order records a lending order between a borrower and a lender,
// including the coins exchanged and the owning account address.
type Order struct {
	Id string `json:"id"`
	Borrower string `json:"borrower"`
	Lender string `json:"lender"`
	TokenGet sdk.Coin `json:"tokenGet"`
	TokenGive sdk.Coin `json:"tokenGive"`
	Owner sdk.AccAddress `json:"owner"`
}
// NewOrder assembles an Order value from its component fields.
func NewOrder(id, borrower, lender string, tokenGet, tokenGive sdk.Coin, owner sdk.AccAddress) Order {
	var order Order
	order.Id = id
	order.Borrower = borrower
	order.Lender = lender
	order.TokenGet = tokenGet
	order.TokenGive = tokenGive
	order.Owner = owner
	return order
}
// String implements fmt.Stringer, rendering the order as a trimmed
// multi-line description.
func (o Order) String() string {
	out := fmt.Sprintf(`Id: %s
Borrower: %s
Lender: %s
TokenGet: %v
TokenGive: %v
Owner: %s`, o.Id, o.Borrower, o.Lender, o.TokenGet, o.TokenGive, o.Owner)
	return strings.TrimSpace(out)
}
|
package gocloudfiles
import (
"bytes"
"crypto/rand"
"fmt"
"io/ioutil"
"os"
"testing"
)
// Integration-test credentials come from the environment so secrets
// stay out of source control; TestMain aborts when they are unset.
var (
	TestUserName = os.Getenv("TEST_USERNAME")
	TestApiKey = os.Getenv("TEST_KEY")
)
// TestMain guards the suite: without credentials the tests cannot talk
// to Cloud Files, so bail out before running anything.
func TestMain(m *testing.M) {
	if TestUserName == "" || TestApiKey == "" {
		fmt.Println("Please set the environment variables TEST_USERNAME and TEST_KEY")
		os.Exit(1)
	}
	os.Exit(m.Run())
}
// TestGetFileLength verifies that the object's size can be read
// without downloading the whole file.
func TestGetFileLength(t *testing.T) {
	fmt.Println("Test get file length...")
	client := NewCloudFiles(TestUserName, TestApiKey)
	if err := client.Authorize(); err != nil {
		t.Fatalf("Could not authorize: %s", err)
	}
	size, _, err := client.GetFileSize("IAD", "testing", "ubuntu-14.04.4-desktop-amd64.iso")
	if err != nil {
		t.Fatalf("Could not get file size: %s", err)
	}
	realFileSize := int64(1069547520)
	if size != realFileSize {
		t.Fatalf("Size should be %d but instead is %d.", realFileSize, size)
	}
}
// TestGetFileChunk verifies that a byte range of an object can be
// downloaded into a temp file and that the byte counts agree.
func TestGetFileChunk(t *testing.T) {
	fmt.Println("Test get file chunk...")
	cf := NewCloudFiles(TestUserName, TestApiKey)
	err := cf.Authorize()
	if err != nil {
		t.Fatalf("Could not authorize: %s", err)
	}
	size, _, err := cf.GetFileSize("IAD", "testing", "ubuntu-14.04.4-desktop-amd64.iso")
	if err != nil {
		t.Fatalf("Could not get file size: %s", err)
	}
	if size == 0 {
		t.Fatalf("Size should be greater than 0.")
	}
	tmpFile, err := ioutil.TempFile("", "")
	// BUG FIX: the error from TempFile was previously ignored; a nil
	// tmpFile would have caused a confusing panic below instead of a
	// clear failure.
	if err != nil {
		t.Fatalf("Could not create temp file: %s", err)
	}
	defer os.Remove(tmpFile.Name())
	reportedSize, _, err := cf.GetChunk("IAD", "testing", "ubuntu-14.04.4-desktop-amd64.iso",
		tmpFile, 100, 100000)
	tmpFile.Close()
	if err != nil {
		t.Fatalf("Could not get chunk: %s", err)
	}
	if reportedSize != 100000 {
		t.Fatalf("Bytes copied does not match: %d", reportedSize)
	}
	info, err := os.Stat(tmpFile.Name())
	if err != nil {
		t.Fatalf("Could not stat file: %s", err)
	}
	if info.Size() != 100000 {
		t.Fatalf("Expected file size to be 100,000 but got: %d", info.Size())
	}
}
// TestPutFileChunk uploads 10 KB of random bytes and verifies the
// service returns a non-empty ETag.
func TestPutFileChunk(t *testing.T) {
	fmt.Println("Test put file chunk...")
	client := NewCloudFiles(TestUserName, TestApiKey)
	if err := client.Authorize(); err != nil {
		t.Fatalf("Could not authorize: %s", err)
	}
	payload := make([]byte, 10000)
	if _, err := rand.Read(payload); err != nil {
		t.Fatalf("Could not generate random: %s", err)
	}
	etag, err := client.PutFile("IAD", "testing", "newfile.bin", bytes.NewReader(payload))
	if err != nil {
		t.Fatalf("Could not put file: %s", err)
	}
	if etag == "" {
		t.Fatalf("Etag is empty but should be filled.")
	}
}
// TestCopyFile copies one object between two regions and checks for
// errors.
func TestCopyFile(t *testing.T) {
	fmt.Println("Test copy file...")
	client := NewCloudFiles(TestUserName, TestApiKey)
	if err := client.Authorize(); err != nil {
		t.Fatalf("Could not authorize: %s", err)
	}
	if err := client.CopyFile("IAD", "testing", "ubuntu-14.04.4-desktop-amd64.iso",
		"DFW", "testing", "ubuntu-14.04.4-desktop-amd64.iso"); err != nil {
		t.Fatalf("Could not copy file: %s", err)
	}
}
|
package main
import (
"fmt"
"math/big"
)
/*
Consider all integer combinations of ab for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
2ˆ2=4, 2ˆ3=8, 2ˆ4=16, 2ˆ5=32
3ˆ2=9, 3ˆ3=27, 3ˆ4=81, 3ˆ5=243
4ˆ2=16, 4ˆ3=64, 4ˆ4=256, 4ˆ5=1024
5ˆ2=25, 5ˆ3=125, 5ˆ4=625, 5ˆ5=3125
If they are then placed in numerical order, with any repeats removed, we get the following sequence
of 15 distinct terms:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
How many distinct terms are in the sequence generated by ab for 2 ≤ a ≤ 100 and 2 ≤ b ≤ 100?
*/
// max is the upper bound for both the base and the exponent
// (Project Euler problem 29 uses 100).
var max = 100

// countDistinctPowers returns the number of distinct values of a^b for
// 2 <= a <= limit and 2 <= b <= limit. big.Int keeps the powers exact,
// and their decimal string forms serve as deduplication keys.
// Extracted from main so the computation is testable.
func countDistinctPowers(limit int) int {
	seen := make(map[string]struct{})
	for a := 2; a <= limit; a++ {
		for b := 2; b <= limit; b++ {
			n := big.NewInt(int64(a))
			e := big.NewInt(int64(b))
			seen[n.Exp(n, e, nil).String()] = struct{}{}
		}
	}
	// The map size IS the answer; the original counted entries with an
	// explicit loop, which len() replaces.
	return len(seen)
}

func main() {
	fmt.Println(countDistinctPowers(max))
}
|
package tracing
import (
"fmt"
"net/http"
"strconv"
zipkin "github.com/openzipkin/zipkin-go"
zipkinhttp "github.com/openzipkin/zipkin-go/middleware/http"
"github.com/openzipkin/zipkin-go/model"
reporterhttp "github.com/openzipkin/zipkin-go/reporter/http"
)
// endpointURL is the default Zipkin v2 span ingestion endpoint.
// NOTE(review): appears unused in this file — confirm before removing.
const endpointURL = "http://localhost:9411/api/v2/spans"
// Middler bundles a Zipkin tracer with the reporter endpoint host and
// port used to build new tracers and HTTP middleware.
type Middler struct {
	Tracer *zipkin.Tracer
	EndpointURL string
	Port int
}
// NewTracer builds a Zipkin tracer for the named service that reports
// spans to m.EndpointURL:m.Port and samples 100% of traces.
func (m *Middler) NewTracer(service string) (*zipkin.Tracer, error) {
	// reporter is responsible for sending traces to zipkin server
	zipEndpoint := fmt.Sprintf("%s:%s", m.EndpointURL, strconv.Itoa(m.Port))
	reporter := reporterhttp.NewReporter(zipEndpoint)

	// local service endpoint, identifying this service in recorded spans
	localEndpoint := &model.Endpoint{ServiceName: service, Port: uint16(m.Port)}

	// Which traces to be sampled. In this case 100% (ie 1.00) of traces will be recorded.
	sampler, err := zipkin.NewCountingSampler(1)
	if err != nil {
		return nil, err
	}

	t, err := zipkin.NewTracer(
		reporter,
		zipkin.WithSampler(sampler),
		zipkin.WithLocalEndpoint(localEndpoint),
	)
	if err != nil {
		return nil, err
	}
	// err is provably nil here; return an explicit nil instead of the
	// stale variable (was `return t, err`).
	return t, nil
}
// Instrument wraps next with Zipkin server middleware that records a
// span named span for each request.
func (m *Middler) Instrument(span string, next http.Handler) http.Handler {
	middleware := zipkinhttp.NewServerMiddleware(
		m.Tracer,
		zipkinhttp.SpanName(span),
	)
	return middleware(next)
}
// InstrumentHTTPClient replaces client.Transport with a tracing
// transport so outgoing requests carry Zipkin spans.
func (m *Middler) InstrumentHTTPClient(client *http.Client) error {
	transport, err := zipkinhttp.NewTransport(
		m.Tracer,
		zipkinhttp.TransportTrace(true),
	)
	if err != nil {
		// BUG FIX: the original assigned the (nil) transport to
		// client.Transport even on error, clobbering any existing
		// transport; now the client is left untouched on failure.
		return err
	}
	client.Transport = transport
	return nil
}
// Zipkin middleware for RPC to be implemented in near future (23/7/2018)
|
// An anagram set finder.
package main
import (
"bufio"
"flag"
"fmt"
"log"
"os"
"sort"
"strings"
)
// runes implements sort.Interface over a rune slice so a word's
// letters can be sorted lexicographically.
type runes []rune

// Len method returns the length of the runes list.
func (r runes) Len() int {
	return len(r)
}

// Less method compares two runes lexicographically.
func (r runes) Less(i, j int) bool {
	return r[i] < r[j]
}

// Swap method swaps two runes.
func (r runes) Swap(i, j int) {
	r[i], r[j] = r[j], r[i]
}
// genAnagramKey takes a word and returns its canonical anagram key:
// the word lower-cased with its letters sorted, so all anagrams of a
// word map to the same key.
func genAnagramKey(word string) string {
	letters := []rune(strings.ToLower(word))
	sort.Slice(letters, func(i, j int) bool { return letters[i] < letters[j] })
	return string(letters)
}
// main reads a dictionary file (one word per line), groups words by
// anagram key, and writes each anagram set as a space-joined line to
// the output file (or stdout when no output file is given).
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s input_file output_file\nIf no output_file provided, will use stdout.\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()
	// Get a dictionary input file, and output file from CLI.
	if flag.NArg() < 1 {
		flag.Usage()
		os.Exit(1)
	}
	dictFilename := flag.Arg(0)
	outFilename := flag.Arg(1)
	// Open dictionary file.
	words := []string{}
	dictFile, err := os.Open(dictFilename)
	if err != nil {
		log.Fatalf("failed to open dictionary file: %v\n", err)
	}
	defer dictFile.Close()
	// Read words (one per line) from dictFile and populate 'words' list.
	scanner := bufio.NewScanner(dictFile)
	for scanner.Scan() {
		words = append(words, scanner.Text())
	}
	// BUG FIX: a read error previously went unnoticed, silently
	// producing a truncated word list.
	if err := scanner.Err(); err != nil {
		log.Fatalf("failed to read dictionary file: %v\n", err)
	}
	keyedWords := make(map[string][]string)
	// For each word in 'words' list.
	for _, word := range words {
		anagramKey := genAnagramKey(word)
		// Key 'keyedWords' by anagramKey, and append original word to value list.
		keyedWords[anagramKey] = append(keyedWords[anagramKey], word)
	}
	// Free 'words' list.
	words = nil
	// Sort keys of 'keyedWords', appending to 'anagramKeys' list.
	var anagramKeys []string
	for anagramKey := range keyedWords {
		anagramKeys = append(anagramKeys, anagramKey)
	}
	sort.Strings(anagramKeys)
	// Open output file. If no output file specified, use os.Stdout.
	outFile := os.Stdout
	if len(outFilename) != 0 {
		outFile, err = os.Create(outFilename)
		if err != nil {
			// (typo "outpuf" fixed)
			log.Fatalf("failed to open output file: %v\n", err)
		}
		defer outFile.Close()
	}
	// For each key in 'anagramKeys' list, output list of words (anagram set) to output file.
	for _, anagramKey := range anagramKeys {
		fmt.Fprintf(outFile, "%v\n", strings.Join(keyedWords[anagramKey], " "))
	}
}
|
package main
import("fmt")
// Stack: a slice-backed stack of Tree values.
// NOTE: all methods use value receivers, so mutating operations
// (push/pop) return the updated Stack and callers must reassign it.
type Stack struct {
	data []Tree
}

// push appends info to the top of the stack and returns the updated
// stack (the caller's original copy is not modified).
func (s Stack) push(info Tree) Stack {
	s.data = append(s.data, info)
	return s
}

// pop removes the top element, returning the updated stack and a
// pointer to a copy of the removed Tree.
// NOTE(review): panics when the stack is empty — callers must check
// size() first.
func (s Stack) pop() (Stack, *Tree) {
	var info = s.data[len(s.data)-1]
	s.data = s.data[:len(s.data)-1]
	return s, &info
}

// size reports the number of elements on the stack.
func (s Stack) size() int {
	return len(s.data)
}

/*func (s Stack) print() {
	for _, tree := range s.data {
		printBplc(&tree)
	}
}*/

// print writes the stack contents top-to-bottom via printBplc,
// space-separated; an empty stack prints "0".
func (s Stack) print() {
	l := len(s.data)
	if l == 0 {
		fmt.Print("0")
		return
	}
	printBplc(&s.data[l-1])
	for i := l - 2; i >= 0; i-- {
		fmt.Print(" ")
		printBplc(&s.data[i])
	}
}
|
package stringutils
import (
"github.com/asktop/gotools/acast"
"regexp"
"strconv"
"strings"
"unicode/utf8"
)
// Len returns the number of Unicode code points (runes) in s.
// utf8.RuneCountInString counts in place instead of allocating a full
// []rune copy of the string as the previous implementation did.
func Len(s string) int {
	return utf8.RuneCountInString(s)
}
// Substr extracts a rune-based substring of s starting at start.
// @param length optional: omitted = take everything from start;
//               negative = take the |length| runes ending at start.
// NOTE(review): when start exceeds the rune count it wraps via modulo
// (start % len) rather than clamping — confirm this is intentional.
func Substr(s string, start int, length ...int) string {
	rs := []rune(s)
	l := len(rs)
	if len(length) > 0 {
		l = length[0]
	}
	if l > 0 {
		// Forward extraction: clamp start into range, then take up to
		// l runes without running past the end.
		if start <= 0 {
			start = 0
		} else {
			if start > len(rs) {
				start = start % len(rs)
			}
		}
		end := start + l
		if start+l > len(rs) {
			end = len(rs)
		}
		return string(rs[start:end])
	} else if l < 0 {
		// Backward extraction: start <= 0 means "from the end"; take
		// the |l| runes that end at start.
		if start <= 0 {
			start = len(rs)
		} else {
			if start > len(rs) {
				start = start % len(rs)
			}
		}
		end := start
		start = end + l
		if end+l < 0 {
			start = 0
		}
		return string(rs[start:end])
	} else {
		// Zero length always yields the empty string.
		return ""
	}
}
// SubstrByEnd extracts the rune-based substring s[start:end].
// @param end 0 (or otherwise invalid): take through the end of the
//            string; negative: offset counted from the end.
// NOTE(review): start beyond the rune count wraps via modulo, matching
// Substr's behavior.
func SubstrByEnd(s string, start int, end int) string {
	rs := []rune(s)
	if start < 0 {
		start = 0
	}
	if start > len(rs) {
		start = start % len(rs)
	}
	if end >= 0 {
		// Non-negative end: clamp out-of-range values to the length.
		if end < start || end > len(rs) {
			end = len(rs)
		}
	} else {
		// Negative end counts backwards from the end of the string.
		if len(rs)+end < start {
			end = len(rs)
		} else {
			end = len(rs) + end
		}
	}
	return string(rs[start:end])
}
// EqualNoCase reports whether the string forms of str1 and str2 are
// equal ignoring letter case (compared via strings.ToLower).
func EqualNoCase(str1 interface{}, str2 interface{}) bool {
	return strings.ToLower(acast.ToString(str1)) == strings.ToLower(acast.ToString(str2))
}
// ReplaceNoCase replaces up to n occurrences of old in s with new,
// matching old case-insensitively while copying the untouched parts of
// s byte-for-byte. n < 0 replaces all occurrences. The structure
// mirrors strings.Replace, but matches are located in lower-cased
// copies of s and old.
func ReplaceNoCase(s string, old string, new string, n int) string {
	if n == 0 {
		return s
	}
	ls := strings.ToLower(s)
	lold := strings.ToLower(old)
	// Count matches to bound n; bail out early when there are none.
	if m := strings.Count(ls, lold); m == 0 {
		return s
	} else if n < 0 || m < n {
		n = m
	}
	// Pre-size the output buffer to the exact final length.
	ns := make([]byte, len(s)+n*(len(new)-len(old)))
	w := 0
	start := 0
	for i := 0; i < n; i++ {
		j := start
		if len(old) == 0 {
			// Empty pattern: insert before every rune (like
			// strings.Replace does), advancing one rune at a time.
			if i > 0 {
				_, wid := utf8.DecodeRuneInString(s[start:])
				j += wid
			}
		} else {
			j += strings.Index(ls[start:], lold)
		}
		w += copy(ns[w:], s[start:j])
		w += copy(ns[w:], new)
		start = j + len(old)
	}
	w += copy(ns[w:], s[start:])
	return string(ns[0:w])
}
// TrimSpaceToOne trims leading/trailing whitespace (including tabs)
// and collapses every interior run of whitespace to a single space.
//
// BUG FIX: the old code's `strings.Replace(s, " ", " ", -1)` was a
// no-op (its documented intent was to convert tabs to spaces), it
// ignored the regexp.Compile error, and it collapsed runs with an
// O(n^2) byte-splice loop that kept the run's FIRST character (so a
// leading tab in a run survived). A single \s+ replacement implements
// the documented behavior directly.
func TrimSpaceToOne(s string) string {
	s = strings.TrimSpace(s)
	// \s covers spaces, tabs and newlines; every run becomes " ".
	reg := regexp.MustCompile(`\s+`)
	return reg.ReplaceAllString(s, " ")
}
// IntToStr converts num to a string zero-padded to length digits
// (the sign is not counted in length).
// @param force when true, numbers longer than length are truncated to
//              their last `length` digits.
func IntToStr(num int, length int, force ...bool) string {
	if length <= 0 {
		return strconv.Itoa(num)
	}
	// Separate the sign so padding/truncation applies to digits only.
	sign := ""
	n := num
	if n < 0 {
		sign = "-"
		n = -n
	}
	digits := strconv.Itoa(n)
	if (len(force) > 0 && force[0]) || len(digits) < length {
		digits = strings.Repeat("0", length) + digits
		digits = digits[len(digits)-length:]
	}
	return sign + digits
}
// Int64ToStr converts num to a string zero-padded to length digits
// (the sign is not counted in length).
// @param force when true, numbers longer than length are truncated to
//              their last `length` digits.
func Int64ToStr(num int64, length int, force ...bool) string {
	if length <= 0 {
		return strconv.FormatInt(num, 10)
	}
	// Separate the sign so padding/truncation applies to digits only.
	sign := ""
	n := num
	if n < 0 {
		sign = "-"
		n = -n
	}
	digits := strconv.FormatInt(n, 10)
	if (len(force) > 0 && force[0]) || len(digits) < length {
		digits = strings.Repeat("0", length) + digits
		digits = digits[len(digits)-length:]
	}
	return sign + digits
}
// Join stringifies each argument and joins them with single spaces,
// trimming surrounding whitespace from the result.
func Join(args ...interface{}) string {
	parts := make([]string, 0, len(args))
	for _, arg := range args {
		parts = append(parts, acast.ToStringForce(arg))
	}
	return strings.TrimSpace(strings.Join(parts, " "))
}
// HideNo masks the middle of s with '*', keeping the first `start`
// and last `end` characters visible.
// start: number of leading characters to keep visible.
// end: number of trailing characters to keep visible.
// length: optional total output length; defaults to len(s).
// NOTE(review): counts bytes via len(s), so multi-byte characters may
// be miscounted — confirm inputs are ASCII.
func HideNo(s string, start int, end int, length ...int) string {
	s = strings.TrimSpace(s)
	oldLen := len(s)
	newLen := oldLen
	if len(length) > 0 {
		newLen = length[0]
	}
	// minLen bounds how many characters may be shown: the smaller of
	// the real and the requested lengths.
	minLen := oldLen
	if oldLen >= newLen {
		minLen = newLen
	}
	if minLen <= 1 {
		return strings.Repeat("*", newLen)
	}
	// Shrink start/end so the visible parts always leave at least one
	// masked character.
	if start >= minLen {
		start = minLen - 1
		end = 0
	} else if end >= minLen {
		start = 0
		end = minLen - 1
	} else if start+end >= minLen {
		start = minLen / 2
		end = minLen/2 - 1
	}
	// Substr(s, 0, -end) takes the last `end` characters.
	rs := Substr(s, 0, start) + strings.Repeat("*", newLen-start-end) + Substr(s, 0, -end)
	return rs
}
// HidePhone masks a phone number while keeping prefix/suffix context:
// numbers containing "+" (international form) mask the middle eight
// characters with "****"; numbers containing a separator ("-", "_" or
// space) mask six with "***"; plain 11-digit numbers use the common
// 3-visible / 4-masked / 4-visible layout; anything else masks six.
func HidePhone(s string) string {
	s = strings.TrimSpace(s)
	length := len(s)
	if length == 0 {
		return ""
	}
	if strings.Contains(s, "+") {
		return Substr(s, 0, length-8) + "****" + SubstrByEnd(s, length-4, 0)
	} else {
		if strings.Contains(s, "-") || strings.Contains(s, "_") || strings.Contains(s, " ") {
			return Substr(s, 0, length-6) + "***" + SubstrByEnd(s, length-3, 0)
		} else {
			if length == 11 {
				return Substr(s, 0, 3) + "****" + SubstrByEnd(s, length-4, 0)
			} else {
				return Substr(s, 0, length-6) + "***" + SubstrByEnd(s, length-3, 0)
			}
		}
	}
}
// HideEmail masks the local part of an e-mail address (keeping two
// characters at each end of a fixed six-character rendering) and
// leaves the domain intact. Strings without exactly one "@" are
// returned unchanged.
func HideEmail(s string) string {
	emails := strings.Split(s, "@")
	if len(emails) != 2 {
		return s
	}
	return HideNo(emails[0], 2, 2, 6) + "@" + emails[1]
}
// HidePwd masks a password: any non-blank input renders as "******",
// blank input as "". When allHide is true even blank input is masked.
func HidePwd(s string, allHide ...bool) string {
	s = strings.TrimSpace(s)
	if len(allHide) > 0 && allHide[0] {
		return "******"
	}
	if s == "" {
		return ""
	}
	return "******"
}
// ToFirstUpper trims surrounding whitespace and upper-cases the first
// byte of the result.
func ToFirstUpper(s string) string {
	s = strings.TrimSpace(s)
	if s == "" {
		return s
	}
	return strings.ToUpper(s[:1]) + s[1:]
}
// ToFirstLower trims surrounding whitespace and lower-cases the first
// byte of the result.
func ToFirstLower(s string) string {
	s = strings.TrimSpace(s)
	if s == "" {
		return s
	}
	return strings.ToLower(s[:1]) + s[1:]
}
// ToCamelCase converts an underscore-separated identifier to
// UpperCamelCase (e.g. "user_id" -> "UserId"). Inputs that fail the
// IsNum_Alpha check are returned unchanged.
func ToCamelCase(s string) string {
	if !IsNum_Alpha(s) {
		return s
	}
	var b strings.Builder
	for _, part := range strings.Split(strings.TrimSpace(s), "_") {
		b.WriteString(ToFirstUpper(part))
	}
	return b.String()
}
// TocamelCase converts an underscore-separated identifier to
// lowerCamelCase (e.g. "user_id" -> "userId").
func TocamelCase(s string) string {
	return ToFirstLower(ToCamelCase(s))
}
// ToUnderscoreCase converts an identifier to UPPER_SNAKE_CASE
// (e.g. "userId" -> "USER_ID").
func ToUnderscoreCase(s string) string {
	return strings.ToUpper(TounderscoreCase(s))
}
// TounderscoreCase converts an identifier to lower_snake_case
// (e.g. "userId" -> "user_id"). Inputs that fail the IsNum_Alpha
// check are returned unchanged.
func TounderscoreCase(s string) string {
	if !IsNum_Alpha(s) {
		return s
	}
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		ch := s[i : i+1]
		if IsUpper(ch) {
			// Prefix each upper-case letter with an underscore.
			b.WriteString("_")
			b.WriteString(strings.ToLower(ch))
		} else {
			b.WriteString(ch)
		}
	}
	// Drop a leading underscore and collapse doubled ones, exactly as
	// the original post-processing did.
	out := strings.TrimPrefix(b.String(), "_")
	return strings.Replace(out, "__", "_", -1)
}
// SplitToInt splits str on sep (default ",") and parses each piece as
// an int. On a parse failure it returns the values parsed so far plus
// the error.
func SplitToInt(str string, sep string) (rs []int, err error) {
	if sep == "" {
		sep = ","
	}
	for _, part := range strings.Split(strings.TrimSpace(str), sep) {
		var n int
		n, err = strconv.Atoi(part)
		if err != nil {
			return rs, err
		}
		rs = append(rs, n)
	}
	return rs, nil
}
// SplitToInt64 splits str on sep (default ",") and parses each piece
// as an int64. On a parse failure it returns the values parsed so far
// plus the error.
func SplitToInt64(str string, sep string) (rs []int64, err error) {
	if sep == "" {
		sep = ","
	}
	for _, part := range strings.Split(strings.TrimSpace(str), sep) {
		var n int64
		n, err = strconv.ParseInt(part, 10, 64)
		if err != nil {
			return rs, err
		}
		rs = append(rs, n)
	}
	return rs, nil
}
// JoinFromInt joins an int slice into a string separated by sep
// (default ","). Uses a pre-sized slice plus strings.Join instead of
// the previous O(n^2) `+=` concatenation-and-trim loop; the result is
// byte-identical.
func JoinFromInt(source []int, sep string) (str string) {
	if sep == "" {
		sep = ","
	}
	parts := make([]string, len(source))
	for i, v := range source {
		parts[i] = strconv.Itoa(v)
	}
	return strings.Join(parts, sep)
}
// JoinFromInt64 joins an int64 slice into a string separated by sep
// (default ","). Uses a pre-sized slice plus strings.Join instead of
// the previous O(n^2) `+=` concatenation-and-trim loop; the result is
// byte-identical.
func JoinFromInt64(source []int64, sep string) (str string) {
	if sep == "" {
		sep = ","
	}
	parts := make([]string, len(source))
	for i, v := range source {
		parts[i] = strconv.FormatInt(v, 10)
	}
	return strings.Join(parts, sep)
}
|
package main
// Leetcode 1483. (hard)
// TreeAncestor answers k-th ancestor queries on a rooted tree using
// binary lifting: dp[node][j] holds the 2^j-th ancestor of node, or
// -1 once the walk leaves the tree. (Leetcode 1483.)
type TreeAncestor struct {
	dp [][]int
}

// Constructor builds the binary-lifting table for n nodes, where
// parent[i] is the direct parent of node i (-1 for the root).
func Constructor(n int, parent []int) TreeAncestor {
	up := make([][]int, n)
	for node := range up {
		up[node] = []int{parent[node]}
	}
	// Add ancestor levels until every node has walked off the root.
	for level := 1; ; level++ {
		done := true
		for node := range up {
			prev := up[node][level-1]
			if prev == -1 {
				up[node] = append(up[node], -1)
			} else {
				up[node] = append(up[node], up[prev][level-1])
				done = false
			}
		}
		if done {
			break
		}
	}
	return TreeAncestor{dp: up}
}

// GetKthAncestor returns the k-th ancestor of node, or -1 when the
// walk leaves the tree. k is consumed bit by bit, jumping by powers
// of two.
func (ta *TreeAncestor) GetKthAncestor(node int, k int) int {
	cur := node
	for bit := 0; k != 0 && cur != -1; bit++ {
		if bit >= len(ta.dp[cur]) {
			return -1
		}
		if k&1 == 1 {
			cur = ta.dp[cur][bit]
		}
		k >>= 1
	}
	return cur
}
|
package main
import (
"container/list"
"sync"
pb "../protobuf/go"
"sort"
"fmt"
"github.com/op/go-logging"
mysha2 "../sha256-simd-master"
)
// Miner assembles candidate blocks from pending transactions on top of
// the chain tip it currently believes to be longest.
type Miner struct {
	lock sync.RWMutex
	longest *RichBlock // chain tip this miner extends
	miningTxs TxList // transactions selected for the next block
	uuidmap map[string]int32 // tx UUID -> ID of the block that included it
	alarm Notification // wakes the producer loop
}
// TxList is a slice of transactions passed to sort.Sort in insertTxs.
// NOTE(review): its Len/Less/Swap methods are defined elsewhere in the
// package.
type TxList []*pb.Transaction
// newMiner builds a Miner anchored at the blockchain's current longest
// block, with an empty transaction pool and a buffered wake channel.
func newMiner() *Miner {
	var miner Miner
	miner.longest = blockchain.longest
	miner.miningTxs = TxList{}
	miner.uuidmap = make(map[string]int32)
	miner.alarm.channel = make(chan int, 1)
	return &miner
}
// insertTxs appends transactions from newTxs that are not already in a
// mined block (per uuidmap), sorts the pool, filters it against
// account balances, and wakes the producer loop.
func (self *Miner) insertTxs(newTxs *list.List, log *logging.Logger) {
	for e := newTxs.Front(); e != nil; e = e.Next() {
		tx := e.Value.(*pb.Transaction)
		// Skip transactions already included in a block.
		if _, exist := self.uuidmap[tx.UUID]; exist {
			//log.Debugf("[ ]Transaction exist")
			continue
		}
		self.miningTxs = append(self.miningTxs, tx)
	}
	sort.Sort(self.miningTxs)
	log.Debugf("[ ]Transaction before filter: %d", len(self.miningTxs))
	self.miningTxs = self.longest.accounts.filterTxArray(self.miningTxs, blockLimit, log)
	log.Debugf("[ ]Transaction after filter: %d", len(self.miningTxs))
	self.alarm.wake() //do some notify things
}
// update re-anchors the miner on the blockchain's current longest
// block. It returns false when the tip is unchanged. Otherwise it
// rolls uuidmap back to the common ancestor and forward along the new
// branch, prunes pending transactions confirmed at least 6 blocks
// deep, and rebuilds the mining pool from the pending list.
func (self *Miner) update(log *logging.Logger) bool {
	log.Debug("[wait]Blockchain read Lock1")
	blockchain.lock1.RLock()
	log.Debug("[lock]Blockchain read Lock1")
	log.Debugf("[ ]Self longest ID %d, blockchain longest id %d", self.longest.block.BlockID, blockchain.longest.block.BlockID)
	if self.longest == blockchain.longest {
		log.Debug("[rels]Blockchain read Lock1")
		blockchain.lock1.RUnlock()
		return false
	}
	// Update UUIDmap
	// Walk back from our old tip until it is an ancestor of the new
	// tip, removing those blocks' transactions from uuidmap.
	tmpBlock1 := self.longest
	for !blockchain.checkAncestor_nosync(blockchain.longest, tmpBlock1, log) {
		self.undoUUIDmap(tmpBlock1)
		tmpBlock1 = blockchain.blocks[tmpBlock1.block.PrevHash]
	}
	log.Warningf("Switch longest block %d, %d, %d", self.longest.block.BlockID, tmpBlock1.block.BlockID, blockchain.longest.block.BlockID)
	// Walk back from the new tip to the common ancestor, adding those
	// blocks' transactions to uuidmap.
	tmpBlock2 := blockchain.longest
	for tmpBlock1 != tmpBlock2 {
		self.doUUIDmap(tmpBlock2)
		tmpBlock2 = blockchain.blocks[tmpBlock2.block.PrevHash]
	}
	self.longest = blockchain.longest
	blockchain.lock1.RUnlock()
	log.Debug("[rels]Blockchain read Lock1")
	// Change Pending
	log.Debug("[wait]Tx data Lock")
	pendingTxs.dataLock.Lock()
	log.Debug("[lock]Tx data Lock")
	log.Debugf("[ ]Pending list before deduplicate: %d", pendingTxs.data.Len())
	var eNext *list.Element
	for e := pendingTxs.data.Front(); e != nil; e = eNext {
		eNext = e.Next()
		tx, success := e.Value.(*pb.Transaction)
		if !success {
			log.Error("Invalid format in list")
		}
		// Drop pending transactions confirmed at depth >= 6 blocks.
		if value, exist := self.uuidmap[tx.UUID]; exist && self.longest.block.BlockID - value >= 6 {
			pendingTxs.data.Remove(e)
			delete(pendingTxs.uuidmap, tx.UUID)
		}
	}
	if pendingTxs.data.Len() > pendingFilterThreshold {
		log.Debugf("[ ]Pending list before filter: %d", pendingTxs.data.Len())
		self.longest.accounts.filterPendingTx()
		log.Debugf("[ ]Pending list after filter: %d", pendingTxs.data.Len())
	} else {
		log.Debugf("[ ]Pending list length: %d, do not need filter", pendingTxs.data.Len())
	}
	self.miningTxs = make([]*pb.Transaction, 0, blockLimit/8)
	self.insertTxs(pendingTxs.data, log) //Rebuild miningTx using pending
	pendingTxs.dataLock.Unlock()
	log.Debug("[rels]Tx data Lock")
	return true
}
// doUUIDmap records every transaction of block in uuidmap, mapping the
// transaction UUID to the block's ID.
func (self *Miner) doUUIDmap(block *RichBlock) {
	txs := block.block.Transactions
	for idx := range txs {
		self.uuidmap[txs[idx].UUID] = block.block.BlockID
	}
}
// undoUUIDmap removes every transaction of block from uuidmap (used
// when a block is abandoned during a chain reorganization).
func (self *Miner) undoUUIDmap(block *RichBlock) {
	txs := block.block.Transactions
	for idx := range txs {
		delete(self.uuidmap, txs[idx].UUID)
	}
}
// run_producer is the main miner loop: it spawns the worker goroutines
// and the result collector, then repeatedly waits on the miner alarm,
// refreshes its chain view, and either broadcasts a new block template
// to every worker or idles them with an empty template.
func (self *Miner) run_producer() {
	log := logging.MustGetLogger("miner")
	inchannel := make([]chan string, minerThreads)
	alarms := make([]Notification, minerThreads)
	outchannel := make(chan string)
	for i := 0; i < minerThreads; i++ {
		inchannel[i] = make(chan string, 1)
		alarms[i].channel = make(chan int, 1)
		go mine(inchannel[i], outchannel, alarms[i], i)
	}
	go self.run_minter(outchannel)
	// Start from the genesis block; update() fast-forwards us later.
	blockchain.lock1.RLock()
	self.longest = blockchain.blocks[zeroHash]
	blockchain.lock1.RUnlock()
	log.Info("Miner Prepared")
	for {
		log.Debug("Miner Sleep")
		self.alarm.sleep()
		log.Debug("Miner Wake up")
		self.lock.Lock()
		log.Debug("[lock]Miner lock")
		self.update(log)
		if len(self.miningTxs) > 0 {
			// Template block. Nonce is an 8-char placeholder that the
			// workers overwrite; see the -10 slice below, which strips
			// the 8 nonce chars plus the trailing `"}`.
			block := pb.Block{
				BlockID:      self.longest.block.BlockID + 1,
				PrevHash:     self.longest.hash,
				Transactions: self.miningTxs,
				MinerID:      fmt.Sprintf("Server%02d", selfid),
				Nonce:        "00000000",
			}
			//jsonstring, err := json.Marshal(block)
			jsonstring, err := pbMarshal.MarshalToString(&block)
			if err != nil {
				log.Infof("json encoding error: %v", err)
			}
			// NOTE(review): assumes the marshaller always emits Nonce
			// last so the final 10 bytes are `00000000"}` — confirm the
			// field ordering is stable.
			in := string(jsonstring[:len(jsonstring)-10])
			log.Debugf("[ ]Update json")
			for i := 0; i < minerThreads; i++ {
				// Drain any stale template first so the buffered send
				// below can never block.
				select {
				case <-inchannel[i]:
				default:
				}
				inchannel[i] <- in
				alarms[i].wake()
			}
		} else {
			log.Debugf("[ ]Block Slave")
			// Nothing to mine: push an empty template so each worker
			// goes back to sleep.
			for i := 0; i < minerThreads; i++ {
				select {
				case <-inchannel[i]:
				default:
				}
				inchannel[i] <- ""
			}
		}
		self.lock.Unlock()
		log.Debug("[rels]Miner lock")
	}
}
// run_minter collects candidate block JSON strings from the workers,
// verifies the proof-of-work, publishes valid blocks to peers, and adds
// them to the local chain.
func (self *Miner) run_minter(out chan string) {
	log := logging.MustGetLogger("clect")
	for {
		js := <-out
		if CheckHash(GetHashString(js)) {
			log.Noticef("Mine: %s", js)
			pushBlocks(js)
			if block, err := blockchain.parse(&pb.JsonBlockString{js}); err == nil {
				log.Warningf("Mine: %d, %10.10s, %10.10s", block.block.BlockID, block.hash, block.block.PrevHash)
				blockchain.add(block, true, log)
			} else {
				log.Error("Self Block Wrong:", err)
			}
		} else if GetHashString(js) != GetFastHashString(js) {
			// The fast sha256-based hash disagrees with the oracle hash:
			// the oracle is not sha256, so disable the fast path.
			log.Error("Oracle is not sha256, change")
			sha256o = false
		} else {
			log.Errorf("Invalid hash: %s", GetHashString(js))
		}
	}
}
// mine is a worker goroutine: it brute-forces an 8-digit nonce (one
// worker digit, three j digits, four k digits) appended to the current
// block-JSON prefix until the hash satisfies CheckHash, then sends the
// completed JSON on out. An empty prefix puts the worker to sleep until
// its alarm is woken.
func mine(in chan string, out chan string, alarm Notification, i int) {
	log := logging.MustGetLogger(fmt.Sprintf("mine%01d", i))
	log.Info("Miner Slave start")
	currentJS := ""
	j := 0
	for {
		// Drain `in` completely so we always mine the newest template.
		loop := true
		for loop {
			select {
			case tmp := <-in:
				currentJS = tmp
				//log.Debug("Js update: %.20s", currentJS)
			default:
				loop = false
			}
		}
		if currentJS == "" {
			log.Debug("Miner Slave sleep")
			alarm.sleep()
			log.Debug("Miner Slave wake up")
		} else {
			// Hash state over the fixed prefix is computed once per
			// template; each candidate only varies the 10-byte suffix.
			// NOTE(review): this assumes mysha2's Sum hashes the passed
			// bytes together with the written prefix — the stdlib
			// hash.Hash.Sum merely appends the digest. Confirm mysha2's
			// contract.
			sha2 := mysha2.New()
			sha2.Write([]byte(currentJS))
			for k := 0; k < 10000; k++ {
				var s string
				if sha256o {
					s = fmt.Sprintf("%x", sha2.Sum([]byte(fmt.Sprintf("%01d%03d%04d\"}", i, j, k))))
				} else {
					// Oracle is not sha256: hash the whole string.
					s = GetHashString(fmt.Sprintf("%s%01d%03d%04d\"}", currentJS, i, j, k))
				}
				if CheckHash(s) {
					answer := currentJS + fmt.Sprintf("%01d%03d%04d\"}", i, j, k)
					out <- answer
					currentJS = ""
					break
				}
			}
		}
		j++
		j = j % 1000
	}
}
// Len reports the number of transactions (sort.Interface).
func (self TxList) Len() int { return len(self) }

// Swap exchanges the transactions at positions i and j (sort.Interface).
func (self TxList) Swap(i, j int) { self[i], self[j] = self[j], self[i] }

// Less orders transactions by descending mining fee, breaking ties by
// descending UUID so the ordering is deterministic (sort.Interface).
func (self TxList) Less(i, j int) bool {
	if self[i].MiningFee == self[j].MiningFee {
		return self[i].UUID > self[j].UUID
	}
	return self[i].MiningFee > self[j].MiningFee
}
|
package JsJdk
import (
"JsGo/JsConfig"
"JsGo/JsHttp"
"JsGo/JsLogger"
"crypto/sha1"
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/astaxie/beego"
)
// ST_WeChat_AccessToken mirrors the JSON body returned by WeChat's
// access-token endpoint.
type ST_WeChat_AccessToken struct {
	Access_token string `json:"access_token"`
	Expires_in   int    `json:"expires_in"`
}

// ST_WeChat_Jsapi_Ticket mirrors the JSON body returned by WeChat's
// jsapi-ticket endpoint.
type ST_WeChat_Jsapi_Ticket struct {
	Errcode    int    `json:"errcode"`
	Errmsg     string `json:"errmsg"`
	Ticket     string `json:"ticket"`
	Expires_in int    `json:"expires_in"`
}

// ST_WeChatJsapiController is a beego controller for the JS-SDK route.
type ST_WeChatJsapiController struct {
	beego.Controller
}

// ST_Jsapi_Interface is the wx.config payload handed to the browser.
type ST_Jsapi_Interface struct {
	AppId     string   `json:"appid"`
	Timestamp string   `json:"timestamp"`
	NonceStr  string   `json:"nonceStr"`
	Signature string   `json:"signature"`
	JsApiList []string `json:"jsApiList"`
}

// ST_JsApiRet is the wx.config payload plus the raw access token.
type ST_JsApiRet struct {
	ST_Jsapi_Interface
	Token string
}

// ST_ParaUrl is the request body carrying the page URL to sign.
type ST_ParaUrl struct {
	Url string `json:"url"`
}

// Shared mutable state, refreshed by the background token goroutine.
// NOTE(review): these globals are written and read from different
// goroutines, yet g_lock is never taken anywhere in this file — confirm
// whether the races are intended/benign.
var g_wechat_token string = ""
var g_wechat_jsapi_ticket string = ""
var g_jsConfig ST_Jsapi_Interface
var g_lock sync.Mutex
var g_jString string = ""
func JsJdkInit() {
config := JsConfig.GetConfig()
if config == nil {
log.Fatalln("JsConfig.GetConfig is nil")
return
}
accessPath := config.WxJsApi.WeChatAccessToken
ticketPath := config.WxJsApi.WeChatJsapiTicket
g_jsConfig.AppId = config.WxJsApi.WeChatAppId
apiList := config.WxJsApi.WeChatJsapiList
//accessPath, err := JsConfig.GetConfigString([]string{"WxJsApi", "WeChatAccessToken"})
//if err != nil {
// log.Fatalln(err)
//}
//
//ticketPath, err := JsConfig.GetConfigString([]string{"WxJsApi", "WeChatJsapiTicket"})
//if err != nil {
// log.Fatalln(err)
//}
//
//g_jsConfig.AppId, err = JsConfig.GetConfigString([]string{"WxJsApi", "WeChatAppId"})
//if err != nil {
// log.Fatalln(err)
//}
//
//apiList, err := JsConfig.GetConfigString([]string{"WxJsApi", "WeChatJsapiList"})
//if err != nil {
// log.Fatalln(err)
//}
g_jsConfig.JsApiList = strings.Split(apiList, ",")
go wechat_token_coolie(accessPath, ticketPath)
JsHttp.WhiteHttps("/wxjsapi", doWxJsapi)
}
func JsJdkInit_Unsafe() {
config := JsConfig.GetConfig()
if config == nil {
log.Fatalln("JsConfig.GetConfig is nil")
return
}
accessPath := config.WxJsApi.WeChatAccessToken
ticketPath := config.WxJsApi.WeChatJsapiTicket
g_jsConfig.AppId = config.WxJsApi.WeChatAppId
apiList := config.WxJsApi.WeChatJsapiList
//accessPath, err := JsConfig.GetConfigString([]string{"WxJsApi", "WeChatAccessToken"})
//if err != nil {
// log.Fatalln(err)
//}
//
//ticketPath, err := JsConfig.GetConfigString([]string{"WxJsApi", "WeChatJsapiTicket"})
//if err != nil {
// log.Fatalln(err)
//}
//
//g_jsConfig.AppId, err = JsConfig.GetConfigString([]string{"WxJsApi", "WeChatAppId"})
//if err != nil {
// log.Fatalln(err)
//}
//
//apiList, err := JsConfig.GetConfigString([]string{"WxJsApi", "WeChatJsapiList"})
//if err != nil {
// log.Fatalln(err)
//}
g_jsConfig.JsApiList = strings.Split(apiList, ",")
go wechat_token_coolie(accessPath, ticketPath)
JsHttp.WhiteHttp("/wxjsapi", doWxJsapi)
}
// wechat_token_coolie refreshes the WeChat access token (and, on
// success, the jsapi ticket) once per hour, forever. It is meant to run
// as a goroutine.
func wechat_token_coolie(accessPath, ticketPath string) {
	for {
		response, e := http.Get(accessPath)
		if e != nil {
			JsLogger.Error(e.Error())
			// http.Get returns a nil Response on error; guard anyway.
			if response != nil {
				response.Body.Close()
			}
		} else {
			// NOTE(review): a single Read caps the payload at 2048
			// bytes; WeChat token responses fit today, but confirm.
			b := make([]byte, 2048)
			n, _ := response.Body.Read(b)
			// Fix: the body was previously leaked on the success path,
			// exhausting connections over long uptimes.
			response.Body.Close()
			var token ST_WeChat_AccessToken
			json.Unmarshal(b[:n], &token)
			g_wechat_token = token.Access_token
			// fmt.Printf("token=%s", g_wechat_token)
			JsLogger.Info("token=%s", g_wechat_token)
			ticket_path := ticketPath + "?access_token=" + g_wechat_token + "&type=jsapi"
			update_jsapi_ticket(g_wechat_token, ticket_path)
		}
		time.Sleep(time.Hour)
	}
}
// update_jsapi_ticket fetches the jsapi ticket from ticketPath and
// stores it in g_wechat_jsapi_ticket. The token parameter is kept for
// interface compatibility (it is already encoded into ticketPath).
func update_jsapi_ticket(token, ticketPath string) {
	response, e := http.Get(ticketPath)
	if e != nil {
		// Fix: http.Get returns a nil Response on error; the previous
		// code dereferenced response.Body here and would panic.
		JsLogger.Error(e.Error())
		return
	}
	// Fix: the body was never closed on the success path (leak).
	defer response.Body.Close()
	// NOTE(review): a single Read caps the payload at 2048 bytes, which
	// is enough for ticket responses today — confirm.
	b := make([]byte, 2048)
	n, _ := response.Body.Read(b)
	var ticket ST_WeChat_Jsapi_Ticket
	json.Unmarshal(b[:n], &ticket)
	g_wechat_jsapi_ticket = ticket.Ticket
}
// buildSignature fills g_jsConfig with the nonce, timestamp and SHA-1
// signature required by WeChat's JS-SDK wx.config for the given page
// URL.
// NOTE(review): g_jsConfig is shared mutable state written here without
// g_lock; concurrent requests race — confirm whether that matters.
func buildSignature(url string) {
	config := JsConfig.GetConfig()
	if config == nil {
		return
	}
	g_jsConfig.NonceStr = config.WxJsApi.WeChatNoncestr
	// Fix: WeChat expects a Unix timestamp in seconds; the previous code
	// used time.Now().Nanosecond(), which is only the sub-second
	// component and never a valid epoch timestamp.
	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
	g_jsConfig.Timestamp = timestamp
	// Build the string to sign exactly as the JS-SDK spec requires:
	// jsapi_ticket=...&noncestr=...&timestamp=...&url=...
	// Fix: the old literal "×tamp=" was a mojibake of "&timestamp="
	// (the "&times" prefix had been turned into the × entity), so the
	// signature could never match WeChat's.
	str := "jsapi_ticket=" + g_wechat_jsapi_ticket +
		"&noncestr=" + g_jsConfig.NonceStr +
		"&timestamp=" + timestamp +
		"&url=" + url
	// SHA-1 over the query string, hex-encoded, per the JS-SDK spec.
	h := sha1.New()
	h.Write([]byte(str))
	g_jsConfig.Signature = fmt.Sprintf("%x", h.Sum(nil))
}
// RegisterJsApi exposes the WeChat JS-SDK signature handler on an
// additional, non-whitelisted route.
func RegisterJsApi(url string) {
	JsHttp.Http(url, doWxJsapi)
}
// doWxJsapi handles /wxjsapi: it reads the page URL from the request,
// computes the JS-SDK signature for it, and returns the wx.config
// payload (plus the raw access token) to the caller.
func doWxJsapi(session *JsHttp.Session) {
	var req_url ST_ParaUrl
	e := session.GetPara(&req_url)
	if e != nil {
		JsLogger.Error(e.Error())
		session.Forward("1", e.Error(), nil)
		return
	}
	// NOTE(review): buildSignature mutates the shared g_jsConfig without
	// holding g_lock, so concurrent requests can interleave — confirm.
	buildSignature(req_url.Url)
	ret := &ST_JsApiRet{}
	ret.AppId = g_jsConfig.AppId
	ret.JsApiList = g_jsConfig.JsApiList
	ret.NonceStr = g_jsConfig.NonceStr
	ret.Signature = g_jsConfig.Signature
	ret.Timestamp = g_jsConfig.Timestamp
	ret.Token = g_wechat_token
	session.Forward("0", "success", ret)
}
|
// Copyright © 2017 Microsoft <wastore@microsoft.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"errors"
"fmt"
"strings"
"time"
"github.com/Azure/azure-storage-azcopy/common"
"github.com/Azure/azure-storage-azcopy/ste"
"github.com/spf13/cobra"
)
// TODO the behavior of the resume command should be double-checked
// TODO ex: does it output json??
// resumeJobController tracks a resumed job and implements the progress
// reporting interface consumed by glcm.WaitUntilJobCompletion.
type resumeJobController struct {
	// generated
	jobID common.JobID
	// variables used to calculate progress
	// intervalStartTime holds the last time value when the progress summary was fetched
	// the value of this variable is used to calculate the throughput
	// it gets updated every time the progress summary is fetched
	intervalStartTime time.Time
	// bytes transferred as of the start of the current interval
	intervalBytesTransferred uint64
	// used to calculate job summary
	jobStartTime time.Time
}
// PrintJobStartedMsg announces on the lifecycle manager that the
// resumed job has begun.
func (cca *resumeJobController) PrintJobStartedMsg() {
	msg := "\nJob " + cca.jobID.String() + " has started\n"
	glcm.Info(msg)
}
// CancelJob cancels the tracked job through the cancel-command machinery
// and exits the process with an error code if cancellation fails.
func (cca *resumeJobController) CancelJob() {
	err := cookedCancelCmdArgs{jobID: cca.jobID}.process()
	if err != nil {
		glcm.ExitWithError("error occurred while cancelling the job "+cca.jobID.String()+". Failed with error "+err.Error(), common.EExitCode.Error())
	}
}
// InitializeProgressCounters resets the job-wide and per-interval
// progress tracking state; called once before polling begins.
func (cca *resumeJobController) InitializeProgressCounters() {
	cca.jobStartTime = time.Now()
	cca.intervalStartTime = time.Now()
	cca.intervalBytesTransferred = 0
}
// PrintJobProgressStatus fetches the current job summary over RPC and
// either emits a final summary (exiting the process) when the job is
// done, or a one-line progress update including 2-second throughput.
func (cca *resumeJobController) PrintJobProgressStatus() {
	// fetch a job status
	var summary common.ListJobSummaryResponse
	Rpc(common.ERpcCmd.ListJobSummary(), &cca.jobID, &summary)
	jobDone := summary.JobStatus == common.EJobStatus.Completed() || summary.JobStatus == common.EJobStatus.Cancelled()
	// if json is not desired, and job is done, then we generate a special end message to conclude the job
	if jobDone {
		duration := time.Now().Sub(cca.jobStartTime) // report the total run time of the job
		// ExitWithSuccess terminates the process; nothing below runs.
		glcm.ExitWithSuccess(fmt.Sprintf(
			"\n\nJob %s summary\nElapsed Time (Minutes): %v\nTotal Number Of Transfers: %v\nNumber of Transfers Completed: %v\nNumber of Transfers Failed: %v\nFinal Job Status: %v\n",
			summary.JobID.String(),
			ste.ToFixed(duration.Minutes(), 4),
			summary.TotalTransfers,
			summary.TransfersCompleted,
			summary.TransfersFailed,
			summary.JobStatus), common.EExitCode.Success())
	}
	// if json is not needed, and job is not done, then we generate a message that goes nicely on the same line
	// display a scanning keyword if the job is not completely ordered
	var scanningString = ""
	if !summary.CompleteJobOrdered {
		scanningString = "(scanning...)"
	}
	// compute the average throughput for the last time interval
	bytesInMB := float64(float64(summary.BytesOverWire-cca.intervalBytesTransferred) / float64(1024*1024))
	timeElapsed := time.Since(cca.intervalStartTime).Seconds()
	throughPut := common.Iffloat64(timeElapsed != 0, bytesInMB/timeElapsed, 0)
	// reset the interval timer and byte count
	cca.intervalStartTime = time.Now()
	cca.intervalBytesTransferred = summary.BytesOverWire
	// As there would be case when no bits sent from local, e.g. service side copy, when throughput = 0, hide it.
	progressStr := fmt.Sprintf("%v Done, %v Failed, %v Pending, %v Total%s",
		summary.TransfersCompleted,
		summary.TransfersFailed,
		summary.TotalTransfers-(summary.TransfersCompleted+summary.TransfersFailed),
		summary.TotalTransfers,
		scanningString)
	if throughPut != 0 {
		progressStr = fmt.Sprintf("%s, 2-sec Throughput (MB/s): %v", progressStr, ste.ToFixed(throughPut, 4))
	}
	glcm.Progress(progressStr)
}
// init registers the `resume` cobra command and its flags on the root
// command.
func init() {
	resumeCmdArgs := resumeCmdArgs{}
	// resumeCmd represents the resume command
	resumeCmd := &cobra.Command{
		Use:        "resume jobID",
		SuggestFor: []string{"resme", "esume", "resue"},
		Short:      "Resume the existing job with the given job ID",
		Long: `
Resume the existing job with the given job ID.`,
		Args: func(cmd *cobra.Command, args []string) error {
			// the resume command requires necessarily to have an argument
			// resume jobId -- resumes all the parts of an existing job for given jobId
			// If no argument is passed then it is not valid
			if len(args) != 1 {
				return errors.New("this command requires jobId to be passed as argument")
			}
			resumeCmdArgs.jobID = args[0]
			return nil
		},
		Run: func(cmd *cobra.Command, args []string) {
			err := resumeCmdArgs.process()
			if err != nil {
				glcm.ExitWithError(fmt.Sprintf("failed to perform resume command due to error: %s", err.Error()), common.EExitCode.Error())
			}
		},
	}
	rootCmd.AddCommand(resumeCmd)
	// transfer filters: semicolon-separated lists of file names
	resumeCmd.PersistentFlags().StringVar(&resumeCmdArgs.includeTransfer, "include", "", "Filter: only include these failed transfer(s) when resuming the job. "+
		"Files should be separated by ';'.")
	resumeCmd.PersistentFlags().StringVar(&resumeCmdArgs.excludeTransfer, "exclude", "", "Filter: exclude these failed transfer(s) when resuming the job. "+
		"Files should be separated by ';'.")
	// oauth options
	resumeCmd.PersistentFlags().BoolVar(&resumeCmdArgs.useInteractiveOAuthUserCredential, "oauth-user", false, "Use OAuth user credential and do interactive login.")
	resumeCmd.PersistentFlags().StringVar(&resumeCmdArgs.tenantID, "tenant-id", common.DefaultTenantID, "Tenant id to use for OAuth user interactive login.")
	resumeCmd.PersistentFlags().StringVar(&resumeCmdArgs.aadEndpoint, "aad-endpoint", common.DefaultActiveDirectoryEndpoint, "Azure active directory endpoint to use for OAuth user interactive login.")
	resumeCmd.PersistentFlags().StringVar(&resumeCmdArgs.SourceSAS, "source-sas", "", "source sas of the source for given JobId")
	resumeCmd.PersistentFlags().StringVar(&resumeCmdArgs.DestinationSAS, "destination-sas", "", "destination sas of the destination for given JobId")
}
// resumeCmdArgs holds the raw command-line inputs of the resume command
// before validation/cooking in process().
type resumeCmdArgs struct {
	// jobID is the raw job identifier string; validated by ParseJobID.
	jobID string
	// semicolon-separated transfer filters (see the flag help text).
	includeTransfer string
	excludeTransfer string
	// oauth options
	useInteractiveOAuthUserCredential bool
	tenantID                          string
	aadEndpoint                       string
	// SAS tokens re-attached to the job's source/destination.
	SourceSAS      string
	DestinationSAS string
}
// process validates the resume command's inputs, resolves the
// credential to use (anonymous or OAuth via interactive login,
// environment variable, or cached token), dispatches the ResumeJob
// order to the storage engine over RPC, and blocks until the job
// completes.
func (rca resumeCmdArgs) process() error {
	// parsing the given JobId to validate its format correctness
	jobID, err := common.ParseJobID(rca.jobID)
	if err != nil {
		// If parsing gives an error, hence it is not a valid JobId format
		return fmt.Errorf("error parsing the jobId %s. Failed with error %s", rca.jobID, err.Error())
	}
	includeTransfer := make(map[string]int)
	excludeTransfer := make(map[string]int)
	// If the transfer has been provided with the include, parse the transfer list.
	if len(rca.includeTransfer) > 0 {
		// Split the Include Transfer using ';'
		transfers := strings.Split(rca.includeTransfer, ";")
		for index := range transfers {
			if len(transfers[index]) == 0 {
				// If the transfer provided is empty
				// skip the transfer
				// This is to handle the misplaced ';'
				continue
			}
			includeTransfer[transfers[index]] = index
		}
	}
	// If the transfer has been provided with the exclude, parse the transfer list.
	if len(rca.excludeTransfer) > 0 {
		// Split the Exclude Transfer using ';'
		transfers := strings.Split(rca.excludeTransfer, ";")
		for index := range transfers {
			if len(transfers[index]) == 0 {
				// If the transfer provided is empty
				// skip the transfer
				// This is to handle the misplaced ';'
				continue
			}
			excludeTransfer[transfers[index]] = index
		}
	}
	// Initialize credential info.
	credentialInfo := common.CredentialInfo{
		CredentialType: common.ECredentialType.Anonymous(),
	}
	// Check whether to use OAuthToken credential.
	// Scenario-1: interactive login per copy command
	// Scenario-Test: unattended testing with oauthTokenInfo set through environment variable
	// Scenario-2: session mode which get token from cache
	uotm := GetUserOAuthTokenManagerInstance()
	// NOTE(review): the error from HasCachedToken is never inspected —
	// a cache probe failure is treated the same as "no cached token".
	// Confirm that is intentional.
	hasCachedToken, err := uotm.HasCachedToken()
	if rca.useInteractiveOAuthUserCredential || common.EnvVarOAuthTokenInfoExists() || hasCachedToken {
		credentialInfo.CredentialType = common.ECredentialType.OAuthToken()
		var oAuthTokenInfo *common.OAuthTokenInfo
		// For Scenario-1, create token with interactive login if necessary.
		if rca.useInteractiveOAuthUserCredential {
			oAuthTokenInfo, err = uotm.LoginWithADEndpoint(rca.tenantID, rca.aadEndpoint, false)
			if err != nil {
				return fmt.Errorf(
					"login failed with tenantID %q, using public Azure directory endpoint 'https://login.microsoftonline.com', due to error: %s",
					rca.tenantID,
					err.Error())
			}
		} else if oAuthTokenInfo, err = uotm.GetTokenInfoFromEnvVar(); err == nil || !common.IsErrorEnvVarOAuthTokenInfoNotSet(err) {
			// Scenario-Test
			glcm.Info(fmt.Sprintf("%v is set.", common.EnvVarOAuthTokenInfo))
			if err != nil { // this is the case when env var exists while get token info failed
				return err
			}
		} else { // Scenario-2
			oAuthTokenInfo, err = uotm.GetCachedTokenInfo()
			if err != nil {
				return err
			}
		}
		if oAuthTokenInfo == nil {
			return errors.New("cannot get valid oauth token")
		}
		credentialInfo.OAuthTokenInfo = *oAuthTokenInfo
	}
	glcm.Info(fmt.Sprintf("Resume uses credential type %q.\n", credentialInfo.CredentialType))
	// Send resume job request.
	var resumeJobResponse common.CancelPauseResumeResponse
	Rpc(common.ERpcCmd.ResumeJob(),
		&common.ResumeJobRequest{
			JobID:           jobID,
			SourceSAS:       rca.SourceSAS,
			DestinationSAS:  rca.DestinationSAS,
			CredentialInfo:  credentialInfo,
			IncludeTransfer: includeTransfer,
			ExcludeTransfer: excludeTransfer,
		},
		&resumeJobResponse)
	if !resumeJobResponse.CancelledPauseResumed {
		glcm.ExitWithError(resumeJobResponse.ErrorMsg, common.EExitCode.Error())
	}
	// Block, polling progress via the controller, until the job is done.
	controller := resumeJobController{jobID: jobID}
	glcm.WaitUntilJobCompletion(&controller)
	return nil
}
|
package xml
import "testing"
// testData is a sample Android strings.xml resource file.
// NOTE(review): the raw string begins with a newline before the <?xml?>
// declaration, which strict XML parsers reject — confirm that Read
// tolerates leading whitespace.
var testData = []byte(`
<?xml version="1.0" encoding="utf-8"?>
<resources>
    <string name="app_name">GASTT</string>
    <string name="title_section1">Section 1</string>
    <string name="title_section2">Section 2</string>
    <string name="title_section3">Section 3</string>
    <string name="hello_world">Hello world!</string>
    <string name="action_settings">Settings</string>
</resources>`)
// TestReader checks that Read extracts the app_name string resource.
func TestReader(t *testing.T) {
	resources := Read(testData)
	if got := resources["app_name"]; got != "GASTT" {
		t.Error("Unmarshall is not ok!")
	}
}
|
package auth
import (
"bytes"
"context"
"encoding/json"
"errors"
"strings"
"github.com/aquasecurity/lmdrouter"
"github.com/aws/aws-lambda-go/events"
)
type key string
var keyUser = key("authUser")
func UserFromContext(ctx context.Context) string {
if v, ok := ctx.Value(keyUser).(string); ok {
return v
}
return ""
}
// AuthMiddleware wraps a lmdrouter handler with bearer-token
// authentication: it extracts the token from the Authorization header,
// validates it with ParseToken, rejects the request with a 401 JSON
// error on failure, and otherwise stores the authenticated username in
// the request context (retrieve it with UserFromContext).
func AuthMiddleware(next lmdrouter.Handler) lmdrouter.Handler {
	return func(ctx context.Context, req events.APIGatewayProxyRequest) (
		res events.APIGatewayProxyResponse,
		err error,
	) {
		token, err := extractToken(req.Headers["Authorization"])
		if err != nil {
			return createErrorResponse(401, err)
		}
		isValid, username := ParseToken(token)
		if !isValid || username == "" {
			return createErrorResponse(401, errors.New("Invalid token"))
		}
		// Make the username available to downstream handlers.
		ctx2 := context.WithValue(ctx, keyUser, username)
		res, err = next(ctx2, req)
		return res, err
	}
}
// extractToken pulls the bearer token out of an Authorization header
// that must have the exact form "Bearer <TOKEN>".
func extractToken(authHeader string) (string, error) {
	scheme, token, found := strings.Cut(authHeader, " ")
	switch {
	case !found, strings.Contains(token, " "):
		// Not exactly two space-separated fields.
		return "", errors.New("Expecting header 'Authorization: Bearer <TOKEN>'")
	case scheme != "Bearer":
		return "", errors.New("Expecting header 'Authorization: Bearer <TOKEN>'")
	}
	return token, nil
}
// ParseToken validates token and returns whether it is valid together
// with the username it identifies. It currently delegates to the Google
// OAuth token parser.
func ParseToken(token string) (isValid bool, username string) {
	return googleOAuthTokenParser(token)
}
// createErrorResponse builds a JSON API Gateway response of the form
// {"success": false, "message": <err>} with the given status code. If
// the payload itself cannot be marshalled, a bare 500 is returned.
func createErrorResponse(statusCode int, err error) (Response, error) {
	payload := map[string]interface{}{
		"success": false,
		"message": err.Error(),
	}
	body, marshalErr := json.Marshal(payload)
	if marshalErr != nil {
		return Response{StatusCode: 500}, marshalErr
	}
	// HTML-escape the body so it is safe to embed in web contexts.
	var buf bytes.Buffer
	json.HTMLEscape(&buf, body)
	return Response{
		StatusCode:      statusCode,
		IsBase64Encoded: false,
		Body:            buf.String(),
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
	}, nil
}
// Response is the API Gateway proxy response type returned by handlers.
type Response = events.APIGatewayProxyResponse

// Request is the API Gateway proxy request type accepted by handlers.
type Request = events.APIGatewayProxyRequest
|
package fileio
import (
"os"
)
// ReadInput reads the entire contents of fileName and returns it as a
// string, panicking on any I/O error (matching the original contract).
func ReadInput(fileName string) string {
	// os.ReadFile handles open/stat/read-to-EOF/close in one call. The
	// previous implementation issued a single file.Read into a
	// Stat-sized buffer, but Read is not guaranteed to fill the buffer
	// in one call, so large files could come back truncated (with the
	// tail left as NUL bytes).
	bs, err := os.ReadFile(fileName)
	if err != nil {
		panic(err)
	}
	return string(bs)
}
/*
Name : Kamil KAPLAN
Date : 27.07.2019
*/
package goweather
import (
"github.com/PROJECTS/goWeatherPackage/models"
"os"
"strings"
)
// fileIsExists reports whether the named file (or directory) exists.
// Note that any Stat error other than "not exist" (e.g. permission
// problems) is treated as "exists".
func fileIsExists(name string) bool {
	_, statErr := os.Stat(name)
	if statErr != nil && os.IsNotExist(statErr) {
		return false
	}
	return true
}
// turkishReplacer rewrites Turkish-specific letters to their closest
// ASCII equivalents in a single pass over the string.
var turkishReplacer = strings.NewReplacer(
	"ğ", "g", "ü", "u", "ş", "s", "ı", "i", "ö", "o", "ç", "c",
	"Ğ", "G", "Ü", "U", "Ş", "S", "İ", "I", "Ö", "O", "Ç", "C",
)

// fixEnglishChars replaces Turkish characters in val with ASCII
// look-alikes. The previous implementation ran twelve sequential
// strings.Replace passes each capped at 100 occurrences, silently
// leaving anything past the 100th occurrence unchanged; NewReplacer
// replaces every occurrence in one pass.
func fixEnglishChars(val string) string {
	return turkishReplacer.Replace(val)
}
// fixEngCharOnLocation returns a copy of val with Turkish characters in
// its EnglishName and its administrative area's EnglishName replaced by
// ASCII look-alikes.
func fixEngCharOnLocation(val models.Location) models.Location {
	val.EnglishName = fixEnglishChars(val.EnglishName)
	val.AdministrativeArea.EnglishName = fixEnglishChars(val.AdministrativeArea.EnglishName)
	return val
}
|
package todo_module
import (
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
models "todos/modules/todo/models"
"fmt"
"strconv"
)
// TodoController serves the todo CRUD pages via beego.
type TodoController struct {
	beego.Controller
}
// Prepare runs before every action: it points beego at this module's
// view directory and selects the shared layout template.
// NOTE(review): mutating the global beego.ViewsPath per request affects
// every controller in the process — confirm this is intended.
func (this *TodoController) Prepare() {
	beego.ViewsPath="modules/todo/views"
	this.Layout = "layout.tpl"
}
// Get renders the todo list page: it loads all rows from the todo table
// and renders list.tpl with the create form embedded as a layout
// section.
func (this *TodoController) Get() {
	// Pull any flash messages stored by a prior redirect.
	beego.ReadFromRequest(&this.Controller)
	o := orm.NewOrm()
	o.Using("default")
	var todos []models.Todo
	o.QueryTable("todo").All(&todos)
	this.TplNames = "list.tpl"
	this.LayoutSections = make(map[string]string)
	this.LayoutSections["FormTodo"] = "form.tpl"
	this.Data["todos"]=todos
}
// Post creates a Todo (zero Id) or updates an existing one (non-zero
// Id) from the submitted form, then redirects back to the list with a
// flash notice.
func (this *TodoController) Post() {
	o := orm.NewOrm()
	o.Using("default")
	todo := models.Todo{}
	if err := this.ParseForm(&todo); err != nil {
		this.Ctx.WriteString(fmt.Sprintf("%v", err))
	}
	if todo.Id != 0 {
		// Fix: the Update error was silently discarded; report it like
		// the Insert branch does.
		if _, err := o.Update(&todo); err != nil {
			this.Ctx.WriteString(fmt.Sprintf("%v", err))
		}
	} else {
		if _, err := o.Insert(&todo); err != nil {
			this.Ctx.WriteString(fmt.Sprintf("%v", err))
		}
	}
	flash := beego.NewFlash()
	// Fix: the notice said "deleted" — a copy-paste from DeleteTodo —
	// even though this handler saves.
	flash.Notice("Todo has been saved successfully")
	flash.Store(&this.Controller)
	this.Ctx.Redirect(302, "/todo")
}
// ReadTodo loads the todo identified by the :id route parameter and
// renders the edit form for it. Lookup failures are written directly to
// the response body.
func (this *TodoController) ReadTodo() {
	o := orm.NewOrm()
	o.Using("default")
	id := this.Ctx.Input.Param(":id")
	todoid,err := strconv.Atoi(id)
	if err != nil {
		this.Ctx.WriteString(fmt.Sprintf("%v",err))
	}
	todo := models.Todo{}
	todo.Id = todoid
	err = o.Read(&todo)
	if err == orm.ErrNoRows {
		this.Ctx.WriteString("No result found.")
	} else if err == orm.ErrMissPK {
		this.Ctx.WriteString("No primary key found.")
	}
	this.Data["todo"] = todo
	this.TplNames = "form.tpl"
}
// DeleteTodo deletes the todo identified by the :id route parameter and
// redirects back to the list with a flash notice.
func (this *TodoController) DeleteTodo() {
	o := orm.NewOrm()
	o.Using("default")
	id := this.Ctx.Input.Param(":id")
	todoid,err := strconv.Atoi(id)
	if err != nil {
		this.Ctx.WriteString(fmt.Sprintf("%v",err))
	}
	if _,err := o.Delete(&models.Todo{Id:todoid}); err != nil {
		this.Ctx.WriteString(fmt.Sprintf("%v",err))
	}
	flash:=beego.NewFlash()
	flash.Notice("Todo has been deleted successfully")
	flash.Store(&this.Controller)
	this.Ctx.Redirect(302,"/todo")
	return
}
package main
import (
"fmt"
)
// FI is the dispatch interface: parent stores an FI pointing back at
// the most-derived value, so doit calls resolve to a child's override
// when one exists (emulating virtual dispatch).
type FI interface {
	F()
}
// parent provides the default FI implementation; its embedded FI field
// holds the "most derived" receiver so doit dispatches through it.
type parent struct {
	FI
}

// F is the default implementation.
func (s *parent) F() {
	fmt.Printf("parent.F()\n")
}

// doit calls F through the embedded interface, so an override wins.
func (s *parent) doit() {
	s.FI.F()
}

// NewParent allocates a parent whose dispatch target is itself.
func NewParent() (rv *parent) {
	rv = &parent{}
	rv.FI = rv
	return rv
}
// child1 overrides F; NewChild1 re-points the embedded FI at the child
// so parent.doit dispatches to child1.F.
type child1 struct {
	parent // Anonymous parent struct to get default implementations
}

// F overrides the parent's default implementation.
func (s *child1) F() {
	fmt.Printf("child1.F()\n")
}

// NewChild1 allocates a child1 wired to dispatch to itself.
func NewChild1() *child1 {
	ch := &child1{}
	ch.FI = ch
	return ch
}
// child2 keeps the parent's default F but carries extra state.
type child2 struct {
	parent // Anonymous parent struct to get default implementations
	j      int
}

// NewChild2 allocates a child2 dispatching to itself, which still
// resolves to parent.F since child2 does not override F.
func NewChild2() *child2 {
	ch := &child2{}
	ch.FI = ch
	ch.j = 3
	return ch
}
// main demonstrates the dispatch: parent and child2 print parent.F(),
// child1 prints child1.F().
func main() {
	p := NewParent()
	c1 := NewChild1()
	c2 := NewChild2()
	for _, obj := range []interface{ doit() }{p, c1, c2} {
		obj.doit()
	}
}
/*
Output:
parent.F()
child1.F()
parent.F()
*/
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package common
import (
"time"
appsv1 "k8s.io/api/apps/v1"
)
// Datadog const value
const (
	// AgentDeploymentNameLabelKey label key used to link a Resource to a DatadogAgent
	AgentDeploymentNameLabelKey = "agent.datadoghq.com/name"
	// AgentDeploymentComponentLabelKey label key used to know which component it is
	AgentDeploymentComponentLabelKey = "agent.datadoghq.com/component"
	// MD5AgentDeploymentAnnotationKey annotation key used on a Resource in order to identify which AgentDeployment have been used to generate it.
	MD5AgentDeploymentAnnotationKey = "agent.datadoghq.com/agentspechash"
	// MD5ChecksumAnnotationKey annotation key (format string) used to identify customConfig configurations
	MD5ChecksumAnnotationKey = "checksum/%s-custom-config"
	// MD5ChecksumSeccompProfileAnnotationName is part of the key name (format string) to identify custom seccomp configurations
	MD5ChecksumSeccompProfileAnnotationName = "%s-seccomp"
	// DefaultAgentResourceSuffix used as suffix for agent resource naming
	DefaultAgentResourceSuffix = "agent"
	// DefaultClusterAgentResourceSuffix used as suffix for cluster-agent resource naming
	DefaultClusterAgentResourceSuffix = "cluster-agent"
	// DefaultClusterChecksRunnerResourceSuffix used as suffix for cluster-checks-runner resource naming
	DefaultClusterChecksRunnerResourceSuffix = "cluster-checks-runner"
	// DefaultMetricsServerResourceSuffix used as suffix for cluster-agent metrics-server resource naming
	DefaultMetricsServerResourceSuffix = "cluster-agent-metrics-server"
	// DefaultAPPKeyKey default app-key key (used in secret for instance).
	DefaultAPPKeyKey = "app_key"
	// DefaultAPIKeyKey default api-key key (used in secret for instance).
	DefaultAPIKeyKey = "api_key"
	// DefaultTokenKey default token key (used in secret for instance).
	DefaultTokenKey = "token"
	// DefaultClusterAgentReplicas default cluster-agent deployment replicas
	DefaultClusterAgentReplicas = 1
	// DefaultClusterAgentServicePort default cluster-agent service port
	DefaultClusterAgentServicePort = 5005
	// DefaultClusterChecksRunnerReplicas default cluster checks runner deployment replicas
	DefaultClusterChecksRunnerReplicas = 1
	// DefaultMetricsServerServicePort default metrics-server port
	DefaultMetricsServerServicePort = 443
	// DefaultMetricsServerTargetPort default metrics-server pod port
	DefaultMetricsServerTargetPort = int(DefaultMetricsProviderPort)
	// DefaultAdmissionControllerServicePort default admission controller service port
	DefaultAdmissionControllerServicePort = 443
	// DefaultAdmissionControllerTargetPort default admission controller pod port
	DefaultAdmissionControllerTargetPort = 8000
	// DefaultDogstatsdPort default dogstatsd port
	DefaultDogstatsdPort = 8125
	// DefaultDogstatsdPortName default dogstatsd port name
	DefaultDogstatsdPortName = "dogstatsdport"
	// DefaultApmPort default apm port
	DefaultApmPort = 8126
	// DefaultApmPortName default apm port name
	DefaultApmPortName = "traceport"
	// DefaultMetricsProviderPort default metrics provider port
	DefaultMetricsProviderPort int32 = 8443
	// DefaultKubeStateMetricsCoreConf default ksm core ConfigMap name
	DefaultKubeStateMetricsCoreConf string = "kube-state-metrics-core-config"
	// DefaultOrchestratorExplorerConf default orchestrator explorer ConfigMap name
	DefaultOrchestratorExplorerConf string = "orchestrator-explorer-config"
	// DefaultSystemProbeSocketPath default System Probe socket path
	DefaultSystemProbeSocketPath string = "/var/run/sysprobe/sysprobe.sock"
	// DefaultCSPMConf default CSPM ConfigMap name
	DefaultCSPMConf string = "cspm-config"
	// DefaultCWSConf default CWS ConfigMap name
	DefaultCWSConf string = "cws-config"
	// Liveness probe default config
	DefaultLivenessProbeInitialDelaySeconds int32 = 15
	DefaultLivenessProbePeriodSeconds       int32 = 15
	DefaultLivenessProbeTimeoutSeconds      int32 = 5
	DefaultLivenessProbeSuccessThreshold    int32 = 1
	DefaultLivenessProbeFailureThreshold    int32 = 6
	DefaultAgentHealthPort                  int32 = 5555
	DefaultLivenessProbeHTTPPath                  = "/live"
	// Readiness probe default config
	DefaultReadinessProbeInitialDelaySeconds int32 = 15
	DefaultReadinessProbePeriodSeconds       int32 = 15
	DefaultReadinessProbeTimeoutSeconds      int32 = 5
	DefaultReadinessProbeSuccessThreshold    int32 = 1
	DefaultReadinessProbeFailureThreshold    int32 = 6
	DefaultReadinessProbeHTTPPath                  = "/ready"
	// Default Image name
	DefaultAgentImageName        string = "agent"
	DefaultClusterAgentImageName string = "cluster-agent"
	DefaultImageRegistry         string = "gcr.io/datadoghq"
	// ExtendedDaemonset defaulting
	DefaultRollingUpdateMaxUnavailable                  = "10%"
	DefaultUpdateStrategy                               = appsv1.RollingUpdateDaemonSetStrategyType
	DefaultRollingUpdateMaxPodSchedulerFailure          = "10%"
	DefaultRollingUpdateMaxParallelPodCreation    int32 = 250
	DefaultRollingUpdateSlowStartIntervalDuration       = 1 * time.Minute
	DefaultRollingUpdateSlowStartAdditiveIncrease       = "5"
	DefaultReconcileFrequency                           = 10 * time.Second
	// Autodiscovery config providers/listeners for cluster-level checks
	KubeServicesAndEndpointsConfigProviders = "kube_services kube_endpoints"
	KubeServicesAndEndpointsListeners       = "kube_services kube_endpoints"
	EndpointsChecksConfigProvider           = "endpointschecks"
	ClusterAndEndpointsConfigProviders      = "clusterchecks endpointschecks"
)
// Annotations
const (
	// SystemProbeAppArmorAnnotationKey is the pod annotation key that sets the
	// AppArmor profile for the system-probe container.
	SystemProbeAppArmorAnnotationKey = "container.apparmor.security.beta.kubernetes.io/system-probe"
	// SystemProbeAppArmorAnnotationValue is the AppArmor profile applied to
	// the system-probe container by default.
	SystemProbeAppArmorAnnotationValue = "unconfined"
)
// Datadog volume names and mount paths
//
// Naming convention used throughout this block:
//   *VolumeName  - the k8s Volume name
//   *HostPath    - the path on the host node (for hostPath volumes)
//   *VolumePath / *MountPath - the mount path inside the container
const (
	// Agent configuration volumes.
	ConfdVolumeName = "confd"
	ConfdVolumePath = "/conf.d"
	ConfigVolumeName = "config"
	ConfigVolumePath = "/etc/datadog-agent"
	KubeStateMetricCoreVolumeName = "ksm-core-config"
	OrchestratorExplorerVolumeName = "orchestrator-explorer-config"
	ChecksdVolumeName = "checksd"
	ChecksdVolumePath = "/checks.d"
	// Host filesystem mounts used for container/process introspection.
	HostRootVolumeName = "hostroot"
	HostRootHostPath = "/"
	HostRootMountPath = "/host/root"
	GroupVolumeName = "group"
	GroupHostPath = "/etc/group"
	GroupMountPath = "/etc/group"
	PasswdVolumeName = "passwd"
	PasswdHostPath = "/etc/passwd"
	PasswdMountPath = "/etc/passwd"
	ProcdirVolumeName = "procdir"
	ProcdirHostPath = "/proc"
	ProcdirMountPath = "/host/proc"
	CgroupsVolumeName = "cgroups"
	CgroupsHostPath = "/sys/fs/cgroup"
	CgroupsMountPath = "/host/sys/fs/cgroup"
	// System-probe (eBPF) related mounts.
	SystemProbeOSReleaseDirVolumeName = "host-osrelease"
	SystemProbeOSReleaseDirVolumePath = "/etc/os-release"
	SystemProbeOSReleaseDirMountPath = "/host/etc/os-release"
	SystemProbeSocketVolumeName = "sysprobe-socket-dir"
	SystemProbeSocketVolumePath = "/var/run/sysprobe"
	DebugfsVolumeName = "debugfs"
	// same path on host and container
	DebugfsPath = "/sys/kernel/debug"
	TracefsVolumeName = "tracefs"
	TracefsPath = "/sys/kernel/tracing"
	SecurityfsVolumeName = "securityfs"
	SecurityfsVolumePath = "/sys/kernel/security"
	SecurityfsMountPath = "/host/sys/kernel/security"
	ModulesVolumeName = "modules"
	// same path on host and container
	ModulesVolumePath = "/lib/modules"
	SrcVolumeName = "src"
	// same path on host and container
	SrcVolumePath = "/usr/src"
	AgentCustomConfigVolumePath = "/etc/datadog-agent/datadog.yaml"
	SystemProbeConfigVolumePath = "/etc/datadog-agent/system-probe.yaml"
	// Agent runtime directories.
	LogDatadogVolumeName = "logdatadog"
	LogDatadogVolumePath = "/var/log/datadog"
	TmpVolumeName = "tmp"
	TmpVolumePath = "/tmp"
	CertificatesVolumeName = "certificates"
	CertificatesVolumePath = "/etc/datadog-agent/certificates"
	AuthVolumeName = "datadog-agent-auth"
	AuthVolumePath = "/etc/datadog-agent/auth"
	InstallInfoVolumeName = "installinfo"
	InstallInfoVolumeSubPath = "install_info"
	InstallInfoVolumePath = "/etc/datadog-agent/install_info"
	InstallInfoVolumeReadOnly = true
	// Log collection volumes.
	PointerVolumeName = "pointerdir"
	PointerVolumePath = "/opt/datadog-agent/run"
	LogTempStoragePath = "/var/lib/datadog-agent/logs"
	PodLogVolumeName = "logpodpath"
	PodLogVolumePath = "/var/log/pods"
	ContainerLogVolumeName = "logcontainerpath"
	ContainerLogVolumePath = "/var/lib/docker/containers"
	SymlinkContainerVolumeName = "symlinkcontainerpath"
	SymlinkContainerVolumePath = "/var/log/containers"
	// DogStatsD ports and sockets.
	DogstatsdHostPortName = "dogstatsdport"
	DogstatsdHostPortHostPort = 8125
	DogstatsdSocketVolumeName = "dsdsocket"
	DogstatsdAPMSocketHostPath = "/var/run/datadog"
	DogstatsdSocketLocalPath = "/var/run/datadog"
	DogstatsdSocketName = "dsd.socket"
	// Security agent (compliance / runtime security) volumes.
	SecurityAgentComplianceCustomConfigDirVolumeName = "customcompliancebenchmarks"
	SecurityAgentComplianceConfigDirVolumeName = "compliancedir"
	SecurityAgentComplianceConfigDirVolumePath = "/etc/datadog-agent/compliance.d"
	SecurityAgentRuntimeCustomPoliciesVolumeName = "customruntimepolicies"
	SecurityAgentRuntimeCustomPoliciesVolumePath = "/etc/datadog-agent-runtime-policies"
	SecurityAgentRuntimePoliciesDirVolumeName = "runtimepoliciesdir"
	SecurityAgentRuntimePoliciesDirVolumePath = "/etc/datadog-agent/runtime-security.d"
	// Container runtime socket and kubelet access.
	HostCriSocketPathPrefix = "/host"
	CriSocketVolumeName = "runtimesocketdir"
	RuntimeDirVolumePath = "/var/run"
	KubeletAgentCAPath = "/var/run/host-kubelet-ca.crt"
	KubeletCAVolumeName = "kubelet-ca"
	// APM (trace agent) ports and sockets.
	APMHostPortName = "traceport"
	APMHostPortHostPort = 8126
	APMSocketVolumeName = "apmsocket"
	APMSocketVolumeLocalPath = "/var/run/datadog"
	APMSocketName = "apm.socket"
	// Cluster-agent service ports.
	AdmissionControllerPortName = "admissioncontrollerport"
	AdmissionControllerSocketCommunicationMode = "socket"
	ExternalMetricsPortName = "metricsapi"
	ExternalMetricsAPIServiceName = "v1beta1.external.metrics.k8s.io"
	OTLPGRPCPortName = "otlpgrpcport"
	OTLPHTTPPortName = "otlphttpport"
	// Seccomp profile wiring for system-probe.
	SeccompSecurityVolumeName = "datadog-agent-security"
	SeccompSecurityVolumePath = "/etc/config"
	SeccompRootVolumeName = "seccomp-root"
	SeccompRootVolumePath = "/host/var/lib/kubelet/seccomp"
	SeccompRootPath = "/var/lib/kubelet/seccomp"
	SystemProbeSeccompKey = "system-probe-seccomp.json"
	SystemProbeAgentSecurityConfigMapSuffixName = "system-probe-seccomp"
	SystemProbeSeccompProfileName = "system-probe"
	AppArmorAnnotationKey = "container.apparmor.security.beta.kubernetes.io"
	// Custom datadog.yaml / datadog-cluster.yaml overrides.
	AgentCustomConfigVolumeName = "custom-datadog-yaml"
	AgentCustomConfigVolumeSubPath = "datadog.yaml"
	ClusterAgentCustomConfigVolumeName = "custom-cluster-agent-yaml"
	ClusterAgentCustomConfigVolumePath = "/etc/datadog-agent/datadog-cluster.yaml"
	ClusterAgentCustomConfigVolumeSubPath = "datadog-cluster.yaml"
)
// Downward-API field paths used when building pod environment variables.
const (
	// FieldPathSpecNodeName used as FieldPath for selecting the NodeName
	FieldPathSpecNodeName = "spec.nodeName"
	// FieldPathStatusHostIP used as FieldPath to retrieve the host ip
	FieldPathStatusHostIP = "status.hostIP"
	// FieldPathStatusPodIP used as FieldPath to retrieve the pod ip
	FieldPathStatusPodIP = "status.podIP"
	// FieldPathMetaName used as FieldPath to retrieve the pod name
	FieldPathMetaName = "metadata.name"
)
|
package hookstage
import (
"context"
"github.com/prebid/openrtb/v19/openrtb2"
)
// ProcessedAuctionRequest hooks are invoked after the request is parsed
// and enriched with additional data.
//
// At this stage, account config is available,
// so it can be configured at the account-level execution plan,
// the account-level module config is passed to hooks.
//
// Rejection results in sending an empty BidResponse
// with the NBR code indicating the rejection reason.
type ProcessedAuctionRequest interface {
	// HandleProcessedAuctionHook is called with the module's invocation
	// context and the parsed request payload; the returned HookResult may
	// carry mutations to apply to the payload.
	HandleProcessedAuctionHook(
		context.Context,
		ModuleInvocationContext,
		ProcessedAuctionRequestPayload,
	) (HookResult[ProcessedAuctionRequestPayload], error)
}
// ProcessedAuctionRequestPayload consists of the openrtb2.BidRequest object.
// Hooks are allowed to modify openrtb2.BidRequest using mutations.
type ProcessedAuctionRequestPayload struct {
	// BidRequest is the parsed OpenRTB 2.x request being processed.
	BidRequest *openrtb2.BidRequest
}
|
package userssvc
import (
"context"
"fmt"
pb "github.com/cagodoy/tenpo-challenge/lib/proto"
users "github.com/cagodoy/tenpo-users-api"
"github.com/cagodoy/tenpo-users-api/database"
"github.com/cagodoy/tenpo-users-api/service"
"golang.org/x/crypto/bcrypt"
)
// Compile-time assertion that *Service implements the generated gRPC
// server interface.
var _ pb.UserServiceServer = (*Service)(nil)

// Service exposes user CRUD and password-verification operations over gRPC,
// delegating the actual work to the domain-layer users service.
type Service struct {
	// usersSvc performs the database-backed user operations.
	usersSvc users.Service
}
// New builds a gRPC user Service backed by the given database store.
func New(store database.Store) *Service {
	return &Service{
		usersSvc: service.NewUsers(store),
	}
}
// UserGet Gets a user by ID.
//
// Lookup failures are reported inside the response Error field (code 500);
// the gRPC-level error is always nil, matching the service's convention of
// carrying errors in the payload.
func (us *Service) UserGet(ctx context.Context, gr *pb.UserGetRequest) (*pb.UserGetResponse, error) {
	id := gr.GetUserId()
	// fmt.Printf replaces the redundant fmt.Println(fmt.Sprintf(...)) pattern;
	// output is byte-identical.
	fmt.Printf("[gRPC][TenpoUsersService][Get][Request] id = %v\n", id)

	user, err := us.usersSvc.GetByID(id)
	if err != nil {
		fmt.Printf("[gRPC][TenpoUsersService][Get][Error] %v\n", err)
		return &pb.UserGetResponse{
			Meta: nil,
			Data: nil,
			Error: &pb.Error{
				Code:    500,
				Message: err.Error(),
			},
		}, nil
	}

	res := &pb.UserGetResponse{
		Meta:  nil,
		Data:  user.ToProto(),
		Error: nil,
	}
	fmt.Printf("[gRPC][TenpoUsersService][Get][Response] %v\n", res)

	return res, nil
}
// UserGetByEmail get a user by Email.
//
// An empty email yields a 400 in the response Error field; a failed lookup
// yields a 404. The gRPC-level error is always nil.
func (us *Service) UserGetByEmail(ctx context.Context, gr *pb.UserGetByEmailRequest) (*pb.UserGetByEmailResponse, error) {
	email := gr.GetEmail()
	// fmt.Printf replaces the redundant fmt.Println(fmt.Sprintf(...)) pattern.
	fmt.Printf("[gRPC][TenpoUsersService][GetByEmail][Request] email = %v\n", email)

	if email == "" {
		fmt.Printf("[gRPC][TenpoUsersService][GetByEmail][Error] %v\n", "email user params empty")
		return &pb.UserGetByEmailResponse{
			Meta: nil,
			Data: nil,
			Error: &pb.Error{
				Code:    400,
				Message: "email user params empty",
			},
		}, nil
	}

	user, err := us.usersSvc.GetByEmail(email)
	if err != nil {
		fmt.Printf("[gRPC][TenpoUsersService][GetByEmail][Error] %v\n", "user not found")
		return &pb.UserGetByEmailResponse{
			Meta: nil,
			Data: nil,
			Error: &pb.Error{
				Code:    404,
				Message: "user not found",
			},
		}, nil
	}

	res := &pb.UserGetByEmailResponse{
		Meta:  nil,
		Data:  user.ToProto(),
		Error: nil,
	}
	fmt.Printf("[gRPC][TenpoUsersService][GetByEmail][Response] %v\n", res)

	return res, nil
}
// UserCreate creates a new user into database.
//
// Validation failures (empty email/name/password, duplicate email) are
// returned with code 400 in the response Error field; hashing and storage
// failures use code 500. The gRPC-level error is always nil.
func (us *Service) UserCreate(ctx context.Context, gr *pb.UserCreateRequest) (*pb.UserCreateResponse, error) {
	fmt.Printf("[gRPC][TenpoUsersService][Create][Request] data = %v\n", gr.GetData())

	email := gr.GetData().GetEmail()
	if email == "" {
		fmt.Printf("[gRPC][TenpoUsersService][Create][Error] %v\n", "email user param is empty")
		return &pb.UserCreateResponse{
			Meta: nil,
			Data: nil,
			Error: &pb.Error{
				Code:    400,
				Message: "email user param is empty",
			},
		}, nil
	}

	// NOTE(review): as in the original control flow, ANY lookup error
	// (including transient DB failures) is treated as "user does not exist";
	// only a successful lookup blocks creation. Confirm this is intended.
	if _, err := us.usersSvc.GetByEmail(email); err == nil {
		res := &pb.UserCreateResponse{
			Meta: nil,
			Data: nil,
			Error: &pb.Error{
				Code:    400,
				Message: "user already exists",
			},
		}
		fmt.Printf("[gRPC][TenpoUsersService][Create][Response] %v\n", res)
		return res, nil
	}

	name := gr.GetData().GetName()
	if name == "" {
		fmt.Printf("[gRPC][TenpoUsersService][Create][Error] %v\n", "name user param is empty")
		return &pb.UserCreateResponse{
			Meta: nil,
			Data: nil,
			Error: &pb.Error{
				Code:    400,
				Message: "name user param is empty",
			},
		}, nil
	}

	password := gr.GetData().GetPassword()
	if password == "" {
		fmt.Printf("[gRPC][TenpoUsersService][Create][Error] %v\n", "password user params is empty")
		return &pb.UserCreateResponse{
			Meta: nil,
			Data: nil,
			Error: &pb.Error{
				Code:    400,
				Message: "password user params is empty",
			},
		}, nil
	}

	// Only the bcrypt hash is persisted, never the plaintext password.
	hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		fmt.Printf("[gRPC][TenpoUsersService][Create][Error] %v\n", err)
		return &pb.UserCreateResponse{
			Meta: nil,
			Data: nil,
			Error: &pb.Error{
				Code:    500,
				Message: "could not generate hashed password",
			},
		}, nil
	}

	user := &users.User{
		Email:    email,
		Name:     name,
		Password: string(hashedPassword),
	}
	if err := us.usersSvc.Create(user); err != nil {
		fmt.Printf("[gRPC][TenpoUsersService][Create][Error] %v\n", err)
		return &pb.UserCreateResponse{
			Meta: nil,
			Data: nil,
			Error: &pb.Error{
				Code:    500,
				Message: err.Error(),
			},
		}, nil
	}

	res := &pb.UserCreateResponse{
		Meta:  nil,
		Data:  user.ToProto(),
		Error: nil,
	}
	fmt.Printf("[gRPC][TenpoUsersService][Create][Response] %v\n", res)

	return res, nil
}
// UserVerifyPassword checks the given plaintext password against the stored
// bcrypt hash for the user identified by email.
//
// Security fix: the request log no longer echoes the plaintext password.
func (us *Service) UserVerifyPassword(ctx context.Context, gr *pb.UserVerifyPasswordRequest) (*pb.UserVerifyPasswordResponse, error) {
	fmt.Printf("[gRPC][TenpoUsersService][VerifyPassword][Request] email = %v\n", gr.GetEmail())

	email := gr.GetEmail()
	if email == "" {
		fmt.Printf("[gRPC][TenpoUsersService][VerifyPassword][Error] %v\n", "email user param is empty")
		return &pb.UserVerifyPasswordResponse{
			Valid: false,
			Error: &pb.Error{
				Code:    400,
				Message: "email user param is empty",
			},
		}, nil
	}

	password := gr.GetPassword()
	if password == "" {
		fmt.Printf("[gRPC][TenpoUsersService][VerifyPassword][Error] %v\n", "password user param is empty")
		return &pb.UserVerifyPasswordResponse{
			Valid: false,
			Error: &pb.Error{
				Code:    400,
				Message: "password user param is empty",
			},
		}, nil
	}

	user, err := us.usersSvc.GetByEmail(email)
	if err != nil {
		fmt.Printf("[gRPC][TenpoUsersService][VerifyPassword][Error] %v\n", "user not found")
		return &pb.UserVerifyPasswordResponse{
			Valid: false,
			Error: &pb.Error{
				Code:    404,
				Message: "user not found",
			},
		}, nil
	}

	// Constant-time comparison of the stored hash and the candidate password.
	if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)); err != nil {
		fmt.Printf("[gRPC][TenpoUsersService][VerifyPassword][Error] %v\n", "invalid password")
		return &pb.UserVerifyPasswordResponse{
			Valid: false,
			Error: &pb.Error{
				Code:    400,
				Message: "invalid password",
			},
		}, nil
	}

	res := &pb.UserVerifyPasswordResponse{
		Valid: true,
		Error: nil,
	}
	fmt.Printf("[gRPC][TenpoUsersService][VerifyPassword][Response] %v\n", res)

	return res, nil
}
// UserList return a collection of users.
//
// Storage failures are reported inside the response Error field (code 500);
// the gRPC-level error is always nil.
func (us *Service) UserList(ctx context.Context, gr *pb.UserListRequest) (*pb.UserListResponse, error) {
	fmt.Printf("[GRPC][UsersService][List][Request] empty = %v\n", "")

	//TODO(ca): check bdd connection
	listedUsers, err := us.usersSvc.UserList()
	if err != nil {
		fmt.Printf("[GRPC][UsersService][List][Error] %v\n", err)
		return &pb.UserListResponse{
			Data: nil,
			Error: &pb.Error{
				Code:    500,
				Message: err.Error(),
			},
		}, nil
	}

	// Pre-size to avoid repeated slice growth.
	data := make([]*pb.User, 0, len(listedUsers))
	for _, user := range listedUsers {
		data = append(data, user.ToProto())
	}

	res := &pb.UserListResponse{
		Data:  data,
		Error: nil,
	}
	fmt.Printf("[GRPC][UsersService][List][Response] %v\n", res)

	return res, nil
}
|
package util
import (
"board"
)
// AvailableMoves returns every valid move for the current player on board b:
// wall placements, single steps, and jumps over the opponent.
func AvailableMoves(b *board.Board) []*board.Move {
	// Collect into one slice; helpers append through the pointer.
	// (The previous extra pointer variable was needless indirection.)
	moves := make([]*board.Move, 0)
	addAvailableWallMoves(b, &moves)
	addAvailableStepMoves(b, &moves)
	addAvailableJumpMoves(b, &moves)
	return moves
}
// addAvailableWallMoves appends every valid horizontal and vertical wall
// placement to availableMoves.
func addAvailableWallMoves(b *board.Board, availableMoves *[]*board.Move) {
	for row := 0; row < b.NRows-1; row++ {
		for col := 0; col < b.NCols-1; col++ {
			pos := &board.Pos{Row: row, Col: col}

			// Try a horizontal wall at this position.
			horizontal := &board.Move{Mt: board.HorizWall, Pos: pos}
			addMoveIfValid(b, horizontal, availableMoves)

			// Then a vertical wall at the same position.
			vertical := &board.Move{Mt: board.VertiWall, Pos: pos}
			addMoveIfValid(b, vertical, availableMoves)
		}
	}
}
// addAvailableStepMoves appends every valid single-step move (down, up,
// left, right — in that order) to availableMoves.
func addAvailableStepMoves(b *board.Board, availableMoves *[]*board.Move) {
	candidates := []*board.Move{
		board.StepMove(board.Down),
		board.StepMove(board.Up),
		board.StepMove(board.Left),
		board.StepMove(board.Right),
	}
	for _, candidate := range candidates {
		addMoveIfValid(b, candidate, availableMoves)
	}
}
// addAvailableJumpMoves appends every valid jump to a square adjacent to
// the opposing player's position.
func addAvailableJumpMoves(b *board.Board, availableMoves *[]*board.Move) {
	// Pick the opponent's position based on whose turn it is.
	enemyPos := b.Pos1
	if b.CurPlayer {
		enemyPos = b.Pos0
	}
	for _, target := range b.Neighbors(enemyPos) {
		jump := &board.Move{Mt: board.Jump, Pos: target}
		addMoveIfValid(b, jump, availableMoves)
	}
}
// addMoveIfValid appends a copy of move to availableMoves when applying it
// to a copy of b succeeds and leaves the board in a valid state.
func addMoveIfValid(b *board.Board, move *board.Move, availableMoves *[]*board.Move) {
	trial := b.Copy()
	if err := trial.MakeMove(b.CurPlayer, move); err != nil {
		return
	}
	if !trial.Validate() {
		return
	}
	*availableMoves = append(*availableMoves, move.Copy())
}
|
package main
import (
"fmt"
)
// main runs an interactive calculator: it prompts for an operator and two
// integers on stdin, then prints the computed result.
func main() {
	var (
		operator  string
		numFirst  int
		numSecond int
	)

	fmt.Print("Please enter operator : ")
	fmt.Scan(&operator)
	operator = operatorCheck(operator)

	fmt.Print("Please enter first number : ")
	fmt.Scan(&numFirst)
	// numFirstType := fmt.Sprintf("%T", numFirst)
	// operandCheck(numFirstType)

	fmt.Print("Please enter second number : ")
	fmt.Scan(&numSecond)

	result := cal(operator, numFirst, numSecond)
	fmt.Printf("Result : %d", result)
}
// cal applies the arithmetic operator ("+", "-", "*", "/") to the operands
// and returns the result. An unknown operator yields 0. Division by zero is
// reported on stdout and yields 0 instead of panicking.
func cal(operator string, numFirst int, numSecond int) int {
	switch operator {
	case "+":
		return numFirst + numSecond
	case "-":
		return numFirst - numSecond
	case "*":
		return numFirst * numSecond
	case "/":
		// Guard: integer division by zero panics at runtime in Go.
		if numSecond == 0 {
			fmt.Println("Error: division by zero.")
			return 0
		}
		return numFirst / numSecond
	}
	// Unreachable in practice: operatorCheck only passes the cases above.
	return 0
}
// operatorCheck re-prompts on stdin until operator is one of "+", "-", "*"
// or "/", then returns the accepted operator. A valid input returns
// immediately without prompting.
func operatorCheck(operator string) string {
	// Replaces the contrived `for i := 0; i < 1;` counter loop with a plain
	// infinite loop that returns on valid input.
	for {
		switch operator {
		case "+", "-", "*", "/":
			return operator
		default:
			fmt.Println("Wrong operator Please try again.")
			fmt.Print("Please enter operator : ")
			fmt.Scan(&operator)
		}
	}
}
// func operandCheck(numFirstType string) int {
// for i := 0; i < 1; {
// if numFirstType != "int" {
// fmt.Println("Invalid number please try again.")
// fmt.Print("Please enter operator: ")
// fmt.Scan(&numFirstType)
// } else {
// i++
// }
// }
// return num
// }
|
package main
import (
"fmt"
"log"
"os"
)
// main prints basic metadata for the file named by the first CLI argument:
// whether it is a directory, its name, size in bytes, and permission bits.
func main() {
	args := os.Args
	if len(args) < 2 {
		fmt.Println("Usage: permission filename")
		return
	}

	// gofmt fix: `stat,err` -> `stat, err`.
	stat, err := os.Stat(args[1])
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(stat.IsDir(), stat.Name(), stat.Size(), stat.Mode().Perm())
}
|
package data
// ClashX models a Clash(X) YAML configuration file. Fields without an
// explicit yaml tag are marshalled with the library's default key naming.
type ClashX struct {
	Port int
	SocksPort int `yaml:"socks-port"`
	AllowLan bool `yaml:"allow-lan"`
	Mode string
	LogLevel string `yaml:"log-level"`
	ExternalController string `yaml:"external-controller"`
	Secret string
	Dns *ClashXDNS
	// NOTE(review): pointer-to-slice is unusual in Go; kept as-is because
	// callers may rely on the field type. Confirm before simplifying.
	Proxy *[]ClashXProxy `yaml:"proxies"`
	ProxyGroup *[]ClashXProxyGroup `yaml:"proxy-groups"`
	Rule []string `yaml:"rules"`
}

// ClashXDNS is the `dns` section of the Clash configuration.
type ClashXDNS struct {
	Enable bool
	Ipv6 bool
	Nameserver []string
	Fallback []string
	FallbackFilter ClashFallbackFilter `yaml:"fallback-filter"`
}

// ClashFallbackFilter is the `fallback-filter` subsection of the DNS config.
type ClashFallbackFilter struct {
	Geoip bool `yaml:"geoip"`
	Ipcidr []string `yaml:"ipcidr"`
	Domain []string `yaml:"domain"`
}

// ClashXProxy describes a single entry in the `proxies` list.
type ClashXProxy struct {
	Name string
	Type string
	Server string
	Port int
	Password string
	Alpn []string
	SkipCertVerify bool `yaml:"skip-cert-verify"`
}

// ClashXProxyGroup describes a single entry in the `proxy-groups` list.
type ClashXProxyGroup struct {
	Name string
	Type string
	Url string `yaml:",omitempty"`
	Interval int `yaml:",omitempty"`
	Proxies []string
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package utility
import (
"github.com/mattermost/mattermost-cloud/model"
log "github.com/sirupsen/logrus"
)
// unmanaged is a no-op utility handle used when a cluster utility is not
// managed by the provisioner: lifecycle operations log and do nothing.
type unmanaged struct {
	// utilityName is the name reported by Name().
	utilityName string
	// logger is pre-tagged with the utility name and the unmanaged flag.
	logger log.FieldLogger
}
// newUnmanagedHandle returns an unmanaged utility handle whose logger is
// pre-tagged with the utility name and the unmanaged flag.
func newUnmanagedHandle(utilityName string, logger log.FieldLogger) *unmanaged {
	fields := log.Fields{
		"cluster-utility": utilityName,
		"unmanaged":       true,
	}
	return &unmanaged{
		utilityName: utilityName,
		logger:      logger.WithFields(fields),
	}
}
// ValuesPath returns an empty string: unmanaged utilities have no Helm
// values file.
func (u *unmanaged) ValuesPath() string {
	return ""
}
// CreateOrUpgrade is a no-op for unmanaged utilities; it logs and returns nil.
func (u *unmanaged) CreateOrUpgrade() error {
	// Typo fix in the log message: "skippping" -> "skipping".
	u.logger.WithField("unmanaged-action", "create").Info("Utility is unmanaged; skipping...")
	return nil
}
// Destroy is a no-op for unmanaged utilities; it logs and returns nil.
func (u *unmanaged) Destroy() error {
	// Typo fix in the log message: "skippping" -> "skipping".
	u.logger.WithField("unmanaged-action", "destroy").Info("Utility is unmanaged; skipping...")
	return nil
}
// Migrate is a no-op for unmanaged utilities.
func (u *unmanaged) Migrate() error {
	return nil
}

// Name returns the utility's name.
func (u *unmanaged) Name() string {
	return u.utilityName
}

// DesiredVersion reports the sentinel unmanaged version.
func (u *unmanaged) DesiredVersion() *model.HelmUtilityVersion {
	return &model.HelmUtilityVersion{Chart: model.UnmanagedUtilityVersion}
}

// ActualVersion reports the sentinel unmanaged version.
func (u *unmanaged) ActualVersion() *model.HelmUtilityVersion {
	return &model.HelmUtilityVersion{Chart: model.UnmanagedUtilityVersion}
}
|
// Defining the root command.
package cmd
import (
"errors"
"fmt"
"github.com/JosephLai241/shift/utils"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// rootCmd represents the base command when called without any subcommands.
// NOTE(review): the Long string below is user-visible help text rendered
// verbatim by cobra; do not reflow or reformat it.
var rootCmd = &cobra.Command{
	Use:   "shift",
	Short: "A command-line application for tracking shift (clock-in, clock-out, shift duration, etc.) data",
	Long: `
_ _ ___ _
___| |_|_| _| |_
|_ -| | | _| _|
|___|_|_|_|_| |_|
Author: Joseph Lai
GitHub: https://github.com/JosephLai241/shift
'shift' is a command-line application designed for contractors/remote workers
who need to keep track of their own working hours. Or for anything else you want
to track. Inspired by Luke Schenk's Python CLI tool 'clck'.
This program performs CRUD operations on your local machine for the following:
- clock-in time
- clock-out time
- shift duration
- any messages associated with a clock-in or clock-out command call
|| NOTE: 'shift' initializes and reads from files in your current working directory.
|| Run it in a directory in which you would like all your records and program-related
|| files to be stored.
---
The commands you will likely interact with most are:
- in
- status
- out
There are additional commands that may be very useful to you as your records grow
in size. These commands are:
- amend
- list
- delete
Almost all of the commands included in this program contain additional, optional
flags or sub-commands that provide granular control over its operations. I strongly
recommend looking at the help menu for each command to fully take advantage of
the features included for each. You can do so by running 'shift help [COMMAND_NAME]'.
---
This program allows you to configure how you want to save your recorded shifts.
There are two available options:
- timesheet (CSV spreadsheets)
- database (relational SQLite database)
Timesheet is the default option; however, you can configure 'shift' to record shifts
to a SQLite database instead by using the 'storage' command. See the help menu for
the command to learn more about how to do so.
If 'shift' is configured to record shifts in timesheets, the directory 'shifts' is
created in your current working directory. This directory contains a sub-directory
labeled with the current year. CSV files labeled with the current month are created
within the year directory, which contain shift data. This is an example of the 'shifts'
directory if you ran 'shift' sometime during July 2021:
shifts/
└── 2021
└── July.csv
If 'shift' is configured to record shifts in the database instead, 'shifts.db' is
created in your current working directory. 'shift' then creates the main 'YEAR'
table, which holds the current year. The entry then points to a 'Y_CURRENT_YEAR'
table containing the months in which you ran 'shift'. Finally, the months point to a
'M_CURRENT_MONTH' table containing shift data. This is an example of the relationships
within 'shifts.db' if you ran 'shift' sometime during July 2021:
shifts.db
└── TABLE 'YEAR'
└── TABLE 'Y_2021'
└── TABLE 'M_July'
`,
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
// A command error terminates the process via cobra.CheckErr.
func Execute() {
	cobra.CheckErr(rootCmd.Execute())
}
// Initialize the command-line interface: cobra calls initConfig before
// running any command.
func init() {
	cobra.OnInitialize(initConfig)
}
// initConfig reads in the `.shiftconfig.yml` config file.
//
// If no config file exists, one is written with the default storage-type
// ("timesheet"). The configured "storage-type" is then validated; an
// invalid value aborts via utils.CheckError.
func initConfig() {
	configFile := fmt.Sprintf("%s/%s", utils.GetCWD(), ".shiftconfig.yml")
	viper.SetConfigFile(configFile)
	viper.SetDefault("storage-type", "timesheet")

	if err := viper.ReadInConfig(); err != nil {
		// Best effort: create a fresh config file with defaults. The error is
		// intentionally ignored (e.g. file already exists), as before.
		viper.SafeWriteConfigAs(configFile)
	}

	storageType := viper.GetString("storage-type")
	switch storageType {
	case "timesheet", "database":
		// Accepted value; nothing to do.
	default:
		utils.CheckError("`.shiftconfig.yml` error", errors.New(`
The "storage-type" value is invalid. Accepted values are:
- timesheet
- database
`))
	}
}
|
package nfsserver
import (
"context"
goerrors "errors"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/storageos/cluster-operator/internal/pkg/storageoscluster"
storageosv1 "github.com/storageos/cluster-operator/pkg/apis/storageos/v1"
stosClientset "github.com/storageos/cluster-operator/pkg/client/clientset/versioned"
"github.com/storageos/cluster-operator/pkg/nfs"
"github.com/storageos/cluster-operator/pkg/util/k8s"
)
// ErrNoCluster is the error when there's no associated running StorageOS
// cluster found for NFS server.
var ErrNoCluster = goerrors.New("no storageos cluster found")
var log = logf.Log.WithName("controller_nfsserver")
const (
finalizer = "finalizer.nfsserver.storageos.com"
appComponent = "nfs-server"
reconcilePeriodSeconds = 15
)
// Add creates a new NFSServer Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler wired to the manager's
// client, config, scheme, event recorder, and a StorageOS clientset.
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	cfg := mgr.GetConfig()
	return &ReconcileNFSServer{
		client:        mgr.GetClient(),
		kConfig:       cfg,
		scheme:        mgr.GetScheme(),
		recorder:      mgr.GetEventRecorderFor("storageos-nfsserver"),
		stosClientset: stosClientset.NewForConfigOrDie(cfg),
	}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler.
// It wires watches for the primary NFSServer resource and for owned
// StatefulSets and Services so owner NFSServers are re-queued on changes.
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("nfsserver-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Watch for changes to primary resource NFSServer.
	err = c.Watch(&source.Kind{Type: &storageosv1.NFSServer{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}
	// Watch for changes to secondary resource StatefulSet and requeue the owner
	// NFSServer.
	err = c.Watch(&source.Kind{Type: &appsv1.StatefulSet{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &storageosv1.NFSServer{},
	})
	if err != nil {
		return err
	}
	// Watch for changes to secondary resource Service and requeue the owner
	// NFSServer.
	//
	// This is used to update the NFSServer Status with the connection endpoint
	// once it comes online.
	err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &storageosv1.NFSServer{},
	})
	if err != nil {
		return err
	}
	return nil
}
// blank assignment to verify that ReconcileNFSServer implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileNFSServer{}

// ReconcileNFSServer reconciles a NFSServer object
type ReconcileNFSServer struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	// stosClientset talks to the StorageOS custom-resource API.
	stosClientset stosClientset.Interface
	// scheme maps Go types to GroupVersionKinds for owned objects.
	scheme *runtime.Scheme
	// recorder emits k8s Events attached to NFSServer objects.
	recorder record.EventRecorder
	// k8s rest config is needed for creating a k8s discovery client, used by
	// the osdk's metrics helpers to create Prometheus ServiceMonitor for NFS
	// Server.
	kConfig *rest.Config
}
// Reconcile reads that state of the cluster for a NFSServer object and makes changes based on the state read
// and what is in the NFSServer.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileNFSServer) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
	// reqLogger.Info("Reconciling NFSServer")

	// Every outcome (except deletion) re-queues after this fixed period.
	reconcilePeriod := reconcilePeriodSeconds * time.Second
	reconcileResult := reconcile.Result{RequeueAfter: reconcilePeriod}

	// Fetch the NFSServer instance
	instance := &storageosv1.NFSServer{}
	err := r.client.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcileResult, err
	}

	// NOTE(review): reconcile errors are logged and dropped (nil returned) so
	// the request retries on the fixed RequeueAfter period rather than with
	// the controller's backoff — confirm this is intentional.
	if err := r.reconcile(instance); err != nil {
		reqLogger.Info("Reconcile failed", "error", err)
		return reconcileResult, nil
	}

	return reconcileResult, nil
}
// reconcile drives one pass of the NFSServer state machine: ensure the
// finalizer is set, sync the spec from the current StorageOS cluster, then
// deploy the NFS server — or tear it down if the CR is being deleted.
func (r *ReconcileNFSServer) reconcile(instance *storageosv1.NFSServer) error {
	// Add our finalizer immediately so we can cleanup a partial deployment. If
	// this is not set, the CR can simply be deleted.
	if len(instance.GetFinalizers()) == 0 {
		// Add our finalizer so that we control deletion.
		if err := r.addFinalizer(instance); err != nil {
			return err
		}
		// Return here, as the update to add the finalizer will trigger another
		// reconcile.
		return nil
	}
	// Get a StorageOS cluster to associate the NFS server with.
	stosCluster, err := storageoscluster.GetCurrentStorageOSCluster(r.client)
	if err != nil {
		return err
	}
	// Update NFS spec with values inferred from the StorageOS cluster.
	updated, err := r.updateSpec(instance, stosCluster)
	if err != nil {
		return err
	}
	// Return here if the CR has been updated as the current instance is
	// outdated.
	if updated {
		return nil
	}
	// Prepare for NFS deployment.
	// Labels to be applied on all the k8s resources that are created for NFS
	// server. Inherit the labels from the CR.
	labels := instance.Labels
	if labels == nil {
		labels = map[string]string{}
	}
	// Add default labels.
	// TODO: This is legacy label. Remove this with care. Ensure it's not used
	// by any label selectors.
	labels["app"] = "storageos"
	// Set the app component.
	labels[k8s.AppComponent] = appComponent
	// Add default resource app labels.
	labels = k8s.AddDefaultAppLabels(stosCluster.Name, labels)
	d := nfs.NewDeployment(r.client, r.kConfig, stosCluster, instance, labels, r.recorder, r.scheme)
	// If the CR has not been marked for deletion, ensure it is deployed.
	if instance.GetDeletionTimestamp() == nil {
		if err := d.Deploy(); err != nil {
			// Ignore "Operation cannot be fulfilled" error. It happens when the
			// actual state of object is different from what is known to the operator.
			// Operator would resync and retry the failed operation on its own.
			if !strings.HasPrefix(err.Error(), "Operation cannot be fulfilled") {
				r.recorder.Event(instance, corev1.EventTypeWarning, "FailedCreation", err.Error())
			}
			return err
		}
	} else {
		// Delete the deployment once the finalizers are set on the cluster
		// resource.
		r.recorder.Event(instance, corev1.EventTypeNormal, "Terminating", "Deleting the NFS server.")
		if err := d.Delete(); err != nil {
			return err
		}
		// Reset finalizers and let k8s delete the object.
		// When finalizers are set on an object, metadata.deletionTimestamp is
		// also set. deletionTimestamp helps the garbage collector identify
		// when to delete an object. k8s deletes the object only once the
		// list of finalizers is empty.
		instance.SetFinalizers([]string{})
		return r.client.Update(context.Background(), instance)
	}
	return nil
}
// addFinalizer appends the NFS server finalizer to the instance and
// persists the change via the API server.
func (r *ReconcileNFSServer) addFinalizer(instance *storageosv1.NFSServer) error {
	finalizers := append(instance.GetFinalizers(), finalizer)
	instance.SetFinalizers(finalizers)

	// Update CR; the Update error (or nil) is the result of this call.
	return r.client.Update(context.TODO(), instance)
}
// updateSpec takes a NFSServer CR and a StorageOSCluster CR and updates
// NFSServer if needed. It returns true if there was an update. This result can
// be used to decide if the caller should continue with reconcile or return from
// reconcile due to an outdated CR instance.
func (r *ReconcileNFSServer) updateSpec(instance *storageosv1.NFSServer, cluster *storageosv1.StorageOSCluster) (bool, error) {
	changed := false

	// Inherit the storage class from the cluster when the CR does not pin one.
	if sc := instance.Spec.GetStorageClassName(cluster.Spec.GetStorageClassName()); instance.Spec.StorageClassName != sc {
		instance.Spec.StorageClassName = sc
		changed = true
	}

	// Likewise for the NFS container image.
	if image := instance.Spec.GetContainerImage(cluster.Spec.GetNFSServerImage()); instance.Spec.NFSContainer != image {
		instance.Spec.NFSContainer = image
		changed = true
	}

	if !changed {
		return false, nil
	}

	// Persist the updated CR.
	if err := r.client.Update(context.TODO(), instance); err != nil {
		return false, err
	}
	return true, nil
}
|
package firebase
import (
"context"
"log"
firebase "firebase.google.com/go"
"firebase.google.com/go/auth"
"firebase.google.com/go/db"
)
// Controller contains app and client instances for Firebase related requests.
// Construct it with New; the zero value is not usable.
type Controller struct {
	// Instance is the instance of the Firebase app.
	Instance *firebase.App
	// Client is the authentication instance used by the controller.
	Client *auth.Client
	// Database is the database instance used by the controller.
	Database *db.Client
}
// New creates a new Firebase controller. Requires that GOOGLE_APPLICATION_CREDENTIALS
// environment variable be set.
//
// Any initialization failure is fatal: the process exits via log.Fatalf.
func New() Controller {
	// One background context is enough for the whole initialization.
	ctx := context.Background()

	app, err := firebase.NewApp(ctx, nil)
	if err != nil {
		log.Fatalf("Could not initialize Firebase SDK: \"%s\"", err)
	}

	client, err := app.Auth(ctx)
	if err != nil {
		log.Fatalf("Could not authenticate with Firebase auth: \"%s\"", err)
	}

	// Named "database" so the local variable no longer shadows the imported
	// "db" package.
	database, err := app.Database(ctx)
	if err != nil {
		log.Fatalf("Could not authenticate with Firebase database: \"%s\"", err)
	}

	return Controller{
		Instance: app,
		Client:   client,
		Database: database,
	}
}
|
package migrate
import (
"github.com/pkg/errors"
"github.com/vim-volt/volt/lockjson"
"github.com/vim-volt/volt/transaction"
)
// init registers the lockjson migration operation under its name so
// `volt migrate lockjson` can look it up in migrateOps.
func init() {
	m := &lockjsonMigrater{}
	migrateOps[m.Name()] = m
}
// lockjsonMigrater migrates $VOLTPATH/lock.json to the latest format.
type lockjsonMigrater struct{}

// Name returns the subcommand name used to invoke this migration.
func (*lockjsonMigrater) Name() string {
	return "lockjson"
}
// Description returns the help text for this migration; brief selects the
// one-line summary used in listings, otherwise the full usage text.
func (m *lockjsonMigrater) Description(brief bool) string {
	if brief {
		return "converts old lock.json format to the latest format"
	}
	return `Usage
volt migrate [-help] ` + m.Name() + `
Description
Perform migration of $VOLTPATH/lock.json, which means volt converts old version lock.json structure into the latest version. This is always done automatically when reading lock.json content. For example, 'volt get <repos>' will install plugin, and migrate lock.json structure, and write it to lock.json after all. so the migrated content is written to lock.json automatically.
But, for example, 'volt list' does not write to lock.json but does read, so every time when running 'volt list' shows warning about lock.json is old.
To suppress this, running this command simply reads and writes migrated structure to lock.json.`
}
// Migrate reads lock.json (which migrates its structure in memory) and writes
// the migrated structure back, all inside a transaction.
func (*lockjsonMigrater) Migrate() (err error) {
	// Read lock.json; reading performs the in-memory format migration.
	lockJSON, err := lockjson.ReadNoMigrationMsg()
	if err != nil {
		return errors.Wrap(err, "could not read lock.json")
	}

	// Begin transaction. The original returned this error bare, unlike every
	// other failure in this function; wrap it for consistent context.
	trx, err := transaction.Start()
	if err != nil {
		return errors.Wrap(err, "could not begin transaction")
	}
	defer func() {
		// Only surface the Done() error when nothing failed earlier, so a
		// cleanup failure can't mask the original write error.
		if e := trx.Done(); e != nil && err == nil {
			err = e
		}
	}()

	// Write the migrated structure back to lock.json.
	err = lockJSON.Write()
	if err != nil {
		return errors.Wrap(err, "could not write to lock.json")
	}
	return
}
|
package model
import (
"gamesvr/manager"
"shared/common"
"shared/statistic/logreason"
"shared/utility/errors"
)
// ReceiveChapterReward grants the drop rewards attached to the given chapter
// reward entry, after validating that the chapter exists for this user, the
// chapter score meets the reward's threshold, and the reward hasn't already
// been claimed.
func (u *User) ReceiveChapterReward(rewardId int32) error {
	rewardCfg, err := manager.CSV.ChapterEntry.GetChapterReward(rewardId)
	if err != nil {
		return err
	}

	chap, ok := u.ChapterInfo.GetChapter(rewardCfg.ChapterId)
	if !ok {
		return errors.Swrapf(common.ErrChapterScoreNotArrival, rewardCfg.ChapterId)
	}
	if chap.GetScore() < rewardCfg.Number {
		return errors.Swrapf(common.ErrChapterScoreNotArrival, rewardCfg.ChapterId)
	}
	if chap.IsReceiveReward(rewardId) {
		return errors.Swrapf(common.ErrChapterRewardReceived, rewardCfg.ChapterId, rewardId)
	}

	// Mark the reward claimed, then pay out its drop table.
	chap.RecordReward(rewardId)
	u.AddRewardsByDropId(rewardCfg.DropId, logreason.NewReason(logreason.ChapterReward))
	return nil
}
// CheckChapterUnlock reports whether the user satisfies the chapter's unlock
// conditions, returning a non-nil error when the chapter config is missing or
// a condition is unmet.
func (u *User) CheckChapterUnlock(chapterId int32) error {
	chapterCfg, err := manager.CSV.ChapterEntry.GetChapter(chapterId)
	if err != nil {
		return err
	}
	// The condition check's error is the result; the original's
	// check-then-return-nil dance was redundant.
	return u.CheckUserConditions(chapterCfg.UnlockCondition)
}
// CheckChapterRewardNoticeByType counts how many chapter rewards of the given
// chapter type are currently claimable by the user: score threshold reached
// but the reward not yet received. Rewards whose config can't be loaded are
// skipped.
func (u *User) CheckChapterRewardNoticeByType(chapterType int32) int32 {
	var total int32
	for _, id := range manager.CSV.ChapterEntry.GetChapterIdsByType(chapterType) {
		chap, exists := u.ChapterInfo.GetChapter(id)
		if !exists {
			continue
		}
		for _, rid := range manager.CSV.ChapterEntry.GetChapterRewardIds(id) {
			if chap.IsReceiveReward(rid) {
				continue
			}
			cfg, err := manager.CSV.ChapterEntry.GetChapterReward(rid)
			if err != nil {
				continue
			}
			if chap.GetScore() >= cfg.Number {
				total++
			}
		}
	}
	return total
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
)
// init is intentionally empty; the commented-out lines record how Config was
// exercised during development (store a config, load it back, print it).
func init() {
	// config := &Config{MaxGoroutines: 3}
	// config.Store()
	// config := &Config{}
	// config.Load("config.json")
	// fmt.Printf("%+v\n", config)
}
// Config .
type Config struct {
MaxGoroutines int `json:"maxGoroutines"` // maximum number of goroutines
}
// Load .
func (c *Config) Load(name string) error {
data, err := ioutil.ReadFile(name)
if err != nil {
return err
}
err = json.Unmarshal(data, c)
if err != nil {
return err
}
return nil
}
// Store .
func (c *Config) Store(name string) {
data, err := json.Marshal(c)
if err != nil {
fmt.Println(err)
}
file, err := os.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0777)
if err != nil {
fmt.Println(err)
}
defer file.Close()
_, err = file.Write(data)
if err != nil {
fmt.Println(err)
}
}
|
// Copyright © 2018 The TK8 Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package templates
// Credentials is the template for the AWS credentials variables file;
// placeholders are filled from the matching Aws* config fields.
// NOTE(review): presumably rendered as a Terraform tfvars file — confirm
// against the template's consumer.
var Credentials = `
AWS_ACCESS_KEY_ID = "{{.AwsAccessKeyID}}"
AWS_SECRET_ACCESS_KEY = "{{.AwsSecretKey}}"
AWS_SSH_KEY_NAME = "{{.AwsAccessSSHKey}}"
AWS_DEFAULT_REGION = "{{.AwsDefaultRegion}}"
`
|
package util
// itemsPerPage is the fixed page size used by PageLength.
var itemsPerPage = 10

// PageLength returns the 1-based inclusive item range (start, stop) covered
// by the given page number: page 1 yields (1, 10), page 2 yields (11, 20),
// and so on. The original declared unused named results (n, n2) and built
// start via a confusing post-increment; the arithmetic here is equivalent.
func PageLength(page int) (start, stop int) {
	start = (page-1)*itemsPerPage + 1
	stop = (page - 1) * itemsPerPage + itemsPerPage
	return start, stop
}
// SqlOrder maps a UI sort key to its SQL ORDER BY column and direction.
// Unknown keys fall back to total sales descending; "recentlySold" passes the
// incoming value through with an empty direction. The original's result list
// `(string, from string)` declared a result literally named `string`
// (shadowing the type); the results are now unnamed, and the redundant
// `break` statements (Go switches don't fall through) are gone.
func SqlOrder(order string) (string, string) {
	var direction string
	switch order {
	case `recentlyAdded`:
		order = `DATA_CADASTRO`
		direction = `ASC`
	case `sales`:
		order = `TOTAL_VENDAS`
		direction = `DESC`
	case `description`:
		order = `DESCRICAO`
		direction = `ASC`
	case `recentlySold`:
		// Keep the caller-provided value; no direction is set.
	default:
		order = `TOTAL_VENDAS`
		direction = `DESC`
	}
	return order, direction
}
package testing
import (
"github.com/loft-sh/devspace/pkg/devspace/build"
"github.com/loft-sh/devspace/pkg/devspace/config/versions/latest"
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
"github.com/loft-sh/devspace/pkg/util/randutil"
)
// FakeController is the fake build controller
// FakeController is the fake build controller. BuiltImages maps each image
// name to the tag the fake pretends to have built.
type FakeController struct {
	BuiltImages map[string]string
}
// NewFakeController creates a new fake build controller
func NewFakeController(config *latest.Config) build.Controller {
builtImages := map[string]string{}
for _, image := range config.Images {
if image != nil && image.Docker == nil && image.Kaniko == nil && image.BuildKit == nil && image.Custom == nil {
continue
}
// This is necessary for parallel build otherwise we would override the image conf pointer during the loop
cImageConf := *image
imageName := cImageConf.Image
// Get image tag
imageTag := randutil.GenerateRandomString(7)
if len(image.Tags) > 0 {
imageTag = image.Tags[0]
}
builtImages[imageName] = imageTag
}
return &FakeController{
BuiltImages: builtImages,
}
}
// Build builds the images
func (f *FakeController) Build(ctx devspacecontext.Context, images []string, options *build.Options) error {
return nil
}
|
package wasp
import (
"syscall/js"
"./webgl"
dom "github.com/schabby/go-wasm-dom"
)
var jsDrawCallback, jsResizeCallback js.Func
// CreateWebGLApp sets up a full-page WebGL2 canvas and drives the supplied
// callbacks: init runs once at startup; resize runs once immediately and on
// every window resize; draw runs every animation frame and receives the
// frame timestamp argument. The package-level js.Func values outlive this
// call and are never Released, which matches an app-lifetime render loop.
func CreateWebGLApp(
	init func(webgl.RenderingContext),
	resize func(webgl.RenderingContext),
	draw func(webgl.RenderingContext, int)) {
	canvas := dom.FullPageCanvas()
	glDOM := canvas.JsValue().Call("getContext", "webgl2")
	gl := webgl.NewRenderingContext(glDOM)
	// call once, used to set up application
	init(gl)
	// call once manually, but also register resize event
	jsResizeCallback = js.FuncOf(func(this js.Value, inputs []js.Value) interface{} {
		// Match the canvas backing store to CSS size * devicePixelRatio so
		// rendering stays crisp on high-DPI displays.
		dpr := js.Global().Get("window").Get("devicePixelRatio").Float()
		rect := canvas.JsValue().Call("getBoundingClientRect")
		width := int(rect.Get("width").Float() * dpr)
		height := int(rect.Get("height").Float() * dpr)
		canvas.SetWidthI(width)
		canvas.SetHeightI(height)
		gl.Width = width
		gl.Height = height
		resize(gl)
		return nil
	})
	js.Global().Set("pageResize", jsResizeCallback)
	js.Global().Call("pageResize")
	js.Global().Get("window").Call("addEventListener", "resize", jsResizeCallback)
	jsDrawCallback = js.FuncOf(func(this js.Value, inputs []js.Value) interface{} {
		// inputs[0] is the requestAnimationFrame timestamp (milliseconds).
		draw(gl, inputs[0].Int())
		// Re-arm for the next frame to keep the loop running.
		js.Global().Call("requestAnimationFrame", jsDrawCallback)
		return nil
	})
	//defer jsDrawCallback.Release()
	// start rendering cycles
	js.Global().Call("requestAnimationFrame", jsDrawCallback)
}
// LoadImage creates an Image element for url and invokes callback with that
// element once its "load" event fires.
// NOTE(review): the js.FuncOf wrapper is never Released, so each call retains
// one callback for the page's lifetime — acceptable for one-shot loads, but
// worth confirming.
func LoadImage(url string, callback func(image js.Value)) {
	imageHandle := js.Global().Get("Image").New()
	imageHandle.Set("src", url)
	imageHandle.Call("addEventListener", "load", js.FuncOf(func(image js.Value, inputs []js.Value) interface{} {
		// The first argument is the handler's `this`, i.e. the image element.
		callback(image)
		return nil
	}))
}
|
package newfile
import (
"fmt"
)
// Return_value prints val to stdout.
// NOTE(review): despite the name it returns nothing; renaming would break
// existing callers, so the mismatch is only flagged here.
func Return_value(val string) {
	fmt.Println(val)
}
|
package main
import (
"fmt"
"time"
"net/http"
"log"
)
// Logger wraps h, logging the request URL, HTTP method and handling duration
// after each request completes.
func Logger(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		startTime := time.Now()
		h.ServeHTTP(w, r)
		// r.Method is a string; the original's %d verb printed garbage like
		// %!d(string=GET).
		log.Printf("%s %s %v", r.URL, r.Method, time.Since(startTime))
	})
}
// loginHanlder is a placeholder login handler that only logs that it ran.
// NOTE(review): "Hanlder" is a typo for "Handler"; renaming would require
// updating the reference in main, so it is only flagged here.
func loginHanlder(w http.ResponseWriter, r *http.Request) {
	fmt.Println("in hanlder func")
}
func main() {
// http.handler("/login", Logger(loginHanlder))
http.Handle("/login", Logger(http.HandlerFunc(loginHanlder)))
http.ListenAndServe(":9091", nil)
} |
package main
import (
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/neckhair/smn_to_influx/core"
)
// getJson performs an HTTP GET on url and JSON-decodes the response body into
// target. The body is always closed. Note that the HTTP status code is not
// checked; non-2xx bodies are decoded as-is.
func getJson(url string, target interface{}) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return json.NewDecoder(resp.Body).Decode(target)
}
// main validates the environment and CLI args, fetches the SMN record for the
// requested station code, and writes the converted record to InfluxDB.
func main() {
	// Fail fast when required configuration is missing.
	if os.Getenv("INFLUXDB_DATABASE") == "" {
		println("Please set INFLUXDB_DATABASE.")
		os.Exit(1)
	}
	if os.Getenv("INFLUXDB_URL") == "" {
		println("Please set INFLUXDB_URL.")
		os.Exit(1)
	}
	if len(os.Args) <= 1 {
		println("Usage: smn_to_influx <code>")
		os.Exit(1)
	}

	url := fmt.Sprintf("http://opendata.netcetera.com:80/smn/smn/%s", os.Args[1])
	record := &core.SmnRecord{Code: os.Args[1]}
	// The original ignored this error and pushed an empty record to Influx.
	if err := getJson(url, record); err != nil {
		fmt.Println("Could not fetch SMN record:", err)
		os.Exit(1)
	}

	influxConfig := &core.InfluxdbConfig{
		Url:      os.Getenv("INFLUXDB_URL"),
		Database: os.Getenv("INFLUXDB_DATABASE"),
		Username: os.Getenv("INFLUXDB_USERNAME"),
		Password: os.Getenv("INFLUXDB_PASSWORD")}

	convertedRecord := core.ConvertRecord(record)
	core.WriteToInflux(convertedRecord, influxConfig)
}
|
package cmd
import (
"github.com/spf13/cobra"
e "github.com/cloudposse/atmos/internal/exec"
u "github.com/cloudposse/atmos/pkg/utils"
)
// describeDependentsCmd produces a list of Atmos components in Atmos stacks
// that depend on the provided Atmos component. (The previous comment named
// describeAffectedCmd, a copy-paste from a sibling command.)
var describeDependentsCmd = &cobra.Command{
	Use:                "dependents",
	Aliases:            []string{"dependants"},
	Short:              "Execute 'describe dependents' command",
	Long:               `This command produces a list of Atmos components in Atmos stacks that depend on the provided Atmos component: atmos describe dependents [options]`,
	FParseErrWhitelist: struct{ UnknownFlags bool }{UnknownFlags: false},
	Run: func(cmd *cobra.Command, args []string) {
		err := e.ExecuteDescribeDependentsCmd(cmd, args)
		if err != nil {
			u.LogErrorAndExit(err)
		}
	},
}
// init wires up the flags for `atmos describe dependents`, marks --stack as
// required, and registers the command under `atmos describe`.
func init() {
	describeDependentsCmd.DisableFlagParsing = false
	describeDependentsCmd.PersistentFlags().StringP("stack", "s", "", "atmos describe dependents <component> -s <stack>")
	describeDependentsCmd.PersistentFlags().StringP("format", "f", "json", "The output format: atmos describe dependents <component> -s <stack> --format=json|yaml ('json' is default)")
	describeDependentsCmd.PersistentFlags().String("file", "", "Write the result to the file: atmos describe dependents <component> -s <stack> --file dependents.yaml")
	err := describeDependentsCmd.MarkPersistentFlagRequired("stack")
	if err != nil {
		u.LogErrorAndExit(err)
	}
	describeCmd.AddCommand(describeDependentsCmd)
}
|
package ravendb
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestCanSerializeDuration(t *testing.T) {
tests := []struct {
d time.Duration
exp string
}{
{time.Hour*24*5 + time.Hour*2, `"5.02:00:00"`},
{time.Millisecond * 5, `"00:00:00.0050000"`},
}
for _, test := range tests {
d2 := Duration(test.d)
d, err := jsonMarshal(d2)
assert.NoError(t, err)
got := string(d)
assert.Equal(t, test.exp, got)
}
}
// TestCanDeserializeDuration verifies that RavenDB duration strings with
// varying fractional-second precision unmarshal to the expected Duration.
func TestCanDeserializeDuration(t *testing.T) {
	cases := []struct {
		in   string
		want time.Duration
	}{
		{`"5.02:00:00"`, time.Hour*24*5 + time.Hour*2},
		{`"00:00:00.0050000"`, time.Millisecond * 5},
		{`"00:00:00.005000"`, time.Millisecond * 5},
		{`"00:00:00.00500"`, time.Millisecond * 5},
		{`"00:00:00.1"`, time.Millisecond * 100},
	}
	for _, c := range cases {
		var got Duration
		err := jsonUnmarshal([]byte(c.in), &got)
		assert.NoError(t, err)
		assert.Equal(t, Duration(c.want), got)
	}
}
|
package httpx_test
import (
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/socialpoint-labs/bsk/httpx"
"github.com/socialpoint-labs/bsk/metrics"
)
// TestInstrument_RequestsDuration exercises InstrumentDecorator: it serves a
// deliberately slow handler and checks that the recorded request timer spans
// roughly the handler's duration, carries the expected number of tags, and
// that the decorated handler still returns the original status code.
func TestInstrument_RequestsDuration(t *testing.T) {
	// waitTime is how long the handler sleeps; deltaTime bounds how far the
	// measured duration may drift before the assertion fails.
	const waitTime = 50 * time.Millisecond
	const deltaTime = 2 * waitTime
	t.Parallel()
	a := assert.New(t)
	for _, tc := range []struct {
		tags         metrics.Tags
		expectedTags int
	}{
		{
			// No caller tags: only the decorator's built-in tags.
			expectedTags: 4,
		},
		{
			// Two caller tags on top of the built-in four.
			tags: []metrics.Tag{
				metrics.NewTag("test", "test-value"),
				metrics.NewTag("foo", "bar"),
			},
			expectedTags: 6,
		},
	} {
		handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			time.Sleep(waitTime)
			w.WriteHeader(http.StatusNoContent)
		})
		recorder := metrics.NewRecorder()
		h := httpx.InstrumentDecorator(recorder)(handler)
		if tc.tags != nil {
			// Re-decorate with the caller tags for the tagged test case.
			h = httpx.InstrumentDecorator(recorder, tc.tags...)(handler)
		}
		w := httptest.NewRecorder()
		r, err := http.NewRequest("", "", nil)
		a.NoError(err)
		h.ServeHTTP(w, r)
		timer, _ := recorder.Get("http.request_duration").(*metrics.RecorderTimer)
		a.WithinDuration(timer.StartedTime(), timer.StoppedTime(), deltaTime)
		a.Len(timer.Tags(), tc.expectedTags)
		a.Equal(http.StatusNoContent, w.Code)
	}
}
|
package main
// execOpen is a stub for the "open" executor; it currently returns a
// placeholder message and never fails.
func execOpen(input string) (string, error) {
	return "TODO: Implement open executor.", nil
}
|
package sdk
import (
"bufio"
"github.com/sirupsen/logrus"
"os"
"regexp"
"strings"
"sync"
"worker/common"
)
// M3u8DownloadTool downloads an m3u8 video (playlist, optional key file and
// ts segments) into a local directory and tracks task progress.
type M3u8DownloadTool struct {
	name     string        // video name (used as the output subdirectory)
	saveDir  string        // base save directory
	link     string        // m3u8 playlist URL
	keyLink  string        // key file URL; empty when the stream is unencrypted
	prefix   string        // URL prefix prepended to ts segment names
	progress int           // completion percentage, 0-100
	status   common.STATUS // task state
	log      string        // accumulated error text
}
// GetStatus reports the task's current state.
func (tool *M3u8DownloadTool) GetStatus() common.STATUS {
	return tool.status
}

// GetProgress reports the completion percentage (0-100).
func (tool *M3u8DownloadTool) GetProgress() int {
	return tool.progress
}

// GetLog returns the accumulated error log text.
func (tool *M3u8DownloadTool) GetLog() string {
	return tool.log
}
// Run downloads an m3u8 video: the playlist, the optional key file, then all
// ts segments in parallel, and finally rewrites the playlist so key/segment
// URIs point at the local flat files. Status, progress and log fields are
// updated as it goes; any failure marks the task failed and returns.
func (tool *M3u8DownloadTool) Run() {
	logrus.Debug("开始下载视频")
	videoDir := strings.TrimRight(tool.saveDir, "/") + "/" + tool.name
	m3u8File := videoDir + "/index.m3u8"

	// 1. Download the m3u8 playlist.
	if err := common.DownLoadToFile(tool.link, m3u8File); err != nil {
		tool.log += err.Error()
		tool.status = common.TASK_FAILED
		return
	}
	logrus.Debug("下载m3u8文件完成")

	// 2. Download the key file, if the stream is encrypted.
	if tool.keyLink != "" {
		if err := common.DownLoadToFile(tool.keyLink, videoDir+"/"+"key.key"); err != nil {
			tool.log += err.Error()
			tool.status = common.TASK_FAILED
			return
		}
	}
	logrus.Debug("下载key文件完成")

	// 3. Download the ts segments in parallel.
	rowList, err := getRowList(m3u8File)
	if err != nil {
		tool.log += err.Error()
		tool.status = common.TASK_FAILED
		return
	}
	tsList := getTsList(rowList)
	logrus.Debug("ts文件", tsList)

	prefixUrl := strings.TrimRight(tool.prefix, "/")
	var wg sync.WaitGroup
	var lock sync.Mutex
	// Progress step per segment: divide by the segment count, not the total
	// playlist line count — the original used len(rowList), undercounting
	// progress because directives outnumber segments (and it would divide by
	// zero on an empty playlist).
	step := 0
	if len(tsList) > 0 {
		step = 100 / len(tsList)
	}
	for _, ts := range tsList {
		wg.Add(1)
		go DownLoadToFileParallel(prefixUrl+"/"+ts, videoDir+"/"+ts, &wg, &lock, tool, step)
	}
	wg.Wait()
	tool.progress = 100
	logrus.Debug("ts文件下载完成")

	// 4. Rewrite the key URI and segment paths to local flat filenames.
	for idx, row := range rowList {
		if strings.HasPrefix(row, "#EXT-X-KEY") {
			re, _ := regexp.Compile("URI=\".*\"")
			row = re.ReplaceAllString(row, "URI=\"key.key\"")
		}
		if strings.HasSuffix(row, ".ts") {
			colList := strings.Split(row, "/")
			row = colList[len(colList)-1]
		}
		rowList[idx] = row
	}

	// Overwrite the playlist. O_TRUNC is required: the rewritten content is
	// shorter than the original, and without truncation stale bytes would
	// remain at the end of the file, corrupting the playlist.
	file, err := os.OpenFile(m3u8File, os.O_WRONLY|os.O_TRUNC, 0777)
	if err != nil {
		tool.status = common.TASK_FAILED
		tool.log += err.Error()
		return
	}
	newContent := strings.Join(rowList, "\n")
	if _, err := file.WriteString(newContent); err != nil {
		tool.status = common.TASK_FAILED
		tool.log += err.Error()
		file.Close() // don't leak the handle on the error path
		return
	}
	if err := file.Close(); err != nil {
		tool.status = common.TASK_FAILED
		tool.log += err.Error()
		return
	}
	tool.status = common.TASK_SUCCESS
}
// getTsList extracts the bare segment filenames from playlist rows. Rows like
// "/20191221/Y0ViQgW4/800kb/hls/VJkC1KcU.ts" would create nested directories
// when saved, so only the final path component ("VJkC1KcU.ts") is kept; the
// playlist itself is rewritten to match by the caller.
func getTsList(rowList []string) []string {
	var tsList []string
	for _, row := range rowList {
		if !strings.HasSuffix(row, "ts") {
			continue
		}
		// Named `parts` — the original shadowed the rowList parameter here.
		parts := strings.Split(row, "/")
		tsList = append(tsList, parts[len(parts)-1])
	}
	return tsList
}
func getRowList(file string) ([]string, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
s := bufio.NewScanner(f)
var rowList []string
for s.Scan() {
rowList = append(rowList, s.Text())
}
if err = f.Close(); err != nil {
return nil, err
}
return rowList, nil
}
// DownLoadToFileParallel downloads url to file and records the outcome on
// tool under lock: progress advances by step on success, and the task is
// marked failed (with the error appended to the log) otherwise.
// waitGroup.Done is always signalled.
func DownLoadToFileParallel(
	url string,
	file string,
	waitGroup *sync.WaitGroup,
	lock *sync.Mutex,
	tool *M3u8DownloadTool,
	step int,
) {
	defer waitGroup.Done()

	// Download outside the lock so slow transfers don't serialize each other.
	err := common.DownLoadToFile(url, file)

	// Lock/Unlock pairing is now local and obvious. The original locked
	// inside each branch and unlocked in a defer, which would panic with
	// "unlock of unlocked mutex" if DownLoadToFile itself panicked before
	// either branch ran.
	lock.Lock()
	defer lock.Unlock()
	if err != nil {
		tool.status = common.TASK_FAILED
		tool.log += err.Error()
		return
	}
	tool.progress += step
}
// NewM3u8DownloadTool builds a download task in the TASK_WAITING state with
// zero progress and an empty log.
func NewM3u8DownloadTool(
	name string,
	saveDir string,
	link string,
	keyLink string,
	prefix string,
) *M3u8DownloadTool {
	// Keyed fields (the original used a positional literal) guard against
	// silent mix-ups if the struct layout ever changes.
	return &M3u8DownloadTool{
		name:     name,
		saveDir:  saveDir,
		link:     link,
		keyLink:  keyLink,
		prefix:   prefix,
		progress: 0,
		status:   common.TASK_WAITING,
		log:      "",
	}
}
|
package control
import (
"JVM-GO/ch05/instructions/base"
"JVM-GO/ch05/rtda"
)
/*
tableswitch
<0-3 byte pad>
defaultbyte1
defaultbyte2
defaultbyte3
defaultbyte4
lowbyte1
lowbyte2
lowbyte3
lowbyte4
highbyte1
highbyte2
highbyte3
highbyte4
jump offsets...
*/
// Access jump table by index and jump
// TABLE_SWITCH implements the JVM tableswitch instruction: access jump table
// by index and jump.
type TABLE_SWITCH struct {
	defaultOffset int32   // bytecode offset for the default branch
	low           int32   // lowest case value covered by the table
	high          int32   // highest case value covered by the table
	jumpOffsets   []int32 // one branch offset per case value in [low, high]
}
// FetchOperands reads the tableswitch operands from the bytecode stream.
func (self *TABLE_SWITCH) FetchOperands(reader *base.BytecodeReader) {
	// 0-3 bytes of padding follow the opcode so that defaultOffset starts at
	// an address that is a multiple of 4.
	reader.SkipPadding()
	// defaultOffset is the branch offset for the default case; low and high
	// bound the covered case values; jumpOffsets holds high-low+1 int32
	// branch offsets, one per case value.
	self.defaultOffset = reader.ReadInt32()
	self.low = reader.ReadInt32()
	self.high = reader.ReadInt32()
	jumpOffsetsCount := self.high - self.low + 1
	self.jumpOffsets = reader.ReadInt32s(jumpOffsetsCount)
}
// Execute pops the switch value and branches through the jump table, falling
// back to the default offset for out-of-range values.
func (self *TABLE_SWITCH) Execute(frame *rtda.Frame) {
	// Pop the int operand being switched on.
	index := frame.OperandStack().PopInt()
	var offset int
	// If it falls within [low, high], take the table entry for it...
	if index >= self.low && index <= self.high {
		offset = int(self.jumpOffsets[index-self.low])
	} else {
		// ...otherwise take the default branch.
		offset = int(self.defaultOffset)
	}
	base.Branch(frame, offset)
}
|
package postgresql
import (
// "database/sql"
"errors"
"fmt"
"log"
)
// Default is the SQL literal used where a column's DEFAULT value should apply.
var Default string = "DEFAULT"
// DefaultFloat64 is a placeholder float value — presumably meaning "not
// supplied"; confirm against callers.
var DefaultFloat64 float64 = -1
// DefaultDate is the zero-date placeholder string.
var DefaultDate string = "0001-01-01"
// Recover is intended to be deferred: it swallows a panic, printing it to
// stdout and to the log instead of letting the process die.
func Recover() {
	if r := recover(); r != nil {
		fmt.Println("Panic:", r)
		log.Println("\n*** Panic:", r)
	}
}
// Select runs the prepared multi-row query identified by "Query.Table.Type"
// with the given args and appends every scanned metrics-step row to
// MSR.MS_ARRAY. The last error is also kept on MSR.Err.
func (MSR *Metrics_step_request) Select(Query, Table, Type string, Value ...interface{}) error {
	if MSR.Roow.Rows, MSR.Err = Requests.Query(Query+"."+Table+"."+Type, Value...); MSR.Err != nil {
		return MSR.Err
	}
	defer MSR.Roow.Rows.Close()
	for MSR.Roow.Rows.Next() {
		if MSR.Err = MSR.Roow.Rows.Scan(&MSR.MS.ID, &MSR.MS.Name, &MSR.MS.Value, &MSR.MS.Duration); MSR.Err != nil { // find out which metrics exist for which steps
			log.Println(MSR.Err.Error())
			return MSR.Err
		}
		MSR.MS_ARRAY = append(MSR.MS_ARRAY, MSR.MS)
	}
	return nil
}
// Select runs the prepared single-row query "Query.Table.Type" and scans the
// result into M.M. The error (possibly nil) is also kept on M.Err.
func (M *Metrics_request) Select(Query, Table, Type string, Value ...interface{}) error {
	if M.Roow.Row, M.Err = Requests.QueryRow(Query+"."+Table+"."+Type, Value...); M.Err != nil {
		return M.Err
	}
	if M.Err = M.Roow.Row.Scan(&M.M.ID, &M.M.OwnHash, &M.M.OwnName, &M.M.Date, &M.M.Value, &M.M.Step_ID, &M.M.Parameter_ID); M.Err != nil {
		return M.Err
	}
	return M.Err // find out which metrics exist for which steps
}
// Action executes the prepared statement "Query.Table.Type" through the
// transactional executor and records the resulting error on M.Err.
func (M *Metrics_request) Action(Query, Table, Type string, Value ...interface{}) error {
	M.Err = Requests.ExecTransact(Query+"."+Table+"."+Type, Value...)
	return M.Err
}
/* Транзакции */
//Открывает транзакцию
// Begin opens a database transaction unless one is already open.
// NOTE(review): when T.Tx is already set, err stays nil and the "opened"
// message is still logged even though no new transaction started — confirm
// that this is intended.
func (T *Transaction) Begin() error {
	var err error
	if T.Tx == nil {
		T.Tx, err = db.Begin()
	}
	if err == nil {
		log.Println("Открыл транзакцию")
		fmt.Println("Открыл транзакцию\n")
	}
	return err
}
//Откатывает транзакцию
// RollBack rolls back the open transaction, if any.
// NOTE(review): the Rollback error is ignored here; T.Tx is also not cleared,
// unlike Commit — confirm both are intentional.
func (T *Transaction) RollBack() {
	if T.Tx != nil {
		T.Tx.Rollback()
		log.Println("Откатил транзакцию")
		fmt.Println("\nОткатил транзакцию")
	}
}
//Закрывает транзакцию
// Commit commits the open transaction and clears T.Tx on success so a new
// one can be started.
// NOTE(review): calling Commit with T.Tx == nil will dereference nil —
// callers must Begin first.
func (T *Transaction) Commit() error {
	err := T.Tx.Commit()
	if err == nil {
		T.Tx = nil
		log.Println("Закрыл транзакцию")
		fmt.Println("\nЗакрыл транзакцию")
	}
	return err
}
//Один запрос в транзакции
// Transaction_One executes the single staged request in T.DataOne inside the
// already-open transaction. With RETURNING true the statement's returned
// value is scanned into T.HashData; otherwise it is simply executed.
// T.DataOne.Values is cleared afterwards in both cases.
func (T *Transaction) Transaction_One(RETURNING bool) error {
	var err error
	T.HashData = nil
	log.Println("\nЗапрос (TO): ", T.DataOne.Query+"."+T.DataOne.Table+"."+T.DataOne.Type, "\nПараметры: ", T.DataOne.Values)
	// Reject request keys that were never prepared.
	if _, ok := Requests.requestsList[T.DataOne.Query+"."+T.DataOne.Table+"."+T.DataOne.Type]; !ok {
		return errors.New("Missmatch request!")
	}
	if RETURNING {
		err = T.Tx.Stmt(Requests.requestsList[T.DataOne.Query+"."+T.DataOne.Table+"."+T.DataOne.Type]).QueryRow(T.DataOne.Values...).Scan(&T.HashData)
	} else {
		_, err = T.Tx.Stmt(Requests.requestsList[T.DataOne.Query+"."+T.DataOne.Table+"."+T.DataOne.Type]).Exec(T.DataOne.Values...)
	}
	T.DataOne.Values = nil
	return err
}
// Transaction_QTTV_One executes a single explicitly-specified request
// ("Query.Table.Type" with Values) inside the already-open transaction.
// With RETURNING true the single returned value is scanned into T.HashData.
// Unlike Transaction_One, the staged data on T is left untouched.
func (T *Transaction) Transaction_QTTV_One(RETURNING bool, Query, Table, Type string, Values ...interface{}) error {
	var err error
	T.HashData = nil
	//log.Println("Запрос (TQTTVO): ", Query+"."+Table+"."+Type, "\nПараметры: ", Values)
	// Reject request keys that were never prepared.
	if _, ok := Requests.requestsList[Query+"."+Table+"."+Type]; !ok {
		return errors.New("Missmatch request!")
	}
	if RETURNING {
		err = T.Tx.Stmt(Requests.requestsList[Query+"."+Table+"."+Type]).QueryRow(Values...).Scan(&T.HashData)
	} else {
		_, err = T.Tx.Stmt(Requests.requestsList[Query+"."+Table+"."+Type]).Exec(Values...)
	}
	return err
}
//Полностью закрывает используемую транзакцию
// Transaction executes all staged requests in T.Data. With OPEN true it
// opens the transaction itself (if not already open), commits on success and
// relies on the deferred Rollback to undo work on error; with OPEN false it
// runs inside a caller-managed transaction and clears the staged data when
// done.
func (T *Transaction) Transaction(OPEN bool) error { // true - close the transaction after execution
	var err error
	if OPEN { // if we must open (and later close) it ourselves
		if T.Tx == nil { // if it has not been opened yet
			if T.Tx, err = db.Begin(); err != nil {
				return err
			}
			// Rollback after a successful Commit is a no-op, so deferring it
			// here safely covers every error return below.
			defer T.Tx.Rollback()
		}
	}
	for _, val := range T.Data {
		log.Println("\nЗапрос (T): ", val.Query+"."+val.Table+"."+val.Type, "\nПараметры: ", val.Values)
		// Reject request keys that were never prepared.
		if _, ok := Requests.requestsList[val.Query+"."+val.Table+"."+val.Type]; !ok {
			return errors.New("Missmatch request!")
		}
		//fmt.Println("В транзакции:", val.Values)
		if _, err = T.Tx.Stmt(Requests.requestsList[val.Query+"."+val.Table+"."+val.Type]).Exec(val.Values...); err != nil {
			return err
		}
	}
	if OPEN { // commit when we own the transaction
		return T.Tx.Commit()
	}
	// Caller-managed transaction: reset the staged requests and scratch data.
	T.Data, T.DataOne, T.HashData = T.Data[0:0], TransactionAction{}, nil
	return nil
}
|
package main
import (
"cloud.google.com/go/civil"
"errors"
"github.com/manifoldco/promptui"
"log"
"strconv"
)
// handle panics on any non-nil error, distinguishing a promptui abort (the
// user cancelled the prompt) from other failures in the panic message.
func handle(err error) {
	if err != nil {
		if err == promptui.ErrAbort {
			log.Panic("aborted", err)
		}
		log.Panic(err)
	}
}
// validateInt returns an error when arg does not parse as a base-10 integer.
func validateInt(arg string) error {
	// Atoi's error (nil on success) is exactly the result; the original's
	// check-then-return-nil dance was redundant.
	_, err := strconv.Atoi(arg)
	return err
}
// validateEffort checks that effort parses as an integer in [0, 100].
func validateEffort(effort string) error {
	i, err := strconv.Atoi(effort)
	if err != nil {
		return err
	}
	if i < 0 || i > 100 {
		// Error strings are lowercase and unpunctuated per Go convention
		// (staticcheck ST1005); the original started with a capital.
		return errors.New("must be between 0 and 100")
	}
	return nil
}
// validateDuration returns an error unless duration parses as a civil time
// of day (see cloud.google.com/go/civil.ParseTime).
func validateDuration(duration string) error {
	_, err := civil.ParseTime(duration)
	return err
}
|
package frequence
import (
"testing"
)
// BenchmarkIoFileInfo measures IoFileInfo over a large local text file.
// NOTE(review): the absolute Windows path makes this benchmark machine-
// specific; it will not work anywhere that file doesn't exist.
func BenchmarkIoFileInfo(b *testing.B) {
	for n := 0; n < b.N; n++ {
		IoFileInfo("C:\\Users\\Gamer\\go\\src\\is105gruppe20\\is105-ica03\\misc\\pg100.txt")
	}
}
|
package xmodel
import "github.com/ionous/sashimi/util/ident"
// ParserAction commands that beome an action
// ParserAction holds the parser commands that become an action.
type ParserAction struct {
	Action ident.Id
	Commands []string
}
|
package sys
import (
"fmt"
"io/ioutil"
"os"
)
// FS is the interface to a file system.
type FS interface {
// ReadAll gets the contents of filename, or an error if the file didn't exist or there was an
// error reading it.
ReadFile(filename string) ([]byte, error)
// RemoveAll removes all of the files under the directory at name. It behaves similarly to the
// func of the same name in the os package (https://godoc.org/os#RemoveAll).
RemoveAll(name string) error
}
// RealFS returns an FS object that interacts with the real local filesystem.
func RealFS() FS {
	return &realFS{}
}

// realFS implements FS by delegating directly to ioutil/os.
type realFS struct{}

// ReadFile is the interface implementation for FS.
func (r *realFS) ReadFile(name string) ([]byte, error) {
	return ioutil.ReadFile(name)
}

// RemoveAll is the interface implementation for FS.
func (r *realFS) RemoveAll(name string) error {
	return os.RemoveAll(name)
}
// FakeFileNotFound is the error returned by FakeFS when a requested file
// isn't found.
type FakeFileNotFound struct {
	Filename string
}

// Error implements the error interface, naming the missing fake file.
func (f FakeFileNotFound) Error() string {
	return "Fake file " + f.Filename + " not found"
}

// FakeFS is an in-memory FS implementation backed by a map from filename to
// contents.
type FakeFS struct {
	Files map[string][]byte
}

// NewFakeFS returns a FakeFS with no files.
func NewFakeFS() *FakeFS {
	return &FakeFS{Files: map[string][]byte{}}
}

// ReadFile is the FS interface implementation. It returns FakeFileNotFound
// when name is absent from the in-memory 'filesystem' of f.
func (f *FakeFS) ReadFile(name string) ([]byte, error) {
	if contents, present := f.Files[name]; present {
		return contents, nil
	}
	return nil, FakeFileNotFound{Filename: name}
}

// RemoveAll is the FS interface implementation: it deletes the named entry,
// or reports FakeFileNotFound when it doesn't exist.
func (f *FakeFS) RemoveAll(name string) error {
	if _, present := f.Files[name]; !present {
		return FakeFileNotFound{Filename: name}
	}
	delete(f.Files, name)
	return nil
}
|
// Slice2 project doc.go
/*
Slice2 document
*/
package main
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver
import (
"context"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// configGossipTTL of 0 means gossiped config info never expires.
const configGossipTTL = 0 // does not expire
// gossipFirstRange acquires the replica mutex and gossips the sentinel and
// first range descriptor via gossipFirstRangeLocked.
func (r *Replica) gossipFirstRange(ctx context.Context) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.gossipFirstRangeLocked(ctx)
}
// gossipFirstRangeLocked gossips the cluster-ID sentinel and the first range
// descriptor. r.mu must be held by the caller.
func (r *Replica) gossipFirstRangeLocked(ctx context.Context) {
	// Gossip is not provided for the bootstrap store and for some tests.
	if r.store.Gossip() == nil {
		return
	}
	log.Event(ctx, "gossiping sentinel and first range")
	if log.V(1) {
		log.Infof(ctx, "gossiping sentinel from store %d, r%d", r.store.StoreID(), r.RangeID)
	}
	// Gossip failures are logged rather than returned: gossip is best-effort.
	if err := r.store.Gossip().AddInfo(
		gossip.KeySentinel, r.store.ClusterID().GetBytes(),
		r.store.cfg.SentinelGossipTTL()); err != nil {
		log.Errorf(ctx, "failed to gossip sentinel: %+v", err)
	}
	if log.V(1) {
		log.Infof(ctx, "gossiping first range from store %d, r%d: %s",
			r.store.StoreID(), r.RangeID, r.mu.state.Desc.Replicas())
	}
	if err := r.store.Gossip().AddInfoProto(
		gossip.KeyFirstRangeDescriptor, r.mu.state.Desc, configGossipTTL); err != nil {
		log.Errorf(ctx, "failed to gossip first range metadata: %+v", err)
	}
}
// shouldGossip returns true if this replica should be gossiping. Gossip is
// inherently inconsistent and asynchronous, we're using the lease as a way to
// ensure that only one node gossips at a time.
func (r *Replica) shouldGossip(ctx context.Context) bool {
	// Holding a valid lease at the current clock reading makes this replica
	// the designated gossiper for its range.
	return r.OwnsValidLease(ctx, r.store.Clock().NowAsClockTimestamp())
}
// MaybeGossipSystemConfig scans the entire SystemConfig span and gossips it.
// Further calls come from the trigger on EndTxn or range lease acquisition.
//
// Note that MaybeGossipSystemConfig gossips information only when the
// lease is actually held. The method does not request a range lease
// here since RequestLease and applyRaftCommand call the method and we
// need to avoid deadlocking in redirectOnOrAcquireLease.
//
// MaybeGossipSystemConfig must only be called from Raft commands
// (which provide the necessary serialization to avoid data races).
//
// TODO(nvanbenschoten,bdarnell): even though this is best effort, we
// should log louder when we continually fail to gossip system config.
func (r *Replica) MaybeGossipSystemConfig(ctx context.Context) error {
	// Preconditions: gossip wired up, replica initialized, this replica holds
	// the system config span's start key, and this replica holds the lease.
	if r.store.Gossip() == nil {
		log.VEventf(ctx, 2, "not gossiping system config because gossip isn't initialized")
		return nil
	}
	if !r.IsInitialized() {
		log.VEventf(ctx, 2, "not gossiping system config because the replica isn't initialized")
		return nil
	}
	if !r.ContainsKey(keys.SystemConfigSpan.Key) {
		log.VEventf(ctx, 3,
			"not gossiping system config because the replica doesn't contain the system config's start key")
		return nil
	}
	if !r.shouldGossip(ctx) {
		log.VEventf(ctx, 2, "not gossiping system config because the replica doesn't hold the lease")
		return nil
	}

	// TODO(marc): check for bad split in the middle of the SystemConfig span.
	loadedCfg, err := r.loadSystemConfig(ctx)
	if err != nil {
		if errors.Is(err, errSystemConfigIntent) {
			// Intents block the load; remember the failure so the intent
			// resolution trigger can retry the gossip later.
			log.VEventf(ctx, 2, "not gossiping system config because intents were found on SystemConfigSpan")
			r.markSystemConfigGossipFailed()
			return nil
		}
		return errors.Wrap(err, "could not load SystemConfig span")
	}

	if gossipedCfg := r.store.Gossip().GetSystemConfig(); gossipedCfg != nil && gossipedCfg.Equal(loadedCfg) &&
		r.store.Gossip().InfoOriginatedHere(gossip.KeySystemConfig) {
		log.VEventf(ctx, 2, "not gossiping unchanged system config")
		// Clear the failure bit if all intents have been resolved but there's
		// nothing new to gossip.
		r.markSystemConfigGossipSuccess()
		return nil
	}

	log.VEventf(ctx, 2, "gossiping system config")
	if err := r.store.Gossip().AddInfoProto(gossip.KeySystemConfig, loadedCfg, 0); err != nil {
		return errors.Wrap(err, "failed to gossip system config")
	}
	r.markSystemConfigGossipSuccess()
	return nil
}
// MaybeGossipSystemConfigIfHaveFailure is a trigger to gossip the system config
// due to an abort of a transaction keyed in the system config span. It will
// call MaybeGossipSystemConfig if failureToGossipSystemConfig is true.
func (r *Replica) MaybeGossipSystemConfigIfHaveFailure(ctx context.Context) error {
	// Snapshot the failure bit under the read lock, then release before the
	// (potentially slow) gossip attempt.
	r.mu.RLock()
	failed := r.mu.failureToGossipSystemConfig
	r.mu.RUnlock()
	if !failed {
		return nil
	}
	return r.MaybeGossipSystemConfig(ctx)
}
// MaybeGossipNodeLiveness gossips information for all node liveness
// records stored on this range. To scan and gossip, this replica
// must hold the lease to a range which contains some or all of the
// node liveness records. After scanning the records, it checks
// against what's already in gossip and only gossips records which
// are out of date.
func (r *Replica) MaybeGossipNodeLiveness(ctx context.Context, span roachpb.Span) error {
	if r.store.Gossip() == nil || !r.IsInitialized() {
		return nil
	}
	if !r.ContainsKeyRange(span.Key, span.EndKey) || !r.shouldGossip(ctx) {
		return nil
	}

	// Scan the liveness span directly on this replica's engine.
	ba := roachpb.BatchRequest{}
	ba.Timestamp = r.store.Clock().Now()
	ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeaderFromSpan(span)})
	// Call evaluateBatch instead of Send to avoid reacquiring latches.
	rec := NewReplicaEvalContext(r, todoSpanSet)
	rw := r.Engine().NewReadOnly()
	defer rw.Close()
	// NOTE(review): "lul" presumably stands for the local uncertainty limit
	// parameter; the zero timestamp disables it here — confirm.
	br, result, pErr :=
		evaluateBatch(ctx, kvserverbase.CmdIDKey(""), rw, rec, nil, &ba, hlc.Timestamp{} /* lul */, true /* readOnly */)
	if pErr != nil {
		return errors.Wrapf(pErr.GoError(), "couldn't scan node liveness records in span %s", span)
	}
	if len(result.Local.EncounteredIntents) > 0 {
		return errors.Errorf("unexpected intents on node liveness span %s: %+v", span, result.Local.EncounteredIntents)
	}

	kvs := br.Responses[0].GetInner().(*roachpb.ScanResponse).Rows
	log.VEventf(ctx, 2, "gossiping %d node liveness record(s) from span %s", len(kvs), span)
	for _, kv := range kvs {
		var kvLiveness, gossipLiveness livenesspb.Liveness
		if err := kv.Value.GetProto(&kvLiveness); err != nil {
			return errors.Wrapf(err, "failed to unmarshal liveness value %s", kv.Key)
		}
		key := gossip.MakeNodeLivenessKey(kvLiveness.NodeID)
		// Look up liveness from gossip; skip gossiping anew if unchanged.
		if err := r.store.Gossip().GetInfoProto(key, &gossipLiveness); err == nil {
			if gossipLiveness == kvLiveness && r.store.Gossip().InfoOriginatedHere(key) {
				continue
			}
		}
		if !r.ClusterSettings().Version.IsActive(ctx, clusterversion.NodeMembershipStatus) {
			// We can't transmit liveness records with a backwards incompatible
			// representation unless we're told by the user that there are no
			// pre-v20.1 nodes around. We should never get here.
			if kvLiveness.Membership.Decommissioned() {
				log.Fatal(ctx, "programming error: illegal membership status: decommissioned")
			}
		}
		if err := r.store.Gossip().AddInfoProto(key, &kvLiveness, 0); err != nil {
			return errors.Wrapf(err, "failed to gossip node liveness (%+v)", kvLiveness)
		}
	}
	return nil
}
// errSystemConfigIntent is returned by loadSystemConfig when an intent was
// found on the span; callers are expected to retry later.
var errSystemConfigIntent = errors.New("must retry later due to intent on SystemConfigSpan")
// loadSystemConfig scans the system config span and returns the system
// config.
func (r *Replica) loadSystemConfig(ctx context.Context) (*config.SystemConfigEntries, error) {
ba := roachpb.BatchRequest{}
// An inconsistent read lets the scan proceed past intents; any intents
// encountered are collected and handled explicitly below.
ba.ReadConsistency = roachpb.INCONSISTENT
ba.Timestamp = r.store.Clock().Now()
ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeaderFromSpan(keys.SystemConfigSpan)})
// Call evaluateBatch instead of Send to avoid reacquiring latches.
rec := NewReplicaEvalContext(r, todoSpanSet)
rw := r.Engine().NewReadOnly()
defer rw.Close()
br, result, pErr := evaluateBatch(
ctx, kvserverbase.CmdIDKey(""), rw, rec, nil, &ba, hlc.Timestamp{} /* lul */, true, /* readOnly */
)
if pErr != nil {
return nil, pErr.GoError()
}
if intents := result.Local.DetachEncounteredIntents(); len(intents) > 0 {
// There were intents, so what we read may not be consistent. Attempt
// to nudge the intents in case they're expired; next time around we'll
// hopefully have more luck.
// This is called from handleReadWriteLocalEvalResult (with raftMu
// locked), so disallow synchronous processing (which blocks that mutex
// for too long and is a potential deadlock).
if err := r.store.intentResolver.CleanupIntentsAsync(ctx, intents, false /* allowSync */); err != nil {
log.Warningf(ctx, "%v", err)
}
return nil, errSystemConfigIntent
}
// Repackage the scanned KVs as the system config entries.
kvs := br.Responses[0].GetInner().(*roachpb.ScanResponse).Rows
sysCfg := &config.SystemConfigEntries{}
sysCfg.Values = kvs
return sysCfg, nil
}
// getLeaseForGossip tries to obtain a range lease. Only one of the replicas
// should gossip; the bool returned indicates whether it's us.
func (r *Replica) getLeaseForGossip(ctx context.Context) (bool, *roachpb.Error) {
// Without a gossip instance (some tests) or before initialization there is
// nothing to do; report an error to the caller (note: not a silent noop).
if r.store.Gossip() == nil || !r.IsInitialized() {
return false, roachpb.NewErrorf("no gossip or range not initialized")
}
var hasLease bool
var pErr *roachpb.Error
// Run under the stopper so lease acquisition cannot outlive shutdown.
if err := r.store.Stopper().RunTask(
ctx, "storage.Replica: acquiring lease to gossip",
func(ctx context.Context) {
// Check for or obtain the lease, if none active.
_, pErr = r.redirectOnOrAcquireLease(ctx)
hasLease = pErr == nil
if pErr != nil {
switch e := pErr.GetDetail().(type) {
case *roachpb.NotLeaseHolderError:
// NotLeaseHolderError means there is an active lease, but only if
// the lease holder is set; otherwise, it's likely a timeout.
if e.LeaseHolder != nil {
pErr = nil
}
default:
// Any other error is worth being logged visibly.
log.Warningf(ctx, "could not acquire lease for range gossip: %s", e)
}
}
}); err != nil {
pErr = roachpb.NewError(err)
}
return hasLease, pErr
}
// maybeGossipFirstRange adds the sentinel and first range metadata to gossip
// if this is the first range and a range lease can be obtained. The Store
// calls this periodically on first range replicas.
func (r *Replica) maybeGossipFirstRange(ctx context.Context) *roachpb.Error {
if !r.IsFirstRange() {
return nil
}
// When multiple nodes are initialized with overlapping Gossip addresses, they all
// will attempt to gossip their cluster ID. This is a fairly obvious misconfiguration,
// so we fatal below (log.Fatalf, not a returned error).
if gossipClusterID, err := r.store.Gossip().GetClusterID(); err == nil {
if gossipClusterID != r.store.ClusterID() {
log.Fatalf(
ctx, "store %d belongs to cluster %s, but attempted to join cluster %s via gossip",
r.store.StoreID(), r.store.ClusterID(), gossipClusterID)
}
}
// Gossip the cluster ID from all replicas of the first range; there
// is no expiration on the cluster ID.
if log.V(1) {
log.Infof(ctx, "gossiping cluster ID %q from store %d, r%d", r.store.ClusterID(),
r.store.StoreID(), r.RangeID)
}
// A gossip failure here is logged but non-fatal; the periodic caller retries.
if err := r.store.Gossip().AddClusterID(r.store.ClusterID()); err != nil {
log.Errorf(ctx, "failed to gossip cluster ID: %+v", err)
}
// Only the lease holder gossips the first-range metadata.
hasLease, pErr := r.getLeaseForGossip(ctx)
if pErr != nil {
return pErr
} else if !hasLease {
return nil
}
r.gossipFirstRange(ctx)
return nil
}
|
/*
* binary_search.go: Binary search for slices containing integers.
*
* For Introduction to Go, Spring 2010
* Kimmo Kulovesi <kkuloves@cs.helsinki.fi>
*/
package main
import (
"fmt"
)
// search returns the index of the (or an) element equal to e in the sorted
// slice s, or -1 when e is absent.
func search(s []int, e int) int {
	lo, hi := 0, len(s)
	for lo < hi {
		// Overflow-safe midpoint for very large slices.
		mid := lo + (hi-lo)/2
		if s[mid] < e {
			// e, if present, lies strictly right of mid.
			lo = mid + 1
			continue
		}
		// e, if present, is at mid or to its left.
		hi = mid
	}
	// Equality is checked once here rather than inside the loop.
	if lo == len(s) || s[lo] != e {
		return -1
	}
	return lo
}
// indexInTestData computes the expected index of e in generated test data
// laid out as data[i] = i*2, restricted to the first sliceLength elements.
// It returns -1 when e is absent (odd, negative, or beyond the slice).
func indexInTestData(e int, sliceLength int) int {
	if e%2 != 0 {
		return -1
	}
	idx := e / 2
	if idx < 0 || idx >= sliceLength {
		return -1
	}
	return idx
}
// The size of the test array
const testDataLength = 5

// main tests the binary search implementation against slices of every
// length, probing values just beyond, inside, and below the data range.
func main() {
	data := make([]int, testDataLength)
	for i := range data {
		// data[i] = i*2 yields a known, sorted sequence of even numbers.
		data[i] = 2 * i
	}
	fmt.Printf("Test data: %v\n", data)
	errorCount := 0
	for length := 0; length <= len(data); length++ {
		s := data[:length]
		fmt.Printf("\nTesting slice of length %d\n", length)
		for i := length * 2; i >= -1; i-- {
			foundAt := search(s, i)
			want := indexInTestData(i, len(s))
			if foundAt != want {
				fmt.Printf("ERROR: Got %d for element %d, should have %d\n",
					foundAt, i, want)
				errorCount++
				continue
			}
			if foundAt >= 0 {
				fmt.Printf("%d is in index %d\n", i, foundAt)
			} else {
				fmt.Printf("%d is not present\n", i)
			}
		}
	}
	fmt.Printf("\nThere were %d errors.\n", errorCount)
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tracing
import "context"
// activeSpanKey is the context key under which the active *Span is stored.
type activeSpanKey struct{}
// noCtx is a singleton that we use internally to unify code paths that only
// optionally take a Context. The specific construction here does not matter,
// the only thing we need is that no outside caller could ever pass this
// context in (i.e. we can't use context.Background() and the like).
var noCtx context.Context = &struct{ context.Context }{context.Background()}
// SpanFromContext returns the *Span contained in the Context, or nil when
// the Context carries no Span.
func SpanFromContext(ctx context.Context) *Span {
	// A failed assertion leaves sp as nil, which is exactly the
	// "no span" result.
	sp, _ := ctx.Value(activeSpanKey{}).(*Span)
	return sp
}
// optimizedContext is an implementation of context.Context special
// cased to carry a Span under activeSpanKey{}. By making an explicit
// type we unlock optimizations that save allocations by allocating
// the optimizedContext together with the Span it eventually carries.
type optimizedContext struct {
context.Context
// sp is the Span returned for Value(activeSpanKey{}) lookups.
sp *Span
}
// Value intercepts lookups of activeSpanKey{} to return the stored Span
// without consulting the wrapped Context; all other keys are delegated.
func (ctx *optimizedContext) Value(k interface{}) interface{} {
if k == (interface{}(activeSpanKey{})) {
return ctx.sp
}
return ctx.Context.Value(k)
}
// maybeWrapCtx returns a Context wrapping the Span, with two exceptions:
// 1. if ctx==noCtx, it's a noop
// 2. if ctx contains the noop Span, and sp is also the noop Span, elide
// allocating a new Context.
//
// If a non-nil octx is passed in, it forms the returned Context. This can
// avoid allocations if the caller is able to allocate octx together with
// the Span, as is commonly possible when StartSpanCtx is used.
func maybeWrapCtx(ctx context.Context, octx *optimizedContext, sp *Span) (context.Context, *Span) {
// Exception 1: the internal sentinel Context is never wrapped.
if ctx == noCtx {
return noCtx, sp
}
// NB: we check sp != nil explicitly because some callers want to remove a
// Span from a Context, and thus pass nil.
if sp != nil && sp.i.isNoop() {
// If the context originally had the noop span, and we would now be wrapping
// the noop span in it again, we don't have to wrap at all and can save an
// allocation.
//
// Note that applying this optimization for a nontrivial ctxSp would
// constitute a bug: A real, non-recording span might later start recording.
// Besides, the caller expects to get their own span, and will .Finish() it,
// leading to an extra, premature call to Finish().
if ctxSp := SpanFromContext(ctx); ctxSp != nil && ctxSp.i.isNoop() {
return ctx, sp
}
}
// Reuse the caller-allocated optimizedContext when one was provided.
if octx != nil {
octx.Context = ctx
octx.sp = sp
return octx, sp
}
// Fallback: allocate a plain context.WithValue wrapper.
return context.WithValue(ctx, activeSpanKey{}, sp), sp
}
// ContextWithSpan returns a Context wrapping the supplied Span.
func ContextWithSpan(ctx context.Context, sp *Span) context.Context {
	wrapped, _ := maybeWrapCtx(ctx, nil /* octx */, sp)
	return wrapped
}
|
package main
import (
"reflect"
"sort"
"testing"
)
// TestBuddy exercises Buddy over ranges containing known buddy pairs and one
// range that contains none, comparing against the expected result slice.
func TestBuddy(t *testing.T) {
type args struct {
start int
limit int
}
tests := []struct {
name string
args args
want []int
}{
{name: "0", args: args{start: 10, limit: 50}, want: []int{48, 75}},
{name: "1", args: args{start: 48, limit: 50}, want: []int{48, 75}},
{name: "2", args: args{start: 1071625, limit: 1103735}, want: []int{1081184, 1331967}},
{name: "3", args: args{start: 57345, limit: 90061}, want: []int{62744, 75495}},
{name: "4", args: args{start: 2693, limit: 7098}, want: []int{5775, 6128}},
// A range with no buddy pair must yield an empty (non-nil) slice.
{name: "5", args: args{start: 6379, limit: 8275}, want: []int{}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := Buddy(tt.args.start, tt.args.limit); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Buddy() = %v, want %v", got, tt.want)
}
})
}
}
// Test_findDivisors checks that findDivisors returns the proper divisors of
// d (every divisor except d itself). Results are sorted before comparison
// because the production order is not asserted.
func Test_findDivisors(t *testing.T) {
type args struct {
d int
}
tests := []struct {
name string
args args
want []int
}{
{name: "48", args: args{d: 48}, want: []int{1, 2, 3, 4, 6, 8, 12, 16, 24}},
{name: "75", args: args{d: 75}, want: []int{1, 3, 5, 15, 25}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := findDivisors(tt.args.d)
sort.Ints(got)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("findDivisors() = %v, want %v", got, tt.want)
}
})
}
}
// Test_getDivisorSum verifies the proper-divisor sums used by the buddy-pair
// search: getDivisorSum(n) returns the sum of the divisors of n excluding n.
func Test_getDivisorSum(t *testing.T) {
	type args struct {
		d int
	}
	tests := []struct {
		name string
		args args
		want int
	}{
		// 48 -> 1+2+3+4+6+8+12+16+24 = 76
		{name: "48", args: args{d: 48}, want: 76},
		// 75 -> 1+3+5+15+25 = 49. Subtest name fixed from "76" to "75" so it
		// matches the input, consistent with the other tests in this file.
		{name: "75", args: args{d: 75}, want: 49},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := getDivisorSum(tt.args.d); got != tt.want {
				t.Errorf("getDivisorSum() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package controller
import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/mylxsw/adanos-alert/internal/extension"
"github.com/mylxsw/adanos-alert/internal/job"
"github.com/mylxsw/adanos-alert/internal/repository"
"github.com/mylxsw/adanos-alert/internal/template"
"github.com/mylxsw/adanos-alert/pkg/misc"
"github.com/mylxsw/adanos-alert/service"
"github.com/mylxsw/asteria/log"
"github.com/mylxsw/glacier/infra"
"github.com/mylxsw/glacier/web"
"github.com/mylxsw/go-utils/str"
"github.com/pkg/errors"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// EventController handles the HTTP endpoints for ingesting and querying
// alert events.
type EventController struct {
cc infra.Resolver // dependency-injection resolver supplied by the framework
}
// NewEventController creates an EventController backed by the given resolver.
func NewEventController(cc infra.Resolver) web.Controller {
return &EventController{cc: cc}
}
// Register wires all event-related routes onto the router.
// NOTE(review): the /messages group duplicates the ingestion routes of
// /events — presumably kept for backward compatibility; confirm before
// removing either.
func (m *EventController) Register(router web.Router) {
router.Group("/messages", func(router web.Router) {
router.Post("/", m.AddCommonEvent).Name("events:add:common")
router.Post("/logstash/", m.AddLogstashEvent).Name("events:add:logstash")
router.Post("/grafana/", m.AddGrafanaEvent).Name("events:add:grafana")
router.Post("/prometheus/api/v1/alerts", m.AddPrometheusEvent).Name("events:add:prometheus") // URL deliberately has no trailing "/"
router.Post("/prometheus_alertmanager/", m.AddPrometheusAlertEvent).Name("events:add:prometheus-alert")
router.Post("/openfalcon/im/", m.AddOpenFalconEvent).Name("events:add:openfalcon")
router.Post("/general/", m.AddGeneralEvent).Name("events:add:general")
})
router.Group("/events", func(router web.Router) {
router.Get("/", m.Events).Name("events:all")
router.Get("/{id}/", m.Event).Name("events:one")
router.Delete("/{id}/", m.DeleteEvent).Name("events:delete")
router.Post("/{id}/matched-rules/", m.TestMatchedRules).Name("events:matched-rules")
router.Post("/{id}/reproduce/", m.ReproduceEvent).Name("events:reproduce-event")
router.Post("/", m.AddCommonEvent).Name("events:add:common")
router.Post("/logstash/", m.AddLogstashEvent).Name("events:add:logstash")
router.Post("/grafana/", m.AddGrafanaEvent).Name("events:add:grafana")
router.Post("/prometheus/api/v1/alerts", m.AddPrometheusEvent).Name("events:add:prometheus") // URL deliberately has no trailing "/"
router.Post("/prometheus_alertmanager/", m.AddPrometheusAlertEvent).Name("events:add:prometheus-alert")
router.Post("/openfalcon/im/", m.AddOpenFalconEvent).Name("events:add:openfalcon")
router.Post("/general/", m.AddGeneralEvent).Name("events:add:general")
})
router.Group("/event-relations", func(router web.Router) {
router.Get("/{id}/", m.QueryEventRelation).Name("event-relations:one")
router.Get("/{id}/notes/", m.QueryEventRelationNotes).Name("event-relations:notes")
router.Post("/{id}/notes/", m.AddEventRelationNote).Name("event-relations:notes:add")
})
router.Group("/events-count/", func(router web.Router) {
router.Get("/", m.Count).Name("events:count")
})
}
// eventsFilter builds a MongoDB filter document from the request's query
// parameters (meta, tags, origin, status, relation_id, group_id, event_id,
// start_at/end_at). Unparsable ObjectIDs are silently skipped.
func eventsFilter(ctx web.Context) bson.M {
filter := bson.M{}
// meta is either "key" (existence check) or "key:value" (exact match).
meta := ctx.Input("meta")
if meta != "" {
kv := strings.SplitN(meta, ":", 2)
if len(kv) == 1 {
filter["meta."+kv[0]] = bson.M{"$exists": true}
} else {
filter["meta."+kv[0]] = strings.TrimSpace(kv[1])
}
}
// Comma-separated tag list: match events carrying any of them.
tags := template.StringTags(ctx.Input("tags"), ",")
if len(tags) > 0 {
filter["tags"] = bson.M{"$in": tags}
}
// origin is treated as a regular-expression pattern, not a literal.
origin := ctx.Input("origin")
if origin != "" {
filter["origin"] = bson.M{"$regex": origin}
}
status := template.StringTags(ctx.Input("status"), ",")
if len(status) > 0 {
filter["status"] = bson.M{"$in": status}
}
relationIDHex := ctx.Input("relation_id")
if relationIDHex != "" {
relationID, err := primitive.ObjectIDFromHex(relationIDHex)
if err == nil {
filter["relation_ids"] = relationID
}
}
groupIDHex := ctx.Input("group_id")
if groupIDHex != "" {
groupID, err := primitive.ObjectIDFromHex(groupIDHex)
if err == nil {
filter["group_ids"] = groupID
}
}
evtIDHex := ctx.Input("event_id")
if evtIDHex != "" {
evtID, err := primitive.ObjectIDFromHex(evtIDHex)
if err == nil {
filter["_id"] = evtID
}
}
// Created-at window: bounds are exclusive ($gt/$lt).
// NOTE(review): an unparsable timestamp silently falls back to time.Now(),
// which can make the window empty — confirm this is intended.
beginAt := ctx.Input("start_at")
endAt := ctx.Input("end_at")
if beginAt != "" || endAt != "" {
createdAtRange := bson.M{}
if beginAt != "" {
beginAtTs, err := time.ParseInLocation("2006-01-02 15:04:05", beginAt, time.Local)
if err != nil {
beginAtTs = time.Now()
}
createdAtRange["$gt"] = beginAtTs
}
if endAt != "" {
endAtTs, err := time.ParseInLocation("2006-01-02 15:04:05", endAt, time.Local)
if err != nil {
endAtTs = time.Now()
}
createdAtRange["$lt"] = endAtTs
}
filter["created_at"] = createdAtRange
}
return filter
}
// Count returns the number of events matching the request's filter
// conditions (see eventsFilter).
func (m *EventController) Count(ctx web.Context, evtRepo repository.EventRepo) web.Response {
	total, err := evtRepo.Count(eventsFilter(ctx))
	if err != nil {
		return ctx.JSONError(err.Error(), http.StatusInternalServerError)
	}
	return ctx.JSON(web.M{"count": total})
}
// EventsResp is a response object for Events API
type EventsResp struct {
Events []repository.Event `json:"events"`
// Next is the pagination cursor for the following page.
Next int64 `json:"next"`
// Search echoes the filter conditions the client sent.
Search EventSearch `json:"search"`
}
// EventSearch is search conditions for messages
type EventSearch struct {
Tags []string `json:"tags"`
Meta string `json:"meta"`
Status []string `json:"status"`
Origin string `json:"origin"`
GroupID string `json:"group_id"`
}
// Events returns a page of events matching the request's filter conditions,
// together with the pagination cursor and the echoed search parameters.
func (m *EventController) Events(ctx web.Context, evtRepo repository.EventRepo) (*EventsResp, error) {
	offset, limit := offsetAndLimit(ctx)
	filter := eventsFilter(ctx)
	if log.DebugEnabled() {
		log.WithFields(log.Fields{"filter": filter}).Debug("events filter")
	}
	events, next, err := evtRepo.Paginate(filter, offset, limit)
	if err != nil {
		return nil, web.WrapJSONError(fmt.Errorf("query failed: %v", err), http.StatusInternalServerError)
	}
	// Pretty-print each event body for display.
	for i := range events {
		events[i].Content = template.JSONBeauty(events[i].Content)
	}
	search := EventSearch{
		Tags:    template.StringTags(ctx.Input("tags"), ","),
		Meta:    ctx.Input("meta"),
		Status:  template.StringTags(ctx.Input("status"), ","),
		Origin:  ctx.Input("origin"),
		GroupID: ctx.Input("group_id"),
	}
	return &EventsResp{Events: events, Next: next, Search: search}, nil
}
// Event returns a single event by its path ID, with the content
// pretty-printed for display.
func (m *EventController) Event(ctx web.Context, eventRepo repository.EventRepo) (*repository.Event, error) {
	id, err := primitive.ObjectIDFromHex(ctx.PathVar("id"))
	if err != nil {
		return nil, web.WrapJSONError(fmt.Errorf("invalid id: %w", err), http.StatusUnprocessableEntity)
	}
	event, err := eventRepo.Get(id)
	switch {
	case err == repository.ErrNotFound:
		return nil, web.WrapJSONError(fmt.Errorf("no such event: %w", err), http.StatusNotFound)
	case err != nil:
		return nil, err
	}
	event.Content = template.JSONBeauty(event.Content)
	return &event, nil
}
// ReproduceEvent re-submits an existing event as a new one, tagging the copy
// with "adanos-reproduced" so it can be told apart from the original.
func (m *EventController) ReproduceEvent(ctx web.Context, eventRepo repository.EventRepo, eventService service.EventService) web.Response {
	evt, err := m.Event(ctx, eventRepo)
	if err != nil {
		return ctx.JSONError(err.Error(), http.StatusInternalServerError)
	}
	newID, err := eventService.Add(context.TODO(), extension.CommonEvent{
		Content: evt.Content,
		Meta:    evt.Meta,
		Tags:    str.Distinct(append(evt.Tags, "adanos-reproduced")),
		Origin:  evt.Origin,
	})
	if err != nil {
		return ctx.JSONError(err.Error(), http.StatusInternalServerError)
	}
	return ctx.JSON(web.M{"id": newID.Hex()})
}
// errorWrap renders the standard ingestion response: a 500 JSON error when
// err is non-nil, otherwise the hex ID (empty string for the nil ObjectID).
func (m *EventController) errorWrap(ctx web.Context, id primitive.ObjectID, err error) web.Response {
	if err != nil {
		return ctx.JSONError(err.Error(), http.StatusInternalServerError)
	}
	hexID := ""
	if id != primitive.NilObjectID {
		hexID = id.Hex()
	}
	return ctx.JSON(web.M{"id": hexID})
}
// AddGeneralEvent ingests a raw-body event: the request body becomes the
// content, while tags, origin, meta and optional control settings are read
// from query/form inputs.
func (m *EventController) AddGeneralEvent(ctx web.Context, eventService service.EventService) web.Response {
body := ctx.Request().Body()
// Tags come from a comma-separated "tags" plus an optional single "tag".
tags := str.FilterEmpty(append(strings.Split(ctx.Input("tags"), ","), ctx.Input("tag")))
origin := ctx.Input("origin")
// meta is a comma-separated list of "key:value" items; whitespace around
// key and value is trimmed here before parsing below.
metas := str.Map(str.FilterEmpty(strings.Split(ctx.Input("meta"), ",")), func(item string) string {
return strings.Join(str.Map(strings.SplitN(item, ":", 2), func(item string) string { return strings.TrimSpace(item) }), ":")
})
meta := make(repository.EventMeta)
for _, m := range metas {
kv := strings.SplitN(m, ":", 2)
// Items without a ":" are silently dropped.
if len(kv) == 2 {
meta[kv[0]] = kv[1]
}
}
evt := extension.CommonEvent{
Content: string(body),
Meta: meta,
Tags: tags,
Origin: origin,
}
// Optional event-control settings, only attached when control.id is set.
if ctx.Input("control.id") != "" {
evt.Control = extension.EventControl{
ID: ctx.Input("control.id"),
InhibitInterval: ctx.Input("control.inhibit_interval"),
RecoveryAfter: ctx.Input("control.recovery_after"),
}
}
id, err := eventService.Add(ctx.Context(), evt)
return m.errorWrap(ctx, id, err)
}
// AddCommonEvent ingests an event posted directly in the CommonEvent
// JSON shape.
func (m *EventController) AddCommonEvent(ctx web.Context, eventService service.EventService) web.Response {
	var evt extension.CommonEvent
	if err := ctx.Unmarshal(&evt); err != nil {
		return ctx.JSONError(fmt.Sprintf("invalid request: %v", err), http.StatusUnprocessableEntity)
	}
	id, err := eventService.Add(ctx.Context(), evt)
	return m.errorWrap(ctx, id, err)
}
// AddLogstashEvent ingests a logstash payload, extracting the event content
// from the field named by "content-field" (default "message").
func (m *EventController) AddLogstashEvent(ctx web.Context, eventService service.EventService) web.Response {
	contentField := ctx.InputWithDefault("content-field", "message")
	evt, err := extension.LogstashToCommonEvent(ctx.Request().Body(), contentField)
	if err != nil {
		return ctx.JSONError(err.Error(), http.StatusInternalServerError)
	}
	id, err := eventService.Add(ctx.Context(), *evt)
	return m.errorWrap(ctx, id, err)
}
// AddGrafanaEvent ingests a Grafana alert webhook payload.
func (m *EventController) AddGrafanaEvent(ctx web.Context, eventService service.EventService) web.Response {
	evt, err := extension.GrafanaToCommonEvent(ctx.Request().Body())
	if err != nil {
		return ctx.JSONError(err.Error(), http.StatusInternalServerError)
	}
	id, err := eventService.Add(ctx.Context(), *evt)
	return m.errorWrap(ctx, id, err)
}
// AddPrometheusEvent ingests a batch of Prometheus alerts. Each alert is
// stored individually; per-alert failures are logged, and only the LAST
// alert's id/error determines the HTTP response.
func (m *EventController) AddPrometheusEvent(ctx web.Context, eventService service.EventService) web.Response {
commonMessages, err := extension.PrometheusToCommonEvents(ctx.Request().Body())
if err != nil {
return ctx.JSONError(err.Error(), http.StatusInternalServerError)
}
var lastID primitive.ObjectID
var lastErr error
for _, cm := range commonMessages {
lastID, lastErr = eventService.Add(ctx.Context(), *cm)
if lastErr != nil {
log.WithFields(log.Fields{
"message": cm,
}).Errorf("save prometheus message failed: %v", lastErr)
}
}
// NOTE(review): earlier failures are swallowed if the final Add succeeds —
// presumably best-effort batch semantics; confirm.
return m.errorWrap(ctx, lastID, lastErr)
}
// AddPrometheusAlertEvent ingests a Prometheus Alertmanager webhook payload.
func (m *EventController) AddPrometheusAlertEvent(ctx web.Context, eventService service.EventService) web.Response {
	evt, err := extension.PrometheusAlertToCommonEvent(ctx.Request().Body())
	if err != nil {
		return ctx.JSONError(err.Error(), http.StatusInternalServerError)
	}
	id, err := eventService.Add(ctx.Context(), *evt)
	return m.errorWrap(ctx, id, err)
}
// AddOpenFalconEvent ingests an open-falcon IM callback; "content" is
// required, "tos" lists the intended recipients.
func (m *EventController) AddOpenFalconEvent(ctx web.Context, eventService service.EventService) web.Response {
	tos := ctx.Input("tos")
	content := ctx.Input("content")
	if content == "" {
		return ctx.JSONError("invalid request, content required", http.StatusUnprocessableEntity)
	}
	evt := extension.OpenFalconToCommonEvent(tos, content)
	id, err := eventService.Add(ctx.Context(), *evt)
	return m.errorWrap(ctx, id, err)
}
// TestMatchedRules reports which alerting rules the event identified by the
// path ID would match.
func (m *EventController) TestMatchedRules(ctx web.Context, msgRepo repository.EventRepo, ruleRepo repository.RuleRepo) ([]job.MatchedRule, error) {
	msgID, err := primitive.ObjectIDFromHex(ctx.PathVar("id"))
	if err != nil {
		return nil, errors.Wrap(err, "invalid message id")
	}
	evt, err := msgRepo.Get(msgID)
	switch {
	case err == repository.ErrNotFound:
		return nil, errors.Wrap(err, "no such message")
	case err != nil:
		return nil, errors.Wrap(err, "query message failed")
	}
	return job.BuildEventMatchTest(ruleRepo)(evt)
}
// QueryEventRelation looks up a single event relation by its path ID.
func (m *EventController) QueryEventRelation(ctx web.Context, evtRelationRepo repository.EventRelationRepo) (*repository.EventRelation, error) {
	rawID := ctx.PathVar("id")
	if rawID == "" {
		return nil, errors.New("relation id is required")
	}
	relID, err := primitive.ObjectIDFromHex(rawID)
	if err != nil {
		return nil, errors.Wrap(err, "invalid relation id")
	}
	rel, err := evtRelationRepo.Get(context.TODO(), relID)
	if err != nil {
		return nil, errors.Wrap(err, "query relation failed")
	}
	return &rel, nil
}
// EventRelationNotesResp is the response object for the event-relation
// notes listing API.
type EventRelationNotesResp struct {
Notes []repository.EventRelationNote `json:"notes"`
// Next is the pagination cursor for the following page.
Next int64 `json:"next"`
}
// QueryEventRelationNotes lists the notes attached to an event relation,
// paginated by the standard offset/limit query parameters.
func (m *EventController) QueryEventRelationNotes(ctx web.Context, evtRelationNoteRepo repository.EventRelationNoteRepo) (*EventRelationNotesResp, error) {
	relID, err := primitive.ObjectIDFromHex(ctx.PathVar("id"))
	if err != nil {
		return nil, errors.Wrap(err, "invalid relation id")
	}
	offset, limit := offsetAndLimit(ctx)
	notes, next, err := evtRelationNoteRepo.PaginateNotes(context.TODO(), relID, bson.M{}, offset, limit)
	if err != nil {
		return nil, err
	}
	resp := &EventRelationNotesResp{Notes: notes, Next: next}
	return resp, nil
}
// AddEventRelationNote attaches a note to an event relation identified by
// the path ID.
func (m *EventController) AddEventRelationNote(ctx web.Context, evtRelationNoteRepo repository.EventRelationNoteRepo) web.Response {
relID, err := primitive.ObjectIDFromHex(ctx.PathVar("id"))
if err != nil {
return ctx.JSONError(err.Error(), http.StatusUnprocessableEntity)
}
note := ctx.Input("note")
// event_id is presumably optional: a parse failure deliberately yields
// the zero ObjectID (error discarded) — confirm callers rely on this.
eventID, _ := primitive.ObjectIDFromHex(ctx.Input("event_id"))
id, err := evtRelationNoteRepo.AddNote(context.TODO(), repository.EventRelationNote{
RelationID: relID,
EventID: eventID,
Note: note,
// TODO creator attribution is not available yet; fill these in once the
// user/permission system is in place.
CreatorID: primitive.NilObjectID,
CreatorName: "Default",
})
if err != nil {
return ctx.JSONError(err.Error(), http.StatusUnprocessableEntity)
}
return ctx.JSON(web.M{
"id": id,
})
}
// DeleteEvent removes a single event by its path ID.
func (m *EventController) DeleteEvent(ctx web.Context, evtRepo repository.EventRepo) web.Response {
	eventID, err := primitive.ObjectIDFromHex(ctx.PathVar("id"))
	if err != nil {
		return ctx.JSONError("invalid event id", http.StatusUnprocessableEntity)
	}
	if err = evtRepo.DeleteID(eventID); err != nil {
		return ctx.JSONError(err.Error(), http.StatusInternalServerError)
	}
	return ctx.JSON(web.M{})
}
|
package writeapi
// Persistence is the storage backend used by the write API: it stores new
// pastes and purges expired ones.
type Persistence interface {
// Store persists the given paste.
Store(Paste) error
// DeleteExpired removes pastes whose expiry has passed.
DeleteExpired() error
}
// Paste is a stored text snippet with its route and lifetime bookkeeping.
type Paste struct {
// ExpireTime is when the paste expires. NOTE(review): presumably a Unix
// timestamp like Created — confirm the unit with the storage layer.
ExpireTime int64
// Route is the URL path segment under which the paste is served.
Route string
// Text is the paste body.
Text string
// Created is when the paste was created.
Created int64
}
|
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewGetExportExecutionStatusObjectParams creates a new
// GetExportExecutionStatusObjectParams object with default values
// initialized (the default client-side timeout).
func NewGetExportExecutionStatusObjectParams() *GetExportExecutionStatusObjectParams {
	params := GetExportExecutionStatusObjectParams{timeout: cr.DefaultTimeout}
	return &params
}
// NewGetExportExecutionStatusObjectParamsWithTimeout creates a new
// GetExportExecutionStatusObjectParams object with default values
// initialized and the given request timeout.
func NewGetExportExecutionStatusObjectParamsWithTimeout(timeout time.Duration) *GetExportExecutionStatusObjectParams {
	params := GetExportExecutionStatusObjectParams{timeout: timeout}
	return &params
}
/*GetExportExecutionStatusObjectParams contains all the parameters to send to the API endpoint
for the get export execution status object operation typically these are written to a http.Request
*/
type GetExportExecutionStatusObjectParams struct {
/*ExecutionID is the executionId path parameter.*/
ExecutionID *string
/*ExportID is the exportId path parameter.*/
ExportID *string
// timeout bounds the HTTP request; set via the package constructors.
timeout time.Duration
}
// WithExecutionID adds the executionId to the get export execution status
// object params; it returns the receiver to allow chaining.
func (o *GetExportExecutionStatusObjectParams) WithExecutionID(ExecutionID *string) *GetExportExecutionStatusObjectParams {
o.ExecutionID = ExecutionID
return o
}
// WithExportID adds the exportId to the get export execution status object
// params; it returns the receiver to allow chaining.
func (o *GetExportExecutionStatusObjectParams) WithExportID(ExportID *string) *GetExportExecutionStatusObjectParams {
o.ExportID = ExportID
return o
}
// WriteToRequest writes these params to a swagger request
func (o *GetExportExecutionStatusObjectParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
r.SetTimeout(o.timeout)
// NOTE(review): res is never appended to, so the CompositeValidationError
// branch below is dead — this is standard swagger-generated scaffolding
// and would be restored by regeneration; leave as-is.
var res []error
if o.ExecutionID != nil {
// path param executionId
if err := r.SetPathParam("executionId", *o.ExecutionID); err != nil {
return err
}
}
if o.ExportID != nil {
// path param exportId
if err := r.SetPathParam("exportId", *o.ExportID); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
|
// SPDX-License-Identifier: MIT
package protocol
import (
"strings"
"github.com/caixw/apidoc/v7/core"
)
// ResourceOperationKind the kind of resource operations supported by the
// client; the string values are defined by the LSP protocol.
type ResourceOperationKind string
const (
// ResourceOperationKindCreate supports creating new files and folders.
ResourceOperationKindCreate ResourceOperationKind = "create"
// ResourceOperationKindRename supports renaming existing files and folders.
ResourceOperationKindRename ResourceOperationKind = "rename"
// ResourceOperationKindDelete supports deleting existing files and folders.
ResourceOperationKindDelete ResourceOperationKind = "delete"
)
// FailureHandlingKind defines how failures are handled when applying a
// workspace edit.
type FailureHandlingKind string
const (
// FailureHandlingKindAbort applying the workspace change is simply aborted
// if one of the changes provided fails.
// All operations executed before the failing operation stay executed.
FailureHandlingKindAbort FailureHandlingKind = "abort"
// FailureHandlingKindTransactional all operations are executed transactionally.
// That means they either all succeed or no changes at all are applied to the workspace.
FailureHandlingKindTransactional FailureHandlingKind = "transactional"
// FailureHandlingKindTextOnlyTransactional if the workspace edit contains only textual
// file changes they are executed transactionally. If resource changes (create, rename or delete file)
// are part of the change the failure handling strategy is abort.
FailureHandlingKindTextOnlyTransactional FailureHandlingKind = "textOnlyTransactional"
// FailureHandlingKindUndo The client tries to undo the operations already executed.
// But there is no guarantee that this succeeds.
FailureHandlingKindUndo FailureHandlingKind = "undo"
)
// WorkspaceClientCapabilities describes the client's workspace-related
// capabilities.
type WorkspaceClientCapabilities struct {
// The client supports applying batch edits to the workspace by supporting
// the request 'workspace/applyEdit'
ApplyEdit bool `json:"applyEdit,omitempty"`
// Capabilities specific to `WorkspaceEdit`s
WorkspaceEdit *WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"`
// Capabilities specific to the `workspace/didChangeConfiguration` notification.
DidChangeConfiguration *struct {
// Whether the notification supports dynamic registration.
DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
} `json:"didChangeConfiguration,omitempty"`
// Capabilities specific to the `workspace/didChangeWatchedFiles` notification.
DidChangeWatchedFiles *struct {
// Whether the notification supports dynamic registration.
DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
} `json:"didChangeWatchedFiles,omitempty"`
// The client has support for workspace folders.
//
// Since 3.6.0
WorkspaceFolders bool `json:"workspaceFolders,omitempty"`
// The client supports `workspace/configuration` requests.
//
// Since 3.6.0
Configuration bool `json:"configuration,omitempty"`
}
// WorkspaceProvider describes the server's workspace-related capabilities.
type WorkspaceProvider struct {
	// The server supports workspace folder.
	//
	// Since 3.6.0
	WorkspaceFolders *WorkspaceFoldersServerCapabilities `json:"workspaceFolders,omitempty"`
}

// WorkspaceFolder is a single root folder of the client's workspace.
type WorkspaceFolder struct {
	// The associated URI for this workspace folder.
	URI core.URI `json:"uri"`

	// The name of the workspace folder. Used to refer to this
	// workspace folder in the user interface.
	Name string `json:"name"`
}
// DidChangeWorkspaceFoldersParams is the parameter payload of the
// workspace/didChangeWorkspaceFolders notification.
type DidChangeWorkspaceFoldersParams struct {
	// The actual workspace folder change event.
	Event WorkspaceFoldersChangeEvent `json:"event"`
}

// WorkspaceFoldersChangeEvent the workspace folder change event.
type WorkspaceFoldersChangeEvent struct {
	// The array of added workspace folders
	Added []WorkspaceFolder `json:"added"`

	// The array of the removed workspace folders
	Removed []WorkspaceFolder `json:"removed"`
}
// WorkspaceFoldersServerCapabilities describes the server's support for
// workspace folders.
type WorkspaceFoldersServerCapabilities struct {
	// The server has support for workspace folders
	Supported bool `json:"supported,omitempty"`

	// Whether the server wants to receive workspace folder
	// change notifications.
	//
	// If a string is provided, the string is treated as an ID
	// under which the notification is registered on the client
	// side. The ID can be used to unregister for these events
	// using the `client/unregisterCapability` request.
	//
	// string | boolean;
	//
	// NOTE(review): the protocol allows string | boolean here, but this
	// field is declared bool, so a string value would fail to decode —
	// confirm whether string registration IDs must be supported.
	ChangeNotifications bool `json:"changeNotifications,omitempty"`
}
// WorkspaceEditClientCapabilities the capabilities of a workspace edit has evolved over the time.
// Clients can describe their support using the following client capability
type WorkspaceEditClientCapabilities struct {
	// The client supports versioned document changes in `WorkspaceEdit`s
	DocumentChanges bool `json:"documentChanges,omitempty"`

	// The resource operations the client supports. Clients should at least
	// support 'create', 'rename' and 'delete' files and folders.
	//
	// @since 3.13.0
	ResourceOperations []ResourceOperationKind `json:"resourceOperations,omitempty"`

	// The failure handling strategy of a client if applying the workspace edit fails.
	//
	// @since 3.13.0
	FailureHandling FailureHandlingKind `json:"failureHandling,omitempty"`
}
// Contains reports whether this workspace folder contains the file or
// directory identified by path. The two URIs must use compatible schemes
// (file or empty, http, or https); mismatched schemes never match.
//
// The path comparison is segment-aware: a folder /foo contains /foo and
// /foo/bar, but not /foobar. The original plain prefix check wrongly
// accepted sibling paths that merely share a string prefix.
func (f WorkspaceFolder) Contains(path core.URI) bool {
	fs, fp := f.URI.Parse()
	ps, pp := path.Parse()
	sameScheme := false
	switch {
	case (fs == core.SchemeFile || fs == "") && (ps == core.SchemeFile || ps == ""):
		sameScheme = true
	case fs == core.SchemeHTTP && ps == core.SchemeHTTP:
		sameScheme = true
	case fs == core.SchemeHTTPS && ps == core.SchemeHTTPS:
		sameScheme = true
	}
	if !sameScheme {
		return false
	}
	if pp == fp {
		return true
	}
	// Require a path-separator boundary after the folder path so sibling
	// names sharing a prefix are rejected.
	return strings.HasPrefix(pp, strings.TrimSuffix(fp, "/")+"/")
}
|
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
"testing"
"github.com/compose-spec/compose-go/types"
"gotest.tools/v3/assert"
)
// TestFilterServices checks that ForServices restricts the project to the
// requested service plus its transitive dependency (depends_on), dropping
// unrelated services.
func TestFilterServices(t *testing.T) {
	project := &types.Project{
		Services: []types.ServiceConfig{
			{Name: "foo", Links: []string{"bar"}},
			{
				Name: "bar",
				DependsOn: map[string]types.ServiceDependency{
					"zot": {},
				},
			},
			{Name: "zot"},
			{Name: "qix"},
		},
	}
	// Filtering on "bar" must keep exactly "bar" and its dependency "zot".
	assert.NilError(t, project.ForServices([]string{"bar"}))
	assert.Equal(t, len(project.Services), 2)
	for _, name := range []string{"bar", "zot"} {
		_, err := project.GetService(name)
		assert.NilError(t, err)
	}
}
|
package main
import (
"fmt"
"io"
"bytes"
"os"
)
// w is a package-level interface variable; with no assignment it holds
// both a nil type and a nil value, so w == nil is true.
var w io.Writer
// f demonstrates that an interface holding a nil pointer is not a nil
// interface: out compares unequal to nil whenever it carries a concrete
// dynamic type, even when that type's value is a nil pointer.
func f(out io.Writer) {
	if out == nil {
		fmt.Println("out is nil")
		return
	}
	fmt.Println("out is not nil")
}
// main exercises two interface pitfalls: a typed-nil pointer stored in an
// interface (which makes the interface non-nil) and a type assertion on
// an interface value.
func main() {
	if w == nil {
		fmt.Println("w is nil")
	}
	// buf is a typed nil (*bytes.Buffer); passing it to f yields a
	// non-nil interface because the interface carries the concrete type.
	var buf *bytes.Buffer
	// buf2 is a genuinely nil interface (no type, no value).
	var buf2 io.Writer
	f(buf)
	f(buf2)
	// x.(T) type assertion: check whether w2's dynamic type is *os.File.
	var w2 io.Writer = os.Stdout
	if _, isFile := w2.(*os.File); isFile {
		fmt.Println("use f as a file")
	}
}
|
package anilist
import (
"strings"
)
// User is an AniList account as returned by the GraphQL API.
type User struct {
	Id                      int              `json:"id"`
	Name                    string           `json:"name"`
	About                   string           `json:"about"`
	BannerImage             string           `json:"bannerImage"`
	Stats                   UserStats        `json:"stats"`
	UnreadNotificationCount int              `json:"unreadNotificationCount"`
	SiteUrl                 string           `json:"siteUrl"`
	DonatorTier             int              `json:"donatorTier"`
	ModeratorStatus         string           `json:"moderatorStatus"`
	UpdatedAt               int              `json:"updatedAt"`
	MediaListOptions        MediaListOptions `json:"mediaListOptions"`
}

// UserStats holds aggregate statistics for a user.
type UserStats struct {
	// WatchedTime is the user's total watch time; units are not
	// verifiable from this file — confirm against the AniList API docs.
	WatchedTime int `json:"watchedTime"`
}

// MediaListOptions carries the user's media-list display settings.
type MediaListOptions struct {
	ScoreFormat ScoreFormat `json:"scoreFormat"`
}
// ScoreFormat enumerates the scoring systems a user may select on AniList.
type ScoreFormat string

const (
	Point100       ScoreFormat = "POINT_100"
	Point10Decimal ScoreFormat = "POINT_10_DECIMAL"
	Point10        ScoreFormat = "POINT_10"
	Point5         ScoreFormat = "POINT_5"
	Point3         ScoreFormat = "POINT_3"
)
// MediaListCollection is the top-level container for a user's media lists.
type MediaListCollection struct {
	Lists []MediaListGroup `json:"lists"`
}

// MediaListGroup is one named list with its entries.
type MediaListGroup struct {
	Entries              []MediaListEntry `json:"entries"`
	Name                 string           `json:"name"`
	IsCustomList         bool             `json:"isCustomList"`
	IsSplitCompletedList bool             `json:"isSplitCompletedList"`
	Status               MediaListStatus  `json:"status"`
}

// MediaListEntry is a single list item: the user's progress data plus the
// media summary decoded from the "media" field via the embedded struct.
type MediaListEntry struct {
	ListId         int             `json:"id"`
	Status         MediaListStatus `json:"status"`
	Score          float32         `json:"score"`
	Progress       int             `json:"progress"`
	Repeat         int             `json:"repeat"`
	UpdatedAt      int             `json:"updatedAt"`
	MediaDeficient `json:"media"`
}

// MediaDeficient is a trimmed-down media object (partial, as opposed to
// MediaFull) used inside list entries.
type MediaDeficient struct {
	Id       int        `json:"id"`
	IdMal    int        `json:"idMal"`
	Title    MediaTitle `json:"title"`
	Type     string     `json:"type"`
	Format   string     `json:"format"`
	Status   string     `json:"status"`
	Season   string     `json:"season"`
	Episodes int        `json:"episodes"`
	Duration int        `json:"duration"`
	Synonyms []string   `json:"synonyms"`
}
// MediaFull is the complete media object returned by detailed AniList
// queries (compare MediaDeficient, the trimmed list-entry variant).
type MediaFull struct {
	Id              int       `json:"id"`
	IdMal           int       `json:"idMal"` // MyAnimeList id, per the json tag
	Title           MediaTitle `json:"title"`
	Type            MediaType `json:"type"`
	Format          string    `json:"format"`
	Status          string    `json:"status"`
	Description     string    `json:"description"`
	StartDate       FuzzyDate `json:"startDate"`
	EndDate         FuzzyDate `json:"endDate"`
	Season          string    `json:"season"`
	Episodes        int       `json:"episodes"`
	Duration        int       `json:"duration"`
	Chapters        int       `json:"chapters"`
	Volumes         int       `json:"volumes"`
	CountryOfOrigin string    `json:"countryOfOrigin"`
	IsLicensed      bool      `json:"isLicensed"`
	Source          string    `json:"source"`
	HashTag         string    `json:"hashtag"`
	Trailer         MediaTrailer `json:"trailer"`
	UpdatedAt       int       `json:"updatedAt"`
	CoverImage      MediaCoverImage `json:"coverImage"`
	BannerImage     string    `json:"bannerImage"`
	Genres          []string  `json:"genres"`
	Synonyms        []string  `json:"synonyms"`
	AverageScore    int       `json:"averageScore"`
	MeanScore       int       `json:"meanScore"`
	Popularity      int       `json:"popularity"`
	Trending        int       `json:"trending"`
	Tags            []MediaTag `json:"tags"`
	IsFavourite     bool      `json:"isFavourite"`
	IsAdult         bool      `json:"isAdult"`
	NextAiringEpisode AiringSchedule `json:"nextAiringEpisode"`
	SiteUrl         string    `json:"siteUrl"`
}
// MediaTitle holds the different renderings of a media title.
type MediaTitle struct {
	Romaji        string `json:"romaji"`
	English       string `json:"english"`
	Native        string `json:"native"`
	UserPreferred string `json:"userPreferred"`
}

// MediaType distinguishes anime from manga in AniList queries.
type MediaType string

const (
	Anime = MediaType("ANIME")
	// Manga is the correctly spelled constant for the MANGA media type.
	Manga = MediaType("MANGA")
	// Deprecated: Mange is a misspelling of Manga, kept for backward
	// compatibility with existing callers; use Manga instead.
	Mange = MediaType("MANGA")
)
// FuzzyDate is a possibly-incomplete calendar date; unknown components
// are left at their zero value.
type FuzzyDate struct {
	Year  int `json:"year"`
	Month int `json:"month"`
	Day   int `json:"day"`
}

// AiringSchedule describes the next scheduled episode of a series.
type AiringSchedule struct {
	Id              int `json:"id"`
	AiringAt        int `json:"airingAt"`
	TimeUntilAiring int `json:"timeUntilAiring"`
	Episode         int `json:"episode"`
}

// MediaTrailer identifies a trailer video on an external site.
type MediaTrailer struct {
	Id   string `json:"id"`
	Site string `json:"site"`
}

// MediaCoverImage carries cover art URLs in two sizes.
type MediaCoverImage struct {
	Large  string `json:"large"`
	Medium string `json:"medium"`
}

// MediaTag is a descriptive tag attached to a media entry, including
// spoiler/adult flags and its relevance rank.
type MediaTag struct {
	Id               int    `json:"id"`
	Name             string `json:"name"`
	Description      string `json:"description"`
	Category         string `json:"category"`
	Rank             int    `json:"rank"`
	IsGeneralSpoiler bool   `json:"isGeneralSpoiler"`
	IsMediaSpoiler   bool   `json:"isMediaSpoiler"`
	IsAdult          bool   `json:"isAdult"`
}
// GqlError is one error entry from a GraphQL error response.
type GqlError struct {
	Message   string     `json:"message"`
	Status    int        `json:"status"`
	Locations []Location `json:"locations"`
}

// Location points at the line/column in the GraphQL query that caused an error.
type Location struct {
	Line   int `json:"line"`
	Column int `json:"column"`
}
// MediaListStatus is the watch/read state of a media list entry; the
// empty string (All) selects every status.
type MediaListStatus string

const (
	All       MediaListStatus = ""
	Current   MediaListStatus = "CURRENT"
	Planning  MediaListStatus = "PLANNING"
	Completed MediaListStatus = "COMPLETED"
	Dropped   MediaListStatus = "DROPPED"
	Paused    MediaListStatus = "PAUSED"
	Repeating MediaListStatus = "REPEATING"
)

// String renders the status for display: All becomes "", Current becomes
// "Watching", and every other value keeps its first letter capitalized
// with the remainder lowercased (e.g. COMPLETED -> "Completed").
func (status MediaListStatus) String() string {
	switch status {
	case All:
		return ""
	case Current:
		return "Watching"
	default:
		head := string(status[0])
		tail := strings.ToLower(string(status[1:]))
		return head + tail
	}
}
// AiringNotification is a notification that an episode of a followed
// series has aired.
type AiringNotification struct {
	Id        int      `json:"id"`
	Type      string   `json:"type"`
	AnimeId   int      `json:"animeId"`
	Episode   int      `json:"episode"`
	Contexts  []string `json:"contexts"`
	CreatedAt int      `json:"createdAt"`
	// Title unwraps media.title from the payload through an embedded
	// struct. NOTE(review): the inner `json:"title"` tag on the embedded
	// MediaTitle appears intended for nested decoding — confirm against
	// the GraphQL query shape.
	Title struct {
		MediaTitle `json:"title"`
	} `json:"media"`
}
|
// Copyright 2020 Comcast Cable Communications Management, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugin
import (
"context"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/xmidt-org/ears/internal/pkg/appsecret"
"github.com/xmidt-org/ears/internal/pkg/quota"
"github.com/xmidt-org/ears/internal/pkg/rtsemconv"
"github.com/xmidt-org/ears/internal/pkg/syncer"
"github.com/xmidt-org/ears/pkg/logs"
"github.com/xmidt-org/ears/pkg/panics"
"github.com/xmidt-org/ears/pkg/secret"
"github.com/xmidt-org/ears/pkg/tenant"
"go.opentelemetry.io/otel"
"sync"
"time"
"github.com/google/uuid"
"github.com/xmidt-org/ears/pkg/event"
pkgevent "github.com/xmidt-org/ears/pkg/event"
pkgfilter "github.com/xmidt-org/ears/pkg/filter"
pkgmanager "github.com/xmidt-org/ears/pkg/plugin/manager"
pkgreceiver "github.com/xmidt-org/ears/pkg/receiver"
pkgsender "github.com/xmidt-org/ears/pkg/sender"
)
// manager multiplexes receiver, filter, and sender plugin instances.
// Instances are shared under a key of tenant + name + config hash; the
// *Count maps track how many wrappers reference each shared instance,
// and the *Wrapped maps index per-registration wrapper objects by their
// unique wrapper id. The embedded Mutex guards all of these maps.
type manager struct {
	sync.Mutex
	pm               pkgmanager.Manager
	receivers        map[string]pkgreceiver.Receiver
	receiversCount   map[string]int
	receiversWrapped map[string]*receiver
	receiversFn      map[string]map[string]pkgreceiver.NextFn // map[receiverKey]map[wrapperID] -> nextFN
	filters          map[string]pkgfilter.Filterer
	filtersCount     map[string]int
	filtersWrapped   map[string]*filter
	senders          map[string]pkgsender.Sender
	sendersCount     map[string]int
	sendersWrapped   map[string]*sender
	nextFnDeadline   time.Duration
	logger           *zerolog.Logger
	quotaManager     *quota.QuotaManager
	tenantStorer     tenant.TenantStorer
	secrets          secret.Vault
	tableSyncer      syncer.DeltaSyncer
}
// === Initialization ================================================

const (
	// defaultNextFnDeadline is the default value for manager.nextFnDeadline.
	defaultNextFnDeadline = 5 * time.Second
	// pluginRegistrationDeadline caps how long plugin construction
	// (NewReceiver/NewFilterer/NewSender) may take during registration.
	pluginRegistrationDeadline = 5 * time.Second
)
// NewManager builds a plugin manager with empty registries and the
// default next-function deadline, then applies each ManagerOption in
// order. The first failing option aborts construction with an OptionError.
func NewManager(options ...ManagerOption) (Manager, error) {
	m := manager{
		receivers:        map[string]pkgreceiver.Receiver{},
		receiversCount:   map[string]int{},
		receiversWrapped: map[string]*receiver{},
		receiversFn:      map[string]map[string]pkgreceiver.NextFn{},
		nextFnDeadline:   defaultNextFnDeadline,
		filters:          map[string]pkgfilter.Filterer{},
		filtersCount:     map[string]int{},
		filtersWrapped:   map[string]*filter{},
		senders:          map[string]pkgsender.Sender{},
		sendersCount:     map[string]int{},
		sendersWrapped:   map[string]*sender{},
	}
	for _, option := range options {
		if err := option(&m); err != nil {
			return nil, &OptionError{Err: err}
		}
	}
	return &m, nil
}
// === Receivers =====================================================

// Receiverers returns the receiver plugin factories known to the
// underlying plugin manager, or an empty map when no plugin manager is
// configured. Locking added for consistency with Filterers and
// Senderers, which guard the same m.pm read.
func (m *manager) Receiverers() map[string]pkgreceiver.NewReceiverer {
	m.Lock()
	defer m.Unlock()
	if m.pm == nil {
		return map[string]pkgreceiver.NewReceiverer{}
	}
	return m.pm.Receiverers()
}
// RegisterReceiver returns a receiver wrapper for the given plugin, name,
// config, and tenant. Underlying receivers are shared: the first
// registration for a (tenant, name, config-hash) key creates the receiver
// and starts its Receive loop; later registrations reuse it and only add
// a new reference-counted wrapper. Plugin construction is bounded by
// pluginRegistrationDeadline.
func (m *manager) RegisterReceiver(
	ctx context.Context, plugin string,
	name string, config interface{},
	tid tenant.Id,
) (pkgreceiver.Receiver, error) {
	ns, err := m.pm.Receiverer(plugin)
	if err != nil {
		return nil, &RegistrationError{
			Message: "could not get plugin",
			Plugin:  plugin,
			Name:    name,
			Err:     err,
		}
	}
	// The config hash makes receivers with identical configs shareable.
	hash, err := ns.ReceiverHash(config)
	if err != nil {
		return nil, &RegistrationError{
			Message: "could not generate hash",
			Plugin:  plugin,
			Name:    name,
			Err:     err,
		}
	}
	key := m.mapkey(tid, name, hash)
	m.Lock()
	defer m.Unlock()
	r, ok := m.receivers[key]
	if !ok {
		// First registration for this key: create the shared receiver.
		var secrets secret.Vault
		if m.secrets != nil {
			secrets = appsecret.NewTenantConfigVault(tid, m.secrets, m.tenantStorer)
		}
		// Construct in a goroutine so a hanging plugin cannot block the
		// manager forever; give up after pluginRegistrationDeadline.
		receiverChan := make(chan pkgreceiver.Receiver, 1)
		go func() {
			r, err = ns.NewReceiver(tid, plugin, name, config, secrets, m.tableSyncer)
			receiverChan <- r
		}()
		select {
		case rcv := <-receiverChan:
			r = rcv
		case <-time.After(pluginRegistrationDeadline):
			err = errors.New("receiver registration timed out")
		}
		if err != nil {
			return nil, &RegistrationError{
				Message: "could not create new receiver",
				Plugin:  plugin,
				Name:    name,
				Err:     err,
			}
		}
		log.Ctx(ctx).Info().Str("op", "RegisterReceiver").Str("key", key).Str("name", name).Msg("Creating NewReceiver")
		m.receivers[key] = r
		m.receiversCount[key] = 0
		m.receiversFn[key] = map[string]pkgreceiver.NextFn{}
		// Start the long-running receive loop that fans events out to all
		// next-functions registered under this key (see manager.next).
		go func() {
			err := r.Receive(func(e event.Event) {
				// Contain panics raised while handling a single event.
				defer func() {
					p := recover()
					if p != nil {
						panicErr := panics.ToError(p)
						log.Ctx(e.Context()).Error().Str("op", "receiverNext").Str("error", panicErr.Error()).
							Str("stackTrace", panicErr.StackTrace()).Msg("A panic has occurred")
					}
				}()
				if m.quotaManager != nil {
					//ratelimit
					tracer := otel.Tracer(rtsemconv.EARSTracerName)
					_, span := tracer.Start(e.Context(), "rateLimit")
					err = m.quotaManager.Wait(e.Context(), tid)
					span.End()
					if err != nil {
						// Over quota: reject the event instead of forwarding it.
						log.Ctx(e.Context()).Debug().Str("op", "receiverNext").Str("tenantId", tid.ToString()).Msg("Tenant Ratelimited")
						e.Nack(err)
						return
					}
				}
				m.next(key, e)
			})
			if err != nil {
				m.logger.Error().Str("op", "RegisterReceiver.Receive").Err(err).Str("key", key).Str("name", name).Msg("Error calling Receive function")
				// most errors are captured during registration
			}
		}()
	}
	// Every registration gets its own wrapper with a unique id.
	u, err := uuid.NewRandom()
	if err != nil {
		return nil, &RegistrationError{
			Message: "could not generate unique id",
			Plugin:  plugin,
			Name:    name,
			Err:     err,
		}
	}
	w := &receiver{
		id:       u.String(),
		tid:      tid,
		name:     name,
		plugin:   plugin,
		hash:     hash,
		manager:  m,
		receiver: r,
		active:   true,
	}
	m.receiversWrapped[w.id] = w
	m.receiversCount[key]++
	log.Ctx(ctx).Info().Str("op", "RegisterReceiver").Str("key", key).Str("wid", w.id).Str("name", name).Msg("Receiver registered")
	return w, nil
}
// Receivers returns a snapshot of all wrapped receivers keyed by wrapper id.
func (m *manager) Receivers() map[string]pkgreceiver.Receiver {
	m.Lock()
	defer m.Unlock()
	snapshot := make(map[string]pkgreceiver.Receiver, len(m.receiversWrapped))
	for id, wrapped := range m.receiversWrapped {
		snapshot[id] = wrapped
	}
	return snapshot
}
// ReceiversStatus aggregates the wrapped receivers into one status entry
// per shared receiver (tenant + name + config hash), counting how many
// wrappers reference each one.
func (m *manager) ReceiversStatus() map[string]ReceiverStatus {
	m.Lock()
	defer m.Unlock()
	statuses := make(map[string]ReceiverStatus)
	for _, wrapped := range m.receiversWrapped {
		key := m.mapkey(wrapped.tid, wrapped.Name(), wrapped.hash)
		if existing, seen := statuses[key]; seen {
			existing.ReferenceCount++
			statuses[key] = existing
			continue
		}
		statuses[key] = ReceiverStatus{
			Name:            wrapped.Name(),
			Plugin:          wrapped.Plugin(),
			Config:          wrapped.Config(),
			ReferenceCount:  1,
			LastEventTs:     wrapped.EventTs(),
			SuccessCount:    wrapped.EventSuccessCount(),
			ErrorCount:      wrapped.EventErrorCount(),
			SuccessVelocity: wrapped.EventSuccessVelocity(),
			ErrorVelocity:   wrapped.EventErrorVelocity(),
			Tid:             wrapped.tid,
		}
	}
	return statuses
}
// next iterates through all receiver functions that have registered for
// a receiver (unique by name + config hash). These must be independent,
// so no error can actually be returned to the receiver if a problem occurs.
// This must leverage the Ack() interface
func (m *manager) next(receiverKey string, e pkgevent.Event) {
	if e == nil {
		//TODO put metric here
		m.logger.Error().Str("receiverKey", receiverKey).Msg("event is nil")
		return
	}
	m.Lock()
	nextFns := m.receiversFn[receiverKey]
	for wid, n := range nextFns {
		// Attach wrapper and receiver identifiers to the event's log context.
		subCtx := logs.SubCtx(e.Context())
		logs.StrToLogCtx(subCtx, "wid", wid)
		logs.StrToLogCtx(subCtx, "receiverKey", receiverKey)
		// clone creates a new event with a new sub ack tree but does not create a deepcopy of payload and metadata
		// instead we are doing late deepcopy when we come across mutating filters in the route
		childEvt, err := e.Clone(subCtx)
		//childEvt.DeepCopy()
		if err != nil {
			e.Nack(err)
		} else {
			// Dispatch asynchronously; a panicking route must not take
			// down the fan-out loop or its sibling routes.
			go func(fn pkgreceiver.NextFn, evt event.Event) {
				defer func() {
					p := recover()
					if p != nil {
						panicErr := panics.ToError(p)
						log.Ctx(evt.Context()).Error().Str("op", "nextRoute").Str("error", panicErr.Error()).
							Str("stackTrace", panicErr.StackTrace()).Msg("a panic has occurred")
					}
				}()
				//log.Ctx(evt.Context()).Debug().Str("op", "nextRoute").Msg("sending event to next route")
				fn(evt)
			}(n, childEvt)
		}
	}
	m.Unlock()
	// The parent event is acked after all clones have been dispatched.
	e.Ack()
}
// receive registers nextFn to be invoked for every event produced by the
// underlying receiver of r, then blocks until stopReceiving closes r.done.
func (m *manager) receive(r *receiver, nextFn pkgreceiver.NextFn) error {
	r.Lock()
	r.done = make(chan struct{})
	r.Unlock()
	m.Lock()
	m.receiversFn[m.mapkey(r.tid, r.name, r.hash)][r.id] = nextFn
	m.Unlock()
	// Park here until the wrapper is stopped (see stopReceiving).
	<-r.done
	return nil
}
// stopReceiving removes the wrapper's next-function from the fan-out map
// and unblocks the goroutine parked in receive by closing r.done.
// It returns NotRegisteredError when receive was never called for r.
func (m *manager) stopReceiving(ctx context.Context, r *receiver) error {
	m.Lock()
	delete(m.receiversFn[m.mapkey(r.tid, r.name, r.hash)], r.id)
	m.Unlock()
	r.Lock()
	defer r.Unlock()
	if r.done == nil {
		return &NotRegisteredError{}
	}
	// Close r.done only once; active guards against a double close.
	if r.active {
		r.active = false
		close(r.done)
	}
	return nil
}
// UnregisterReceiver detaches the wrapper pr and, when the last wrapper
// for the underlying shared receiver is gone, stops that receiver as
// well (asynchronously, so a slow plugin cannot hold the manager lock).
func (m *manager) UnregisterReceiver(ctx context.Context, pr pkgreceiver.Receiver) error {
	r, ok := pr.(*receiver)
	// NOTE: No locking on simple reads
	if !ok || !r.active {
		return &RegistrationError{
			Message: fmt.Sprintf("receiver not registered %v", ok),
		}
	}
	err := r.StopReceiving(ctx) // This in turn calls manager.stopreceiving()
	if err != nil {
		log.Ctx(ctx).Error().Str("op", "UnregisterReceiver").Str("r", r.name).Err(err).Msg("Error calling StopReceiving")
	}
	key := m.mapkey(r.tid, r.name, r.hash)
	m.Lock()
	defer m.Unlock()
	m.receiversCount[key]--
	if m.receiversCount[key] <= 0 {
		// Last reference: stop the shared receiver outside the lock.
		go func() {
			err := r.receiver.StopReceiving(ctx)
			if err != nil {
				m.logger.Error().Str("op", "UnregisterReceiver").Str("r", r.name).Str("key", key).Err(err).Msg("Error stopping receiver")
			}
		}()
		log.Ctx(ctx).Info().Str("op", "UnregisterReceiver").Str("r", r.name).Str("key", key).Str("wid", r.id).Msg("receiver stopped")
		delete(m.receiversCount, key)
		delete(m.receivers, key)
	}
	delete(m.receiversWrapped, r.id)
	r.Lock()
	r.active = false
	r.Unlock()
	return nil
}
// === Filters =======================================================

// Filterers returns the filterer plugin factories known to the underlying
// plugin manager, or an empty map when no plugin manager is configured.
func (m *manager) Filterers() map[string]pkgfilter.NewFilterer {
	m.Lock()
	defer m.Unlock()
	if m.pm != nil {
		return m.pm.Filterers()
	}
	return map[string]pkgfilter.NewFilterer{}
}
// RegisterFilter returns a filterer wrapper for the given plugin, name,
// config, and tenant. Filterers with the same (tenant, name, config-hash)
// key are shared; each registration adds a reference-counted wrapper.
// Plugin construction is bounded by pluginRegistrationDeadline.
func (m *manager) RegisterFilter(ctx context.Context, plugin string, name string, config interface{}, tid tenant.Id) (pkgfilter.Filterer, error) {
	factory, err := m.pm.Filterer(plugin)
	if err != nil {
		return nil, &RegistrationError{
			Message: "could not get plugin",
			Plugin:  plugin,
			Name:    name,
			Err:     err,
		}
	}
	// The config hash makes filterers with identical configs shareable.
	hash, err := factory.FiltererHash(config)
	if err != nil {
		return nil, &RegistrationError{
			Message: "could not generate filterer hash",
			Plugin:  plugin,
			Name:    name,
			Err:     err,
		}
	}
	key := m.mapkey(tid, name, hash)
	m.Lock()
	defer m.Unlock()
	f, ok := m.filters[key]
	if !ok {
		// First registration for this key: create the shared filterer.
		var secrets secret.Vault
		if m.secrets != nil {
			secrets = appsecret.NewTenantConfigVault(tid, m.secrets, m.tenantStorer)
		}
		// Construct in a goroutine so a hanging plugin cannot block the
		// manager forever; give up after pluginRegistrationDeadline.
		filterChan := make(chan pkgfilter.Filterer, 1)
		go func() {
			f, err = factory.NewFilterer(tid, plugin, name, config, secrets, m.tableSyncer)
			filterChan <- f
		}()
		select {
		case flt := <-filterChan:
			f = flt
		case <-time.After(pluginRegistrationDeadline):
			err = errors.New("filter registration timed out")
		}
		if err != nil {
			return nil, &RegistrationError{
				Message: "could not create new filterer",
				Plugin:  plugin,
				Name:    name,
				Err:     err,
			}
		}
		m.filters[key] = f
		m.filtersCount[key] = 0
	}
	// Every registration gets its own wrapper with a unique id.
	u, err := uuid.NewRandom()
	if err != nil {
		return nil, &RegistrationError{
			Message: "could not generate unique id",
			Plugin:  plugin,
			Name:    name,
			Err:     err,
		}
	}
	w := &filter{
		id:       u.String(),
		tid:      tid,
		name:     name,
		plugin:   plugin,
		hash:     hash,
		manager:  m,
		filterer: f,
		active:   true,
	}
	m.filtersWrapped[w.id] = w
	m.filtersCount[key]++
	return w, nil
}
// Filters returns a snapshot of all wrapped filterers keyed by wrapper id.
func (m *manager) Filters() map[string]pkgfilter.Filterer {
	m.Lock()
	defer m.Unlock()
	snapshot := make(map[string]pkgfilter.Filterer, len(m.filtersWrapped))
	for id, wrapped := range m.filtersWrapped {
		snapshot[id] = wrapped
	}
	return snapshot
}
// FiltersStatus aggregates the wrapped filterers into one status entry
// per shared filterer (tenant + name + config hash), counting how many
// wrappers reference each one.
func (m *manager) FiltersStatus() map[string]FilterStatus {
	m.Lock()
	defer m.Unlock()
	statuses := make(map[string]FilterStatus)
	for _, wrapped := range m.filtersWrapped {
		key := m.mapkey(wrapped.tid, wrapped.Name(), wrapped.hash)
		if existing, seen := statuses[key]; seen {
			existing.ReferenceCount++
			statuses[key] = existing
			continue
		}
		statuses[key] = FilterStatus{
			Name:            wrapped.Name(),
			Plugin:          wrapped.Plugin(),
			Config:          wrapped.Config(),
			ReferenceCount:  1,
			LastEventTs:     wrapped.EventTs(),
			SuccessCount:    wrapped.EventSuccessCount(),
			ErrorCount:      wrapped.EventErrorCount(),
			FilterCount:     wrapped.EventFilterCount(),
			SuccessVelocity: wrapped.EventSuccessVelocity(),
			ErrorVelocity:   wrapped.EventErrorVelocity(),
			FilterVelocity:  wrapped.EventFilterVelocity(),
			Tid:             wrapped.tid,
		}
	}
	return statuses
}
// UnregisterFilter detaches the wrapper pf, dropping the shared filterer
// from the registry when its reference count reaches zero. A wrapper can
// only be unregistered once; afterwards active is false.
func (m *manager) UnregisterFilter(ctx context.Context, pf pkgfilter.Filterer) error {
	f, ok := pf.(*filter)
	// No locking for this simple read of f.active.
	if !ok || !f.active {
		return &RegistrationError{
			Message: fmt.Sprintf("filter not registered %v", ok),
		}
	}
	key := m.mapkey(f.tid, f.name, f.hash)
	m.Lock()
	m.filtersCount[key]--
	if m.filtersCount[key] <= 0 {
		// Last reference: drop the shared filterer from the registry.
		delete(m.filtersCount, key)
		delete(m.filters, key)
	}
	delete(m.filtersWrapped, f.id)
	m.Unlock()
	f.Lock()
	f.active = false
	f.Unlock()
	return nil
}
// === Senders =======================================================

// Senderers returns the sender plugin factories known to the underlying
// plugin manager, or an empty map when no plugin manager is configured.
func (m *manager) Senderers() map[string]pkgsender.NewSenderer {
	m.Lock()
	defer m.Unlock()
	if m.pm != nil {
		return m.pm.Senderers()
	}
	return map[string]pkgsender.NewSenderer{}
}
// RegisterSender returns a sender wrapper for the given plugin, name,
// config, and tenant. Senders with the same (tenant, name, config-hash)
// key are shared; each registration adds a reference-counted wrapper.
// Plugin construction is bounded by pluginRegistrationDeadline.
func (m *manager) RegisterSender(
	ctx context.Context, plugin string,
	name string, config interface{},
	tid tenant.Id,
) (pkgsender.Sender, error) {
	ns, err := m.pm.Senderer(plugin)
	if err != nil {
		return nil, &RegistrationError{
			Message: "could not get plugin",
			Plugin:  plugin,
			Name:    name,
			Err:     err,
		}
	}
	// The config hash makes senders with identical configs shareable.
	hash, err := ns.SenderHash(config)
	if err != nil {
		return nil, &RegistrationError{
			Message: "could not generate hash",
			Plugin:  plugin,
			Name:    name,
			Err:     err,
		}
	}
	key := m.mapkey(tid, name, hash)
	m.Lock()
	defer m.Unlock()
	s, ok := m.senders[key]
	if !ok {
		// First registration for this key: create the shared sender.
		var secrets secret.Vault
		if m.secrets != nil {
			secrets = appsecret.NewTenantConfigVault(tid, m.secrets, m.tenantStorer)
		}
		// Construct in a goroutine so a hanging plugin cannot block the
		// manager forever; give up after pluginRegistrationDeadline.
		senderChan := make(chan pkgsender.Sender, 1)
		go func() {
			s, err = ns.NewSender(tid, plugin, name, config, secrets, m.tableSyncer)
			senderChan <- s
		}()
		select {
		case snd := <-senderChan:
			s = snd
		case <-time.After(pluginRegistrationDeadline):
			err = errors.New("sender registration timed out")
		}
		if err != nil {
			return nil, &RegistrationError{
				Message: "could not create sender",
				Plugin:  plugin,
				Name:    name,
				Err:     err,
			}
		}
		m.senders[key] = s
		m.sendersCount[key] = 0
	}
	// Every registration gets its own wrapper with a unique id.
	u, err := uuid.NewRandom()
	if err != nil {
		return nil, &RegistrationError{
			Message: "could not generate unique id",
			Plugin:  plugin,
			Name:    name,
			Err:     err,
		}
	}
	w := &sender{
		id:      u.String(),
		tid:     tid,
		name:    name,
		plugin:  plugin,
		hash:    hash,
		manager: m,
		sender:  s,
		active:  true,
	}
	m.sendersWrapped[w.id] = w
	m.sendersCount[key]++
	return w, nil
}
// Senders returns a snapshot of all wrapped senders keyed by wrapper id.
func (m *manager) Senders() map[string]pkgsender.Sender {
	m.Lock()
	defer m.Unlock()
	snapshot := make(map[string]pkgsender.Sender, len(m.sendersWrapped))
	for id, wrapped := range m.sendersWrapped {
		snapshot[id] = wrapped
	}
	return snapshot
}
// SendersStatus aggregates the wrapped senders into one status entry per
// shared sender (tenant + name + config hash), counting how many
// wrappers reference each one.
func (m *manager) SendersStatus() map[string]SenderStatus {
	m.Lock()
	defer m.Unlock()
	statuses := make(map[string]SenderStatus)
	for _, wrapped := range m.sendersWrapped {
		key := m.mapkey(wrapped.tid, wrapped.Name(), wrapped.hash)
		if existing, seen := statuses[key]; seen {
			existing.ReferenceCount++
			statuses[key] = existing
			continue
		}
		statuses[key] = SenderStatus{
			Name:            wrapped.Name(),
			Plugin:          wrapped.Plugin(),
			Config:          wrapped.Config(),
			ReferenceCount:  1,
			LastEventTs:     wrapped.EventTs(),
			SuccessCount:    wrapped.EventSuccessCount(),
			ErrorCount:      wrapped.EventErrorCount(),
			SuccessVelocity: wrapped.EventSuccessVelocity(),
			ErrorVelocity:   wrapped.EventErrorVelocity(),
			Tid:             wrapped.tid,
		}
	}
	return statuses
}
// UnregisterSender detaches the wrapper ps, dropping the shared sender
// from the registry when its reference count reaches zero. A wrapper can
// only be unregistered once; afterwards active is false.
func (m *manager) UnregisterSender(ctx context.Context, ps pkgsender.Sender) error {
	s, ok := ps.(*sender)
	// No locking for this simple read of s.active.
	if !ok || !s.active {
		return &RegistrationError{
			Message: "sender not registered",
		}
	}
	key := m.mapkey(s.tid, s.name, s.hash)
	m.Lock()
	m.sendersCount[key]--
	if m.sendersCount[key] <= 0 {
		// Last reference: drop the shared sender from the registry.
		delete(m.sendersCount, key)
		delete(m.senders, key)
	}
	delete(m.sendersWrapped, s.id)
	m.Unlock()
	s.Lock()
	s.active = false
	s.Unlock()
	return nil
}
// === Helper Functions ==============================================

// mapkey builds the registry key identifying one shared plugin instance:
// "<orgId>/<appId>/<name>/<configHash>".
func (m *manager) mapkey(tid tenant.Id, name string, hash string) string {
	return fmt.Sprintf("%s/%s/%s/%s", tid.OrgId, tid.AppId, name, hash)
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/uber-go/tally"
"github.com/uber/kraken/core"
)
// TestCAStoreInitVolumes verifies that cache shard symlinks are spread
// roughly evenly across three equally weighted volumes.
func TestCAStoreInitVolumes(t *testing.T) {
	require := require.New(t)
	config, cleanup := CAStoreConfigFixture()
	defer cleanup()
	// Create three temp volumes and register them with equal weight.
	var locations []string
	for i := 0; i < 3; i++ {
		loc, err := ioutil.TempDir("/tmp", "volume")
		require.NoError(err)
		defer os.RemoveAll(loc)
		locations = append(locations, loc)
	}
	config.Volumes = nil
	for _, loc := range locations {
		config.Volumes = append(config.Volumes, Volume{Location: loc, Weight: 100})
	}
	_, err := NewCAStore(config, tally.NoopScope)
	require.NoError(err)
	// Count the shard entries created on each volume.
	total := 0
	var counts []int
	for _, loc := range locations {
		files, err := ioutil.ReadDir(path.Join(loc, path.Base(config.CacheDir)))
		require.NoError(err)
		counts = append(counts, len(files))
		total += len(files)
	}
	// There should be 256 symlinks total, evenly distributed across the volumes.
	require.Equal(256, total)
	for _, n := range counts {
		require.True(float32(n)/256 > float32(0.25), "%d/256 should be >0.25", n)
	}
}
// TestCAStoreInitVolumesAfterChangingVolumes verifies that cache shard
// symlinks are redistributed when the store is recreated with an
// additional volume.
func TestCAStoreInitVolumesAfterChangingVolumes(t *testing.T) {
	require := require.New(t)
	config, cleanup := CAStoreConfigFixture()
	defer cleanup()
	volume1, err := ioutil.TempDir("/tmp", "volume")
	require.NoError(err)
	defer os.RemoveAll(volume1)
	volume2, err := ioutil.TempDir("/tmp", "volume")
	require.NoError(err)
	defer os.RemoveAll(volume2)
	volume3, err := ioutil.TempDir("/tmp", "volume")
	require.NoError(err)
	defer os.RemoveAll(volume3)
	config.Volumes = []Volume{
		{Location: volume1, Weight: 100},
		{Location: volume2, Weight: 100},
		{Location: volume3, Weight: 100},
	}
	_, err = NewCAStore(config, tally.NoopScope)
	require.NoError(err)
	// Add one more volume, recreate file store.
	volume4, err := ioutil.TempDir("/tmp", "volume")
	require.NoError(err)
	// Fix: this previously removed volume3 a second time, leaking volume4.
	defer os.RemoveAll(volume4)
	config.Volumes = append(config.Volumes, Volume{Location: volume4, Weight: 100})
	_, err = NewCAStore(config, tally.NoopScope)
	require.NoError(err)
	// Classify each cache symlink by the volume it points into.
	var n1, n2, n3, n4 int
	links, err := ioutil.ReadDir(config.CacheDir)
	require.NoError(err)
	for _, link := range links {
		source, err := os.Readlink(path.Join(config.CacheDir, link.Name()))
		require.NoError(err)
		if strings.HasPrefix(source, volume1) {
			n1++
		}
		if strings.HasPrefix(source, volume2) {
			n2++
		}
		if strings.HasPrefix(source, volume3) {
			n3++
		}
		if strings.HasPrefix(source, volume4) {
			n4++
		}
	}
	// Symlinks should be recreated.
	require.Equal(256, (n1 + n2 + n3 + n4))
	require.True(float32(n1)/256 > float32(0.15))
	require.True(float32(n2)/256 > float32(0.15))
	require.True(float32(n3)/256 > float32(0.15))
	require.True(float32(n4)/256 > float32(0.15))
}
// TestCAStoreCreateUploadFileAndMoveToCache verifies the happy path: an
// upload whose content digest matches the destination digest is moved
// from the upload area into the sharded cache directory.
func TestCAStoreCreateUploadFileAndMoveToCache(t *testing.T) {
	require := require.New(t)
	config, cleanup := CAStoreConfigFixture()
	defer cleanup()
	s, err := NewCAStore(config, tally.NoopScope)
	require.NoError(err)
	src := core.DigestFixture().Hex()
	require.NoError(s.CreateUploadFile(src, 100))
	_, err = os.Stat(path.Join(config.UploadDir, src))
	require.NoError(err)
	// Compute the digest of the upload's content so the move passes
	// digest verification.
	f, err := s.uploadStore.newFileOp().GetFileReader(src, 0 /* readPartSize */)
	require.NoError(err)
	defer f.Close()
	digester := core.NewDigester()
	digest, err := digester.FromReader(f)
	require.NoError(err)
	dst := digest.Hex()
	err = s.MoveUploadFileToCache(src, dst)
	require.NoError(err)
	// The upload entry is gone and the cache entry exists under the
	// two-level shard layout (first two byte-pairs of the hex digest).
	_, err = os.Stat(path.Join(config.UploadDir, src[:2], src[2:4], src))
	require.True(os.IsNotExist(err))
	_, err = os.Stat(path.Join(config.CacheDir, dst[:2], dst[2:4], dst))
	require.NoError(err)
}
// TestCAStoreCreateUploadFileAndMoveToCacheFailure verifies that moving
// an upload to a destination digest that does not match its content
// fails digest verification and leaves no cache entry behind.
func TestCAStoreCreateUploadFileAndMoveToCacheFailure(t *testing.T) {
	require := require.New(t)
	config, cleanup := CAStoreConfigFixture()
	defer cleanup()
	s, err := NewCAStore(config, tally.NoopScope)
	require.NoError(err)
	src := core.DigestFixture().Hex()
	require.NoError(s.CreateUploadFile(src, 100))
	_, err = os.Stat(path.Join(config.UploadDir, src))
	require.NoError(err)
	// Compute the actual content digest so the expected error message
	// can be asserted exactly.
	f, err := s.uploadStore.newFileOp().GetFileReader(src, 0 /* readPartSize */)
	require.NoError(err)
	defer f.Close()
	digester := core.NewDigester()
	digest, err := digester.FromReader(f)
	require.NoError(err)
	// Use an unrelated digest as the destination to force a mismatch.
	dst := core.DigestFixture().Hex()
	err = s.MoveUploadFileToCache(src, dst)
	require.EqualError(err, fmt.Sprintf("verify digest: computed digest sha256:%s doesn't match expected value sha256:%s", digest.Hex(), dst))
	// Neither the upload entry nor a cache entry should remain.
	_, err = os.Stat(path.Join(config.UploadDir, src[:2], src[2:4], src))
	require.True(os.IsNotExist(err))
	_, err = os.Stat(path.Join(config.CacheDir, dst[:2], dst[2:4], dst))
	require.True(os.IsNotExist(err))
}
// TestCAStoreCreateCacheFile checks the write-then-read round trip:
// content stored via CreateCacheFile under its digest can be read back
// unchanged through GetCacheFileReader.
func TestCAStoreCreateCacheFile(t *testing.T) {
	require := require.New(t)
	s, cleanup := CAStoreFixture()
	defer cleanup()
	s1 := "buffer"
	computedDigest, err := core.NewDigester().FromBytes([]byte(s1))
	require.NoError(err)
	r1 := strings.NewReader(s1)
	err = s.CreateCacheFile(computedDigest.Hex(), r1)
	require.NoError(err)
	r2, err := s.GetCacheFileReader(computedDigest.Hex())
	require.NoError(err)
	b2, err := ioutil.ReadAll(r2)
	// Fix: the read error was previously ignored before comparing contents.
	require.NoError(err)
	require.Equal(s1, string(b2))
}
|
package p_00001_00100
// 98. Validate Binary Search Tree, https://leetcode.com/problems/validate-binary-search-tree/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
func isValidBST(root *TreeNode) bool {
isValid := true
isFirst := true
prevValue := 0
inorderTraversal(root, &isValid, &isFirst, &prevValue)
return isValid
}
// inorderTraversal walks the tree in-order, remembering the previously
// visited value in *prevValue. It flips *isValid to false the first time a
// value is not strictly greater than its in-order predecessor, and stops
// descending once invalidity is established. *isFirst marks that no value
// has been visited yet (so the first value skips the comparison).
func inorderTraversal(root *TreeNode, isValid *bool, isFirst *bool, prevValue *int) {
	if root == nil || !(*isValid) {
		return
	}
	inorderTraversal(root.Left, isValid, isFirst, prevValue)
	switch {
	case *isFirst:
		*isFirst = false
	case root.Val <= *prevValue:
		*isValid = false
	}
	*prevValue = root.Val
	inorderTraversal(root.Right, isValid, isFirst, prevValue)
}
|
package huffman
import (
"container/heap"
"fmt"
)
// HuffmanTree is a node (leaf or internal) in a Huffman coding tree.
type HuffmanTree interface {
	// Freq returns the combined symbol frequency of the subtree.
	Freq() int
}

// HuffmanLeaf is a terminal node holding a single symbol and its frequency.
type HuffmanLeaf struct {
	freq  int
	value rune //int32
}

// HuffmanNode is an internal node combining two subtrees.
type HuffmanNode struct {
	freq        int
	left, right HuffmanTree
}

func (leaf *HuffmanLeaf) Freq() int {
	return leaf.freq
}

func (node *HuffmanNode) Freq() int {
	return node.freq
}

// TreeHeap is a min-heap of HuffmanTrees ordered by frequency. It implements
// heap.Interface; Push and Pop must use pointer receivers so the slice
// mutations are visible to container/heap.
type TreeHeap []HuffmanTree

func (th TreeHeap) Len() int {
	return len(th)
}

func (th TreeHeap) Less(i, j int) bool {
	return th[i].Freq() < th[j].Freq()
}

// Push appends an element. Fix: the original value receiver appended to a
// copy of the slice header, silently losing every pushed element.
func (th *TreeHeap) Push(elem interface{}) {
	*th = append(*th, elem.(HuffmanTree))
}

// Pop removes and returns the last element. Fix: the original value receiver
// shrank only a copy, so Len never decreased and BuildTree looped forever.
func (th *TreeHeap) Pop() (ret interface{}) {
	old := *th
	n := len(old)
	ret = old[n-1]
	*th = old[:n-1]
	return
}

func (th TreeHeap) Swap(i, j int) {
	th[i], th[j] = th[j], th[i]
}

// BuildTree builds a Huffman tree from a symbol->frequency map by repeatedly
// merging the two lowest-frequency subtrees until one root remains.
func BuildTree(symbFreqs map[rune]int) HuffmanTree {
	var trees TreeHeap
	for c, f := range symbFreqs {
		trees = append(trees, &HuffmanLeaf{
			freq:  f,
			value: c,
		})
	}
	// heap.Init/Push/Pop need the pointer so mutations stick (see above).
	heap.Init(&trees)
	for trees.Len() > 1 {
		a := heap.Pop(&trees).(HuffmanTree)
		b := heap.Pop(&trees).(HuffmanTree)
		heap.Push(&trees, &HuffmanNode{
			freq:  a.Freq() + b.Freq(),
			left:  a,
			right: b,
		})
	}
	return heap.Pop(&trees).(HuffmanTree)
}
func ShowFreq(tree HuffmanTree, prefix []byte) {
switch i := tree.(type) {
case *HuffmanLeaf:
fmt.Printf("%c\t%d\n", i.value, i.freq)
case *HuffmanNode:
}
} |
package types
import (
"testing"
cm "github.com/zhaohaijun/matrixchain/common"
)
// TestDataReqSerializationDeserialization round-trips a DataReq message
// through the shared MessageTest serialization helper.
func TestDataReqSerializationDeserialization(t *testing.T) {
	var msg DataReq
	msg.DataType = 0x02
	hashstr := "8932da73f52b1e22f30c609988ed1f693b6144f74fed9a2a20869afa7abfdf5e"
	// Decode error deliberately ignored: hashstr is a fixed, valid hex string.
	bhash, _ := cm.HexToBytes(hashstr)
	copy(msg.Hash[:], bhash)
	MessageTest(t, &msg)
}
|
package redis2
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v7"
"net/http"
"os"
"testing"
"time"
)
// RedisClient is the shared client used by every handler and benchmark in
// this package.
var RedisClient *redis.Client
// init connects to the Redis instance named by the REDISHOST/REDISPORT
// environment variables before any handler runs.
func init() {
	host := os.Getenv("REDISHOST")
	port := os.Getenv("REDISPORT")
	RedisClient = NewClientWithParam(host, port)
	fmt.Println("go-redis init.")
}
// A runs the put and get micro-benchmarks and reports both results, to
// stdout and to the HTTP client.
func A(g *gin.Context) {
	putResult := put()
	getResult := get()
	fmt.Printf("%s\n%s\n", putResult, getResult)
	g.String(http.StatusOK, "%s\n%s\n", putResult, getResult)
}
// B times a single Redis SET and reports "ok <ms>" or "ng <ms>" to the client.
func B(g *gin.Context) {
	start := time.Now().UnixNano()
	cmd := RedisClient.Set(key, value, 0)
	end := time.Now().UnixNano()
	result := end - start
	if cmd != nil && cmd.Err() != nil {
		g.String(http.StatusOK, "ng %d", result/1e6)
		// Fix: the original fell through here and wrote a second "ok"
		// response body after the "ng" one.
		return
	}
	g.String(http.StatusOK, "ok %d", result/1e6)
	//fmt.Printf("%d \n", result/1e6)
}
// Fixed key/value pair used by every benchmark and handler in this package.
const (
	key = "go-redi-key"
	value = "go-redi-value"
)
// put benchmarks RedisClient.Set with the package-level key/value pair and
// returns the benchmark result.
func put() testing.BenchmarkResult {
	return testing.Benchmark(func(b *testing.B) {
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			if cmd := RedisClient.Set(key, value, 0); cmd != nil && cmd.Err() != nil {
				b.Fatal(cmd.Err())
			}
		}
	})
}
// get benchmarks RedisClient.Get with the package-level key and returns the
// benchmark result.
func get() testing.BenchmarkResult {
	return testing.Benchmark(func(b *testing.B) {
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			if cmd := RedisClient.Get(key); cmd != nil && cmd.Err() != nil {
				b.Fatal(cmd.Err())
			}
		}
	})
}
func NewClientWithParam(host, port string) *redis.Client {
client := redis.NewClient(&redis.Options{
Addr: fmt.Sprintf("%s:%s", host, port),
Password: "", // no password set
DB: 0, // use default DB
PoolSize:10,
MinIdleConns:10,
ReadTimeout: 1 * time.Second,
MaxRetries: 3,
DialTimeout: 2 * time.Second,
OnConnect: func(conn *redis.Conn) error {
fmt.Printf("-> Redis Connect %v\n", conn) // stdoutに出力
fmt.Printf("%v \n", conn.ClientList())
return nil
},
})
return client
} |
package optgen
import (
"bufio"
"bytes"
"fmt"
"io"
"unicode"
)
// Keep fmt imported even when no debug prints are active.
var _ = fmt.Println
// Token identifies the lexical class of a scanned item.
type Token int
const (
	ILLEGAL Token = iota
	EOF
	IDENT
	STRING
	WHITESPACE
	COMMENT
	LPAREN
	RPAREN
	LBRACKET
	RBRACKET
	LBRACE
	RBRACE
	DOLLAR
	COLON
	ASTERISK
	EQUALS
	ARROW
	AMPERSAND
	COMMA
	CARET
	ELLIPSES
	PIPE
	// Keywords.
	DEFINE
)
// LineInfo records a 0-based line number and character position.
type LineInfo struct {
	Line int
	Pos int
}
// MergeWith combines two LineInfos.
// NOTE(review): both branches return the zero LineInfo, ignoring the
// receiver and argument entirely — this looks like an unfinished stub;
// confirm the intended merge semantics before relying on it.
func (li LineInfo) MergeWith(other LineInfo) LineInfo {
	if li.Line < other.Line {
		return LineInfo{}
	}
	return LineInfo{}
}
// Scanner lexes runes from r into Tokens. tok/lit hold the most recently
// scanned token and its literal text. lineInfo tracks the current 0-based
// line and position; prev saves the position before the last read so a
// single unread can restore it.
type Scanner struct {
	r *bufio.Reader
	tok Token
	lit string
	lineInfo struct {
		line int
		pos int
		prev int
	}
}
// NewScanner returns a Scanner reading from r.
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{r: bufio.NewReader(r)}
}
// Token returns the most recently scanned token.
func (s *Scanner) Token() Token {
	return s.tok
}
// Literal returns the literal text of the most recently scanned token.
func (s *Scanner) Literal() string {
	return s.lit
}
// LineInfo returns the current position as a 1-based line and 0-based column.
func (s *Scanner) LineInfo() (line, pos int) {
	return s.lineInfo.line + 1, s.lineInfo.pos
}
// Scan reads the next token from the input, stores it (and its literal text)
// on the Scanner, and returns it. Whitespace, identifiers, string literals
// and comments are delegated to the specialized scan methods; everything
// else is matched as a one- or two-character punctuation token, with rune(0)
// (EOF/read error) mapping to EOF and unknown characters to ILLEGAL.
func (s *Scanner) Scan() Token {
	// Read the next rune.
	ch := s.read()
	// If we see whitespace then consume all contiguous whitespace.
	if unicode.IsSpace(ch) {
		s.unread()
		return s.scanWhitespace()
	}
	// If we see a letter then consume as an identifier or keyword.
	if unicode.IsLetter(ch) {
		s.unread()
		return s.scanIdentifier()
	}
	// Otherwise read the individual character.
	switch ch {
	case rune(0):
		s.tok = EOF
		s.lit = ""
	case '(':
		s.tok = LPAREN
		s.lit = "("
	case ')':
		s.tok = RPAREN
		s.lit = ")"
	case '[':
		s.tok = LBRACKET
		s.lit = "["
	case ']':
		s.tok = RBRACKET
		s.lit = "]"
	case '{':
		s.tok = LBRACE
		s.lit = "{"
	case '}':
		s.tok = RBRACE
		s.lit = "}"
	case '$':
		s.tok = DOLLAR
		s.lit = "$"
	case ':':
		s.tok = COLON
		s.lit = ":"
	case '*':
		s.tok = ASTERISK
		s.lit = "*"
	case ',':
		s.tok = COMMA
		s.lit = ","
	case '^':
		s.tok = CARET
		s.lit = "^"
	case '|':
		s.tok = PIPE
		s.lit = "|"
	case '&':
		s.tok = AMPERSAND
		s.lit = "&"
	case '=':
		// Two-character lookahead: "=>" is ARROW, otherwise unread and
		// emit plain EQUALS.
		if s.read() == '>' {
			s.tok = ARROW
			s.lit = "=>"
			break
		}
		s.unread()
		s.tok = EQUALS
		s.lit = "="
	case '.':
		// NOTE(review): if the second read yields '.' but the third does
		// not, two extra runes have been consumed and only one can be
		// unread (bufio supports a single UnreadRune) — inputs like ".."
		// would lose a character. Confirm such inputs cannot occur.
		if s.read() == '.' && s.read() == '.' {
			s.tok = ELLIPSES
			s.lit = "..."
			break
		}
		s.tok = ILLEGAL
		s.lit = "."
	case '"':
		s.unread()
		return s.scanStringLiteral()
	case '#':
		s.unread()
		return s.scanComment()
	default:
		s.tok = ILLEGAL
		s.lit = string(ch)
	}
	return s.tok
}
// read reads the next rune from the buffered reader and advances the line
// and position counters ('\n' starts a new line). The pre-read position is
// saved in lineInfo.prev so a single unread can restore it.
// Returns rune(0) if an error occurs (or io.EOF is returned).
func (s *Scanner) read() rune {
	ch, _, err := s.r.ReadRune()
	if err != nil {
		return rune(0)
	}
	s.lineInfo.prev = s.lineInfo.pos
	if ch == '\n' {
		s.lineInfo.line++
		s.lineInfo.pos = 0
	} else {
		s.lineInfo.pos++
	}
	return ch
}
// unread places the previously read rune back on the reader and restores the
// saved position (backing up one line if that rune started a new one). It
// also clears the current token state. Only a single unread is supported:
// prev is invalidated to -1 afterwards.
func (s *Scanner) unread() {
	err := s.r.UnreadRune()
	if err != nil {
		// Programmer error: unread without a preceding read.
		panic(err)
	}
	s.tok = ILLEGAL
	s.lit = ""
	if s.lineInfo.pos == 0 {
		s.lineInfo.line--
	}
	s.lineInfo.pos = s.lineInfo.prev
	s.lineInfo.prev = -1
}
// scanWhitespace consumes the current rune plus every contiguous whitespace
// rune that follows, storing the run in s.lit and returning WHITESPACE.
// EOF or a non-whitespace rune (which is unread) ends the run.
func (s *Scanner) scanWhitespace() Token {
	var buf bytes.Buffer
	buf.WriteRune(s.read())
	for ch := s.read(); ch != rune(0); ch = s.read() {
		if !unicode.IsSpace(ch) {
			s.unread()
			break
		}
		buf.WriteRune(ch)
	}
	s.tok = WHITESPACE
	s.lit = buf.String()
	return WHITESPACE
}
// scanIdentifier consumes the current rune and all following letter/digit
// runes, then classifies the result: "define" becomes the DEFINE keyword,
// anything else a plain IDENT. EOF or a non-identifier rune (unread) ends
// the run.
func (s *Scanner) scanIdentifier() Token {
	var buf bytes.Buffer
	buf.WriteRune(s.read())
	for ch := s.read(); ch != rune(0); ch = s.read() {
		if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) {
			s.unread()
			break
		}
		buf.WriteRune(ch)
	}
	lit := buf.String()
	if lit == "define" {
		s.tok = DEFINE
	} else {
		s.tok = IDENT
	}
	s.lit = lit
	return s.tok
}
// scanStringLiteral consumes the opening quote and all characters up to and
// including the closing quote, producing STRING. A newline or EOF before the
// closing quote yields ILLEGAL with the partial text as the literal. Note
// that there is no escape handling: any '"' terminates the string.
func (s *Scanner) scanStringLiteral() Token {
	// Create a buffer and read the current character into it.
	var buf bytes.Buffer
	buf.WriteRune(s.read())
	// Read characters until the closing quote is found, or until a newline or
	// EOF character is read.
	for {
		ch := s.read()
		if ch == rune(0) || ch == '\n' {
			s.tok = ILLEGAL
			break
		}
		buf.WriteRune(ch)
		if ch == '"' {
			s.tok = STRING
			break
		}
	}
	s.lit = buf.String()
	return s.tok
}
// scanComment consumes the current '#' rune and all characters up to (but
// not including) the trailing newline, which is unread so the caller sees it
// as whitespace. Produces COMMENT with the consumed text as the literal.
func (s *Scanner) scanComment() Token {
	var buf bytes.Buffer
	buf.WriteRune(s.read())
	// Read every subsequent character into the buffer until either newline
	// (pushed back) or EOF is encountered.
	for {
		ch := s.read()
		if ch == rune(0) {
			break
		}
		// Fix: the original re-tested ch == rune(0) here as well, which was
		// unreachable after the break above; the dead check is removed.
		if ch == '\n' {
			s.unread()
			break
		}
		buf.WriteRune(ch)
	}
	s.tok = COMMENT
	s.lit = buf.String()
	return COMMENT
}
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package interfaces
import (
"encoding/json"
"testing"
"inet.af/netaddr"
)
// TestGetState calls GetState twice, checks the snapshot JSON-serializes,
// and requires both snapshots to compare equal under FilterAll.
func TestGetState(t *testing.T) {
	st, err := GetState()
	if err != nil {
		t.Fatal(err)
	}
	j, err := json.MarshalIndent(st, "", "\t")
	if err != nil {
		t.Errorf("JSON: %v", err)
	}
	t.Logf("Got: %s", j)
	t.Logf("As string: %s", st)
	st2, err := GetState()
	if err != nil {
		t.Fatal(err)
	}
	if !st.EqualFiltered(st2, FilterAll) {
		// let's assume nobody was changing the system network interfaces between
		// the two GetState calls.
		t.Fatal("two States back-to-back were not equal")
	}
	t.Logf("As string:\n\t%s", st)
}
// TestLikelyHomeRouterIP logs the detected gateway and self IP. It is
// informational only: when no router is detected it logs and returns
// without failing.
func TestLikelyHomeRouterIP(t *testing.T) {
	gw, my, ok := LikelyHomeRouterIP()
	if !ok {
		t.Logf("no result")
		return
	}
	t.Logf("myIP = %v; gw = %v", my, gw)
}
// TestIsUsableV6 checks isUsableV6 against representative addresses: usable
// globals and ULAs report true, while the Tailscale ULA prefix, link-local,
// the zero address, and IPv4 addresses report false.
func TestIsUsableV6(t *testing.T) {
	cases := []struct {
		name string
		ip   string
		want bool
	}{
		{"first ULA", "fc00::1", true},
		{"Tailscale", "fd7a:115c:a1e0::1", false},
		{"Cloud Run", "fddf:3978:feb1:d745::1", true},
		{"zeros", "0000:0000:0000:0000:0000:0000:0000:0000", false},
		{"Link Local", "fe80::1", false},
		{"Global", "2602::1", true},
		{"IPv4 public", "192.0.2.1", false},
		{"IPv4 private", "192.168.1.1", false},
	}
	for _, tc := range cases {
		got := isUsableV6(netaddr.MustParseIP(tc.ip))
		if got != tc.want {
			t.Errorf("isUsableV6(%s) = %v, want %v", tc.name, got, tc.want)
		}
	}
}
|
package main
// Leetcode 79 (medium): Word Search.
// exist reports whether word can be traced through horizontally or
// vertically adjacent cells of board, using each cell at most once. It
// tries a DFS from every cell matching the first letter.
func exist(board [][]byte, word string) bool {
	if word == "" {
		return true
	}
	if len(board) == 0 {
		return false
	}
	rows, cols := len(board), len(board[0])
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			if board[r][c] == word[0] && dfsExist(board, r, c, word, 0) {
				return true
			}
		}
	}
	return false
}
// dfsExist reports whether word[i:] can be traced starting at board[r][c],
// moving between adjacent cells. The current cell is temporarily overwritten
// with '0' to prevent reuse and restored before a failing return.
func dfsExist(board [][]byte, r, c int, word string, i int) bool {
	rows, cols := len(board), len(board[0])
	if r < 0 || r >= rows || c < 0 || c >= cols || board[r][c] != word[i] {
		return false
	}
	if i == len(word)-1 {
		return true
	}
	saved := board[r][c]
	board[r][c] = '0' // mark visited
	// Explore up, right, down, left (same order as the original).
	dr := [4]int{-1, 0, 1, 0}
	dc := [4]int{0, 1, 0, -1}
	for k := 0; k < 4; k++ {
		if dfsExist(board, r+dr[k], c+dc[k], word, i+1) {
			return true
		}
	}
	board[r][c] = saved
	return false
}
|
package controllers
// IndexController serves the site's landing page.
type IndexController struct {
	BaseController
}
// Get renders index.html through the embedded BaseController's Show helper.
func (c *IndexController) Get() {
	c.TplName="index.html"
	c.Show()
}
|
// +build !darwin
package main
import (
"errors"
"net"
)
// LaunchdSocket is the non-darwin stub: launchd socket activation is a
// macOS-only facility, so it always reports an error and no listener.
func LaunchdSocket() (net.Listener, error) {
	const msg = "launchd is only supported on darwin"
	return nil, errors.New(msg)
}
|
package main
import (
"fmt"
"time"
"github.com/miekg/dns"
)
// main probes a local DNS server at 127.0.0.1:5300 every five seconds by
// querying an A record for "!runprobe", and fires onServerUp/onServerDown
// only on state transitions tracked in lastUp.
func main() {
	c := new(dns.Client)
	c.Timeout = 1 * time.Second
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("!runprobe"), dns.TypeA)
	t := time.NewTicker(time.Second * 5)
	for range t.C {
		fmt.Println("TICK")
		r, _, err := c.Exchange(m, "127.0.0.1:5300")
		// Rcode 15 — presumably a sentinel rcode the probed server answers
		// with for this magic name; TODO confirm against the server.
		if err == nil && r.Rcode == 15 {
			if !lastUp {
				onServerUp()
			}
		} else {
			if lastUp {
				onServerDown()
			}
		}
	}
}
// lastUp holds the most recently observed server state so the callbacks in
// main fire only on up/down transitions.
var lastUp = false
// onServerUp records the down->up transition.
func onServerUp() {
	fmt.Println("onServerUp()")
	lastUp = true
}
// onServerDown records the up->down transition.
func onServerDown() {
	fmt.Println("onServerDown()")
	lastUp = false
}
|
/*
Copyright 2020 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package analyze
import (
"context"
"path/filepath"
"strings"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/build/buildpacks"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/build/jib"
koinit "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/build/ko/init"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/docker"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/initializer/build"
)
// builderAnalyzer walks a directory tree looking for files that identify
// supported build systems (jib, Dockerfile, ko, buildpacks) and accumulates
// the matching InitBuilder configs in foundBuilders.
type builderAnalyzer struct {
	directoryAnalyzer
	enableJibInit bool
	enableJibGradleInit bool
	enableKoInit bool
	enableBuildpacksInit bool
	findBuilders bool
	buildpacksBuilder string
	foundBuilders []build.InitBuilder
	// parentDirToStopFindJibSettings, when non-empty, names the directory
	// under which jib detection is suppressed (a jib project was found there).
	parentDirToStopFindJibSettings string
}
// analyzeFile inspects one file for builder configs and appends any matches.
// Jib detection is skipped while inside a directory already identified as a
// jib project.
func (a *builderAnalyzer) analyzeFile(ctx context.Context, filePath string) error {
	if a.findBuilders {
		lookForJib := a.parentDirToStopFindJibSettings == "" || a.parentDirToStopFindJibSettings == a.currentDir
		// Note: this re-assigns lookForJib (same scope), not a shadow.
		builderConfigs, lookForJib := a.detectBuilders(ctx, filePath, lookForJib)
		a.foundBuilders = append(a.foundBuilders, builderConfigs...)
		if !lookForJib {
			a.parentDirToStopFindJibSettings = a.currentDir
		}
	}
	return nil
}
// exitDir re-enables jib detection once the walk leaves the directory that
// suppressed it.
func (a *builderAnalyzer) exitDir(dir string) {
	if a.parentDirToStopFindJibSettings == dir {
		a.parentDirToStopFindJibSettings = ""
	}
}
// detectBuilders checks if a path is a builder config, and if it is, returns the InitBuilders representing the
// configs. Also returns a boolean marking search completion for subdirectories (true = subdirectories should
// continue to be searched, false = subdirectories should not be searched for more builders)
func (a *builderAnalyzer) detectBuilders(ctx context.Context, path string, detectJib bool) ([]build.InitBuilder, bool) {
	var results []build.InitBuilder
	searchSubDirectories := true
	// TODO: Remove backwards compatibility if statement (not entire block)
	if a.enableJibInit && detectJib {
		// Check for jib
		if builders := jib.Validate(ctx, path, a.enableJibGradleInit); builders != nil {
			for i := range builders {
				results = append(results, builders[i])
			}
			// A jib project owns its whole subtree: stop descending.
			searchSubDirectories = false
		}
	}
	// Check for Dockerfile
	base := filepath.Base(path)
	if strings.Contains(strings.ToLower(base), "dockerfile") {
		if docker.Validate(path) {
			results = append(results, docker.ArtifactConfig{
				// Docker expects forward slashes (for Linux containers at least)
				File: filepath.ToSlash(path),
			})
		}
	}
	// Check for a ko build config.
	if a.enableKoInit {
		if koinit.Validate(path) {
			results = append(results, koinit.ArtifactConfig{
				File: path,
			})
		}
	}
	// TODO: Remove backwards compatibility if statement (not entire block)
	if a.enableBuildpacksInit {
		// Check for buildpacks
		if buildpacks.Validate(path) {
			results = append(results, buildpacks.ArtifactConfig{
				File: path,
				Builder: a.buildpacksBuilder,
			})
		}
	}
	return results, searchSubDirectories
}
|
package comparetriplets
// CompareTriplets scores two rating triplets element-by-element and returns
// [aliceScore, bobScore]: a point goes to whichever side is strictly larger
// at each index; ties score nothing.
// Link to the task: https://www.hackerrank.com/challenges/compare-the-triplets/problem
func CompareTriplets(a []int32, b []int32) []int32 {
	scores := make([]int32, 2)
	for i := range a {
		switch {
		case a[i] > b[i]:
			scores[0]++
		case a[i] < b[i]:
			scores[1]++
		}
	}
	return scores
}
|
package messages
import "github.com/AsynkronIT/protoactor-go/actor"
// Result is a generic reply carrying the PID of the responding actor.
type Result struct {
	Pid *actor.PID
}
// UnknownResult is a reply whose outcome could not be classified.
type UnknownResult struct {
	Pid *actor.PID
}
// FailedButConsistentResult is a failure reply (consistent variant).
type FailedButConsistentResult struct {
	Pid *actor.PID
}
// FailedAndInconsistent is a failure reply (inconsistent variant).
type FailedAndInconsistent struct {
	Pid *actor.PID
}
// SuccessResult is a success reply.
type SuccessResult struct {
	Pid *actor.PID
}
|
package _1_Factory_Pattern
// Factory pattern demo.
// Step 1:
// declare the product interfaces.
type Shape interface {
	Draw() string
}
type Color interface {
	Fill() string
}
// Step 2:
// concrete types implementing the product interfaces. Each Draw/Fill
// returns its own type name.
type Rectangle struct{}
type Square struct{}
type Circle struct {
	Color string
	X, Y, Radius int
}
func (r Rectangle) Draw() string {
	return "Rectangle"
}
func (s Square) Draw() string {
	return "Square"
}
func (c Circle) Draw() string {
	return "Circle"
}
type Green struct{}
type Red struct{}
type Blue struct{}
func (r Blue) Fill() string {
	return "Blue"
}
func (s Red) Fill() string {
	return "Red"
}
func (c Green) Fill() string {
	return "Green"
}
// Step 3:
// factories that create product instances from a given name.
type ShapeFactory struct{}
type ColorFactory struct{}
// GetShape returns a new shape matching the given name, or nil if unknown.
func (s ShapeFactory) GetShape(shape string) Shape {
	switch shape {
	case "Rectangle":
		return new(Rectangle)
	case "Square":
		return new(Square)
	case "Circle":
		return new(Circle)
	}
	return nil
}
// GetColor returns a new color matching the given name, or nil if unknown.
func (s ColorFactory) GetColor(color string) Color {
	switch color {
	case "Green":
		return new(Green)
	case "Red":
		return new(Red)
	case "Blue":
		return new(Blue)
	}
	return nil
}
// GetColor on ShapeFactory always returns nil: a shape factory produces no colors.
func (s ShapeFactory) GetColor(color string) Color {
	return nil
}
func (s ShapeFactory) Name() string {
	return "Shape"
}
// GetShape on ColorFactory always returns nil: a color factory produces no shapes.
func (s ColorFactory) GetShape(shape string) Shape {
	return nil
}
// Name returns this factory's identifier.
// NOTE(review): returns lowercase "color" while ShapeFactory.Name returns
// capitalized "Shape" — confirm whether the casing mismatch is intentional.
func (s ColorFactory) Name() string {
	return "color"
}
|
package app
import (
"bytes"
"fmt"
"html/template"
"log"
"net/http"
"time"
"bitbucket.com/barrettbsi/broadvid-adscoops-shared/adscoopUtils"
"bitbucket.com/barrettbsi/broadvid-adscoops-shared/structs"
"github.com/BurntSushi/toml"
"github.com/go-martini/martini"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/websocket"
"github.com/jinzhu/gorm"
"github.com/martini-contrib/render"
"github.com/martini-contrib/secure"
"github.com/martini-contrib/sessions"
)
var (
	// dbConn presumably names an environment variable for the DB connection
	// and dbTable the backing table — neither is referenced in this file;
	// confirm usage elsewhere in the package.
	dbConn = "GO_DATABASE_CONN"
	dbTable = "adscoops"
	// tomlFile is the config path read by loadToml.
	tomlFile = "config.toml"
	asUtils *adscoopUtils.UtilManager
	config tomlConfig
	db gorm.DB
)
// loadToml populates config from config.toml. Decode failures are only
// printed, leaving config at its zero value.
func loadToml() {
	if _, err := toml.DecodeFile(tomlFile, &config); err != nil {
		fmt.Println(err)
		return
	}
}
// App wires up and returns the martini application: it opens the MySQL/gorm
// connection described by config.toml, starts the background cache-refresh
// and websocket keep-alive loops, installs session, security and template
// middleware, registers the controllers, and mounts the /wsupdates
// websocket endpoint. isTesting skips the production secure middleware.
func App(isTesting bool) (m *martini.ClassicMartini, err error) {
	loadToml()
	db, err = gorm.Open("mysql", config.SqlConnection)
	if err != nil {
		log.Println("err", err)
		return
	}
	db.DB().Ping()
	db.DB().SetMaxIdleConns(10)
	db.DB().SetMaxOpenConns(100)
	// Background loops: periodic campaign cache refresh and websocket pings.
	go processFuncs()
	go keepWSAlive()
	asUtils = adscoopUtils.NewUtilManager(&db)
	m = martini.Classic()
	store := sessions.NewCookieStore([]byte("asdlg908dgslkasdgn"))
	m.Use(sessions.Sessions("ascp", store))
	if !isTesting {
		martini.Env = martini.Prod
		m.Use(secure.Secure(secure.Options{
			AllowedHosts: []string{"localhost:3003", "client-dash.adscoops.com", "localhost:3009"},
			SSLProxyHeaders: map[string]string{"X-Forwarded-Proto": "https"},
			STSSeconds: 315360000,
			STSIncludeSubdomains: true,
			FrameDeny: true,
			ContentTypeNosniff: true,
			BrowserXssFilter: true,
			// ContentSecurityPolicy: "default-src 'self'",
		}))
	}
	// Template helpers made available to every rendered view.
	priFuncs := template.FuncMap{
		"LoadTemplate": func(name string, data interface{}) (ret template.HTML, err error) {
			var buf bytes.Buffer
			t := template.Must(template.ParseFiles("templates/" + name + ".tmpl"))
			err = t.Execute(&buf, data)
			ret = template.HTML(buf.String())
			return
		},
		"FormatReadableTime": func(dateTime time.Time) (ret template.HTML, err error) {
			ret = template.HTML(dateTime.Format("01/02/2006 03:04 PM"))
			return
		},
		"FormatReadableTimeLosAngeles": func(dateTime time.Time) (ret template.HTML, err error) {
			// Reinterpret the stored wall-clock time as UTC, then convert
			// to Los Angeles local time for display.
			pst := time.Date(dateTime.Year(), dateTime.Month(), dateTime.Day(),
				dateTime.Hour(), dateTime.Minute(), 0, 0, time.UTC)
			location, _ := time.LoadLocation("America/Los_Angeles")
			dateTime = pst.In(location)
			ret = template.HTML(dateTime.Format("01/02/2006 03:04 PM"))
			return
		},
		"Addition": func(numone int, numtwo int) int {
			return numone + numtwo
		},
	}
	funcMap := []template.FuncMap{
		priFuncs,
	}
	m.Use(render.Renderer(render.Options{
		Layout: "layout",
		Funcs: funcMap,
	}))
	controllersSetup(m)
	// Websocket endpoint: upgrade the connection, register the client under
	// its ClientID, then rebroadcast incoming messages until a read fails.
	m.Get("/wsupdates", requireLogin, func(w http.ResponseWriter, r *http.Request, user *UserWithPolicy) {
		uid := fmt.Sprintf("%v", user.ClientID)
		ws, err := websocket.Upgrade(w, r, nil, 1024, 1024)
		if _, ok := err.(websocket.HandshakeError); ok {
			http.Error(w, "Not a websocket handshake", 400)
			return
		} else if err != nil {
			log.Println(err)
			return
		}
		client := ws.RemoteAddr()
		sockCli := ClientConn{uid, ws, client}
		addClient(sockCli)
		for {
			messageType, p, err := ws.ReadMessage()
			if err != nil {
				// Read error means the socket is gone: deregister and stop.
				deleteClient(sockCli)
				return
			}
			broadcastMessage(messageType, p, uid)
		}
	})
	return
}
// processFuncs refreshes the campaign caches immediately and then again
// every five minutes. Runs forever; intended to be started as a goroutine.
func processFuncs() {
	go setupCampaignCaches()
	ticker := time.NewTicker(time.Minute * 5)
	for range ticker.C {
		go setupCampaignCaches()
	}
}
// keepWSAlive broadcasts a short message to every connected websocket
// client every 20 seconds. Runs forever; intended as a goroutine.
func keepWSAlive() {
	ticker := time.NewTicker(time.Second * 20)
	for range ticker.C {
		broadcastMessageToAll(1, []byte("hi"))
	}
}
// requireLogin is martini middleware that resolves the session's user_id
// into a UserWithPolicy (only for clients with logins enabled), attaches the
// client name and policy, and maps the user into the request context.
// Missing sessions redirect to /login; failed lookups additionally clear
// the session cookie before redirecting.
func requireLogin(s sessions.Session, r render.Render, c martini.Context) {
	v := s.Get("user_id")
	if v == nil {
		r.Redirect("/login")
		return
	}
	var user UserWithPolicy
	id := v.(uint)
	err := db.Select("adscoop_client_users.*").Joins("JOIN adscoop_clients ON adscoop_clients.id = adscoop_client_users.client_id").Where("adscoop_clients.enable_client_login = 1 AND adscoop_client_users.id = ?", id).Find(&user).Error
	if err != nil {
		// Invalidate the session cookie before bouncing to login.
		ops := sessions.Options{
			MaxAge: -1,
		}
		s.Options(ops)
		s.Clear()
		r.Redirect("/login")
		return
	}
	var client structs.AdscoopClient
	db.Where("id = ?", user.ClientID).Find(&client)
	user.Name = client.Name
	db.Find(&user.Policy, user.UserPolicyID)
	c.Map(&user)
}
|
package dig
import "github.com/mazrean/gold-rush-beta/openapi"
// Point wraps an openapi.Dig request with the expected treasure Amount at
// that spot and its Type.
type Point struct {
	*openapi.Dig
	Amount int32
	Type string
}
var (
	// Per-depth tuning tables, indexed by Depth-1 (depths 1..10).
	// Values appear empirically chosen — TODO confirm their derivation.
	depthTimeMap = [10]float64{8, 9, 10, 11, 12, 12.5, 13, 13.5, 14, 14.5}
	depthCoinMap = [10]float64{0.5, 1, 2, 3, 4, 5, 7.5, 10, 15, 35}
)
// priority scores a point for scheduling: the expected amount weighted by
// the per-depth coin value, discounted by remaining depth and per-depth
// time. Higher is better (see PointHeap.Less).
func (p *Point) priority() float64 {
	return float64(p.Amount) * depthCoinMap[p.Depth-1] / ((11 - float64(p.Depth)) * depthTimeMap[p.Depth-1])
}
// PointHeap is a max-heap of points ordered by priority. It implements
// heap.Interface; Push and Pop use pointer receivers so the slice mutations
// are visible to container/heap.
type PointHeap []*Point
func (ph PointHeap) Len() int { return len(ph) }
func (ph PointHeap) Less(i, j int) bool {
	return ph[i].priority() > ph[j].priority()
}
func (ph PointHeap) Swap(i, j int) { ph[i], ph[j] = ph[j], ph[i] }
func (ph *PointHeap) Push(x interface{}) {
	*ph = append(*ph, x.(*Point))
}
func (ph *PointHeap) Pop() interface{} {
	old := *ph
	n := len(old)
	x := old[n-1]
	*ph = old[0 : n-1]
	return x
}
|
package main
import (
myjs "syscall/js"
"github.com/robertkrimen/otto"
)
// document caches the browser's global DOM document object.
var document = myjs.Global().Get("document")
// getElementByID returns the DOM element with the given id.
func getElementByID(id string) myjs.Value {
	return document.Call("getElementById", id)
}
// renderEditor appends a two-pane editor (input textarea plus preview div)
// to parent and returns the editor container element.
func renderEditor(parent myjs.Value) myjs.Value {
	editorMarkup := `
	<div id="editor" style="display: flex; flex-flow: row wrap;">
		<textarea id="input" style="width: 50%; height: 400px"></textarea>
		<div id="preview" style="width: 50%;"></div>
	</div>
	`
	parent.Call("insertAdjacentHTML", "beforeend", editorMarkup)
	return getElementByID("editor")
}
// main runs a tiny in-browser JavaScript playground (compiled to wasm):
// code typed into the textarea is executed with the otto interpreter on
// every input event and the result shown in the preview pane. The stop
// button signals quit and the editor is removed from the DOM.
func main() {
	quit := make(chan struct{}, 0)
	// See example 2: Enable the stop button
	stopButton := getElementByID("stop")
	stopButton.Set("disabled", false)
	stopButton.Set("onclick", myjs.NewCallback(func([]myjs.Value) {
		println("stopping")
		stopButton.Set("disabled", true)
		quit <- struct{}{}
	}))
	editor := renderEditor(document.Get("body"))
	preview := getElementByID("preview")
	input := getElementByID("input")
	vm := otto.New()
	// renderButton := getElementByID("render")
	input.Set("oninput", myjs.NewCallback(func([]myjs.Value) {
		// Evaluation/conversion errors are deliberately ignored; the
		// preview simply shows whatever string conversion produced.
		v, _ := vm.Run(input.Get("value").String())
		s, _ := v.ToString()
		preview.Set("textContent", s)
	}))
	<-quit
	editor.Call("remove")
}
|
package main
import (
"io"
"io/ioutil"
"log"
"github.com/classmethod/aurl/profiles"
"github.com/classmethod/aurl/request"
"gopkg.in/alecthomas/kingpin.v2"
)
// Exit codes are int values that represent an exit code for a particular error.
const (
	ExitCodeOK int = 0
	ExitCodeError int = 1 + iota
)
// CLI is the command line object
type CLI struct {
	// outStream and errStream are the stdout and stderr
	// to write message from the CLI.
	outStream, errStream io.Writer
}
// Command-line flags and the positional url argument, parsed by kingpin in Run.
var (
	profileName = kingpin.Flag("profile", "Set profile name. (default: \"default\")").Short('p').Default("default").String()
	method = kingpin.Flag("request", "Set HTTP request method. (default: \"GET\")").Short('X').Default("GET").String()
	headers = HTTPHeader(kingpin.Flag("header", "Add HTTP headers to the request.").Short('H').PlaceHolder("HEADER:VALUE"))
	data = kingpin.Flag("data", "Set HTTP request body.").Short('d').String()
	insecure = kingpin.Flag("insecure", "Disable SSL certificate verification.").Short('k').Bool()
	printBody = kingpin.Flag("print-body", "Enable printing response body to stdout. (default: enabled, try --no-print-body)").Default("true").Bool()
	printHeaders = kingpin.Flag("print-headers", "Enable printing response headers JSON to stdout. (default: disabled, try --no-print-headers)").Bool()
	verbose = kingpin.Flag("verbose", "Enable verbose logging to stderr.").Short('v').Bool()
	targetUrl = kingpin.Arg("url", "The URL to request").Required().String()
)
// Run invokes the CLI with the given arguments.
// It parses flags, routes verbose logging to errStream (otherwise discards
// it), loads the requested profile, and executes the HTTP request. Returns
// ExitCodeOK on success; fatal errors terminate via kingpin.FatalIfError.
func (cli *CLI) Run(args []string) int {
	kingpin.UsageTemplate(kingpin.CompactUsageTemplate).Version(version).Author(maintainer)
	kingpin.CommandLine.VersionFlag.Short('V')
	kingpin.CommandLine.HelpFlag.Short('h')
	kingpin.CommandLine.Help = "Command line utility to make HTTP request with OAuth2."
	kingpin.Parse()
	if *verbose {
		log.SetOutput(cli.errStream)
		log.SetPrefix("**** ")
		log.SetFlags(log.LstdFlags | log.Lshortfile)
	} else {
		log.SetOutput(ioutil.Discard)
	}
	log.Println("Parsed arguments:")
	log.Printf("  profile: %s\n", *profileName)
	log.Printf("  request: %s\n", *method)
	log.Printf("  headers: %s\n", *headers)
	log.Printf("  data: %s\n", *data)
	log.Printf("  insecure: %v\n", *insecure)
	log.Printf("  printBody: %v\n", *printBody)
	log.Printf("  printHeaders: %v\n", *printHeaders)
	log.Printf("  verbose: %v\n", *verbose)
	log.Printf("  targetUrl: %v\n", *targetUrl)
	// TODO: to be simplified
	// name, version and maintainer are defined elsewhere in this package.
	profiles.Name = name
	profiles.Version = version
	if profile, err := profiles.LoadProfile(*profileName); err != nil {
		kingpin.FatalIfError(err, "Load profile failed")
		return ExitCodeError
	} else {
		execution := &request.AurlExecution{
			Name: name,
			Version: version,
			Profile: profile,
			Method: method,
			Headers: headers,
			Data: data,
			Insecure: insecure,
			PrintBody: printBody,
			PrintHeaders: printHeaders,
			TargetUrl: targetUrl,
		}
		kingpin.FatalIfError(execution.Execute(), "Request failed")
		return ExitCodeOK
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.