text stringlengths 11 4.05M |
|---|
package storage
import (
"context"
"errors"
"io/ioutil"
"log"
"os"
"path"
"time"
"github.com/sirupsen/logrus"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// MinIO wraps a minio S3 client together with a local scratch
// directory used to stage files on disk before uploading them.
type MinIO struct {
	*minio.Client
	root string // temporary directory where uploads are staged
}
// newMinIO connects to the MinIO server at addr and prepares a
// temporary staging directory for uploads.
//
// NOTE(review): credentials are hard-coded; they should come from
// configuration or the environment.
func newMinIO(addr string) (*MinIO, error) {
	s3Client, err := minio.New(addr, &minio.Options{
		Creds: credentials.NewStaticV4("liuz0063", "12345678", ""),
	})
	if err != nil {
		// BUG FIX: the original called log.Fatalln here, killing the
		// whole process even though the function signature promises to
		// return the error to the caller.
		log.Println("minio client init failed:", err)
		return nil, err
	}
	tmpDir, err := ioutil.TempDir("", "minIO")
	if err != nil {
		return nil, err
	}
	return &MinIO{
		Client: s3Client,
		root:   tmpDir,
	}, nil
}
// Close is currently a no-op; the temporary staging directory is left
// in place. NOTE(review): presumably this satisfies a storage-backend
// interface; consider removing m.root here — confirm lifecycle.
func (m *MinIO) Close() {
}
// Image stages the image bytes under the scratch directory and uploads
// them to a per-day bucket (named YYYY-MM-DD) on the MinIO server.
func (m *MinIO) Image(id string, data []byte) error {
	fn := path.Join(m.root, id)
	// BUG FIX: the original checked os.Stat(id) (the bare id, not the
	// staged path) and tested os.IsExist(err) — but Stat returns a nil
	// error when the file exists, so that branch could never fire.
	// Check the staged path and treat a nil error as "already exists".
	if _, err := os.Stat(fn); err == nil {
		return errors.New("file exists")
	}
	if err := ioutil.WriteFile(fn, data, os.ModePerm); err != nil {
		return errors.New("write file to temporary location failed")
	}
	bucket := time.Now().Format("2006-01-02")
	ctx := context.Background()
	exists, err := m.BucketExists(ctx, bucket)
	if err != nil {
		logrus.Error(err)
	} else if !exists {
		if err := m.MakeBucket(ctx, bucket, minio.MakeBucketOptions{
			Region: "sg",
		}); err != nil {
			logrus.Error("make bucket error: ", err)
		}
	}
	if _, err := m.FPutObject(ctx, bucket, id, fn, minio.PutObjectOptions{}); err != nil {
		return errors.New("file upload to cloud storage failed")
	}
	logrus.Debug("Successfully uploaded")
	return nil
}
|
package pool
import (
"sync"
. "github.com/rainmyy/easyDB/library/res"
)
// Default pool sizing. NOTE(review): "defailtTotal" is misspelled but
// kept as-is because other files in the package may reference it.
const (
	defaultRuntineNumber = 10
	defailtTotal         = 10
)
// Pool runs queued tasks on a bounded set of goroutines and collects
// their responses keyed by task name.
type Pool struct {
	//mutex sync.WaitGroup
	RuntineNumber int                       // number of worker goroutines
	Total         int                       // configured total (set in Init)
	taskQuery     chan *Queue               // pending tasks
	taskResult    chan map[string]*Reponse  // per-task results from workers
	taskResponse  map[string]*Reponse       // aggregated results, filled by Start
}
/**
 * Queue is one unit of work: a named task holding the function to run,
 * an optional callback, and a one-slot channel for its result.
 * (Comment translated from Chinese: "execution queue".)
 */
type Queue struct {
	Name     string
	result   chan *Reponse // receives the task's response; closed by excelQuery
	Excel    *ExcelFunc    // the main function to execute
	CallBack *CallBackFunc // optional callback run after the main function
}
// ExcelFunc describes the main function a task executes.
// NOTE(review): "Excel" appears to be a misspelling of "execute" —
// kept for compatibility with existing callers.
type ExcelFunc struct {
	Name     string
	Function interface{}   // function value invoked via FuncCall
	Params   []interface{} // arguments forwarded to Function
}
// CallBackFunc describes the optional callback invoked after a task's
// main function completes.
type CallBackFunc struct {
	name     string
	Function interface{}   // callback function value invoked via FuncCall
	Params   []interface{} // arguments forwarded to Function
}
// GetInstance allocates and returns a fresh, zero-valued Pool; call
// Init before use.
func GetInstance() *Pool {
	return &Pool{}
}
// QueryInit builds a named Queue holding the function to execute, its
// parameters, and a buffered one-slot result channel.
func QueryInit(name string, function interface{}, params ...interface{}) *Queue {
	return &Queue{
		Name:   name,
		Excel:  &ExcelFunc{Function: function, Params: params},
		result: make(chan *Reponse, 1),
	}
}
// CallBackInit attaches a callback to the queue and returns the queue
// so calls can be chained.
func (q *Queue) CallBackInit(name string, function interface{}, params ...interface{}) *Queue {
	q.CallBack = &CallBackFunc{
		name:     name,
		Function: function,
		Params:   params,
	}
	return q
}
// Init sizes the pool and allocates its task and result channels; it
// returns the pool for chaining.
func (this *Pool) Init(runtineNumber, total int) *Pool {
	this.RuntineNumber = runtineNumber
	this.Total = total
	this.taskQuery = make(chan *Queue, runtineNumber)
	this.taskResult = make(chan map[string]*Reponse, runtineNumber)
	this.taskResponse = make(map[string]*Reponse)
	return this
}
// Start launches up to RuntineNumber workers (capped at the number of
// queued tasks). Each worker takes one task, executes it, and publishes
// its response keyed by task name. Start blocks until every worker has
// finished and all results are merged into taskResponse.
func (this *Pool) Start() {
	runtineNumber := this.RuntineNumber
	if len(this.taskQuery) != runtineNumber {
		runtineNumber = len(this.taskQuery)
	}
	var mutex sync.WaitGroup
	for i := 0; i < runtineNumber; i++ {
		mutex.Add(1)
		go func(num int) {
			defer mutex.Done()
			task, ok := <-this.taskQuery
			if !ok {
				// BUG FIX: the original read task.Name before checking
				// ok, which panics on the nil task received from a
				// closed channel. Report the error under an empty key.
				response := ReponseIntance()
				response.Result = ResultInstance().ErrorParamsResult()
				this.taskResult <- map[string]*Reponse{"": response}
				return
			}
			taskName := task.Name
			task.excelQuery()
			taskResult, ok := <-task.result
			if !ok {
				// Result channel closed without a value: task produced
				// nothing; record an empty-result response.
				response := ReponseIntance()
				response.Result = ResultInstance().EmptyResult()
				this.taskResult <- map[string]*Reponse{taskName: response}
				return
			}
			this.taskResult <- map[string]*Reponse{taskName: taskResult}
		}(i)
	}
	mutex.Wait()
	// Every worker sent exactly one map; drain and merge them.
	for i := 0; i < runtineNumber; i++ {
		if result, ok := <-this.taskResult; ok {
			for name, value := range result {
				this.taskResponse[name] = value
			}
		}
	}
}
// TaskResult returns the aggregated results keyed by task name. It is
// only meaningful after Start has returned.
func (this *Pool) TaskResult() map[string]*Reponse {
	return this.taskResponse
}
// Stop closes the result channel.
// NOTE(review): taskQuery is never closed, so a worker blocked on an
// empty task queue would wait forever — confirm intended lifecycle.
func (this *Pool) Stop() {
	close(this.taskResult)
}
// AddTask enqueues a task for execution; it blocks when the queue's
// buffer (RuntineNumber slots) is full.
func (this *Pool) AddTask(task *Queue) {
	this.taskQuery <- task
}
/**
 * excelQuery runs the queue: it invokes the main function in a helper
 * goroutine, formats the result, optionally runs the callback, and
 * delivers the combined response on q.result. The result channel is
 * always closed on return, so readers never block forever.
 */
func (q *Queue) excelQuery() {
	defer close(q.result)
	excelFunc := q.Excel.Function
	if excelFunc == nil {
		return
	}
	var requestChannel = make(chan []interface{})
	go func() {
		defer close(requestChannel)
		params := q.Excel.Params
		result := FuncCall(excelFunc, params...)
		if result == nil {
			return
		}
		requestChannel <- result
	}()
	result, ok := <-requestChannel
	if !ok {
		// Main function produced nothing; closing q.result signals it.
		return
	}
	response := FormatResult(result)
	if response == nil {
		return
	}
	var callBackChannel = make(chan []interface{})
	go func() {
		defer close(callBackChannel)
		if q.CallBack == nil {
			return
		}
		result := FuncCall(q.CallBack.Function, q.CallBack.Params...)
		if result == nil {
			return
		}
		callBackChannel <- result
	}()
	resultList, ok := <-callBackChannel
	if !ok {
		// No callback output; deliver the main response as-is.
		// (response is already known to be non-nil here.)
		q.result <- response
		return
	}
	// BUG FIX: FormatResult can return nil (see the check above); the
	// original dereferenced .Result unconditionally and could panic.
	if callBackResponse := FormatResult(resultList); callBackResponse != nil && callBackResponse.Result != nil {
		response.Callback = callBackResponse.Result
	}
	q.result <- response
}
|
package logpusher
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
const (
// PostActionURL API endpoint of logpusher
PostActionURL = "https://api.logpusher.com/api/agent/savelog"
)
// PushResult is the response body returned by the logpusher API.
type PushResult struct {
	Message string `json:"message"`
}
// Client holds the credentials and the per-push log fields sent to the
// logpusher API. The log fields are filled by Push/AutoPush and then
// serialized by reqValues.
type Client struct {
	authKey     string // computed request auth token (see generateAuthKey)
	apiKey      string
	logMessage  string
	source      string
	category    string
	logType     string
	logTime     string // formatted "15:04"
	createdDate string // formatted RFC3339-style timestamp
	eventID     string
	email       string
	password    string
}
// New returns a Client configured with the account email, password and
// the API key obtained from logpusher.com.
func New(email, password, apiKey string) Client {
	return Client{
		email:    email,
		password: password,
		apiKey:   apiKey,
	}
}
// Push saves a log entry with caller-supplied metadata and timestamps.
func (c *Client) Push(message, source, category, logtype, eventid string, logtime time.Time, createdate time.Time) (result PushResult, err error) {
	c.logMessage = message
	c.source = source
	c.category = category
	c.logType = logtype
	// BUG FIX: Go time layouts use the reference time 2006-01-02
	// 15:04:05; the original "hh:MM" and "yyyy-mm-ddThh:MM:ssZ:Z"
	// patterns were emitted literally. Use the same layouts AutoPush
	// already uses.
	c.logTime = logtime.Format("15:04")
	c.createdDate = createdate.Format("2006-01-02T15:04:05Z07:00")
	c.eventID = eventid
	return c.send()
}
// AutoPush is the convenience push: LogTime, CreatedDate and EventId
// are derived automatically from the current time.
func (c *Client) AutoPush(message, source, category, logtype string) (result PushResult, err error) {
	now := time.Now()
	c.logMessage = message
	c.source = source
	c.category = category
	c.logType = logtype
	c.logTime = now.Format("15:04")
	c.createdDate = now.Format("2006-01-02T15:04:05Z07:00")
	c.eventID = fmt.Sprintf("%d", now.Unix())
	return c.send()
}
// send serializes the client's current fields, posts them to the API
// endpoint, and decodes the response.
func (c *Client) send() (result PushResult, err error) {
	rspText, err := c.do(PostActionURL, c.reqValues())
	if err != nil {
		return result, err
	}
	return c.unmarshall(rspText)
}
// do POSTs body as JSON to url and returns the raw response text.
func (c *Client) do(url string, body *bytes.Buffer) (string, error) {
	// Use an explicit timeout: the original used http.Post (the shared
	// default client), which has no timeout and can block forever on an
	// unresponsive server.
	client := &http.Client{Timeout: 30 * time.Second}
	rsp, err := client.Post(url, "application/json", body)
	if err != nil {
		return "", err
	}
	defer rsp.Body.Close()
	b, err := ioutil.ReadAll(rsp.Body)
	if err != nil {
		return "", err
	}
	return string(b), nil
}
// unmarshall decodes the JSON response text into a PushResult.
func (c *Client) unmarshall(rspText string) (result PushResult, err error) {
	err = json.Unmarshal([]byte(rspText), &result)
	return result, err
}
// reqValues builds the JSON request body from the client's current
// fields, including a freshly generated auth key.
func (c *Client) reqValues() *bytes.Buffer {
	// The auth key embeds the current time at the configured layout;
	// the time-of-day portion is fixed at 00:00:00 by the layout.
	currentTime := time.Now().Format("01.02.2006 00:00:00")
	values := map[string]string{
		"AuthKey":     c.generateAuthKey(currentTime),
		"ApiKey":      c.apiKey,
		"LogMessage":  c.logMessage,
		"Source":      c.source,
		"Category":    c.category,
		"LogType":     c.logType,
		"LogTime":     c.logTime,
		"CreatedDate": c.createdDate,
		"EventId":     c.eventID,
	}
	// Marshal of map[string]string cannot fail, so the error is ignored.
	jsonValue, _ := json.Marshal(values)
	return bytes.NewBuffer(jsonValue)
}
// generateAuthKey builds the request auth token as
// base64("email|md5hex(password)|currentTime"); currentTime must use
// the layout "01.02.2006 00:00:00".
func (c *Client) generateAuthKey(currentTime string) string {
	sum := md5.Sum([]byte(c.password))
	token := c.email + "|" + fmt.Sprintf("%x", sum) + "|" + currentTime
	return base64.StdEncoding.EncodeToString([]byte(token))
}
|
package main
import (
"sort"
"fmt"
"runtime/pprof"
"flag"
"os"
"runtime"
"log"
)
// Profiling flags: when non-empty, CPU and heap profiles are written
// to the named files (see main).
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to `file`")
// threeSum returns all unique triples from nums that sum to zero, each
// triple sorted ascending, using sort + two pointers (O(n^2)).
//
// BUG FIX: the original had an unconditional "return ret" as its second
// statement, making the whole algorithm unreachable (it always returned
// an empty slice). The loop logic was also broken: it compared the
// outer *index* i against the positions start/end instead of advancing
// past duplicates, and broke out after the first match.
func threeSum(nums []int) [][]int {
	ret := make([][]int, 0)
	sort.Ints(nums)
	n := len(nums)
	for i := 0; i < n-2; i++ {
		// Skip duplicate pivot values to avoid repeated triples.
		if i > 0 && nums[i] == nums[i-1] {
			continue
		}
		start, end := i+1, n-1
		for start < end {
			sum := nums[i] + nums[start] + nums[end]
			switch {
			case sum > 0:
				end--
			case sum < 0:
				start++
			default:
				ret = append(ret, []int{nums[i], nums[start], nums[end]})
				// Skip duplicates on both sides before moving inward.
				for start < end && nums[start] == nums[start+1] {
					start++
				}
				for start < end && nums[end] == nums[end-1] {
					end--
				}
				start++
				end--
			}
		}
	}
	return ret
}
// main runs threeSum on a fixed sample, optionally wrapped in CPU and
// heap profiling controlled by the -cpuprofile/-memprofile flags.
func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal("could not create CPU profile: ", err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal("could not start CPU profile: ", err)
		}
		defer pprof.StopCPUProfile()
	}
	// ... rest of the program ...
	nums := []int{-1, 0, 1, 2, -1, -4}
	fmt.Println(threeSum(nums))
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Fatal("could not create memory profile: ", err)
		}
		runtime.GC() // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Fatal("could not write memory profile: ", err)
		}
		f.Close()
	}
}
/*
* Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
*
* file: request.go
* details: Deals with the validity of the request or any other processing of the request before
* passing to the actual handler
*
*/
package request
import (
"encoding/json"
"net/http"
res "github.com/Juniper/collector/query-api/response"
)
func DecodeBody(r *http.Request, v interface{}) error {
defer r.Body.Close()
return json.NewDecoder(r.Body).Decode(v)
}
func DecodeBodyNumber(r *http.Request, v interface{}) error {
d := json.NewDecoder(r.Body)
d.UseNumber()
return d.Decode(v)
}
// IsValidAuthKey reports whether the auth key is valid.
// NOTE(review): validation is not implemented yet — every key is
// currently accepted.
func IsValidAuthKey(key string) bool {
	/* We need to validate the request */
	return true
}
// IsValidRequest wraps fn with an auth-key check: requests whose
// X-Auth-Token query parameter fails validation receive a 401 and are
// not forwarded to fn.
func IsValidRequest(fn http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		key := r.URL.Query().Get("X-Auth-Token")
		if IsValidAuthKey(key) {
			fn(w, r)
			return
		}
		res.RespondErr(w, r, http.StatusUnauthorized, "Invalid Auth Key")
	}
}
|
package sdk
import (
"context"
"net/http"
rm "github.com/brigadecore/brigade/sdk/v3/internal/restmachinery"
"github.com/brigadecore/brigade/sdk/v3/restmachinery"
)
// AuthnClient is the root of a tree of more specialized API clients for
// dealing with identity and authentication.
type AuthnClient interface {
	// WhoAmI returns a PrincipalReference for the currently authenticated
	// principal.
	WhoAmI(context.Context) (PrincipalReference, error)
	// ServiceAccounts returns a specialized client for ServiceAccount management.
	ServiceAccounts() ServiceAccountsClient
	// Sessions returns a specialized client for Session management.
	Sessions() SessionsClient
	// Users returns a specialized client for User management.
	Users() UsersClient
}
// authnClient is the concrete AuthnClient implementation; the embedded
// BaseClient performs the HTTP requests, and the sub-clients are built
// eagerly in NewAuthnClient.
type authnClient struct {
	*rm.BaseClient
	// serviceAccountsClient is a specialized client for ServiceAccount
	// management.
	serviceAccountsClient ServiceAccountsClient
	// sessionsClient is a specialized client for Session management.
	sessionsClient SessionsClient
	// usersClient is a specialized client for User management.
	usersClient UsersClient
}
// NewAuthnClient returns an AuthnClient, the root of a tree of more
// specialized API clients for identity and authentication. Every
// sub-client is initialized up front so the returned client is ready
// for immediate use.
func NewAuthnClient(
	apiAddress string,
	apiToken string,
	opts *restmachinery.APIClientOptions,
) AuthnClient {
	client := &authnClient{
		BaseClient: rm.NewBaseClient(apiAddress, apiToken, opts),
	}
	client.serviceAccountsClient = NewServiceAccountsClient(apiAddress, apiToken, opts)
	client.sessionsClient = NewSessionsClient(apiAddress, apiToken, opts)
	client.usersClient = NewUsersClient(apiAddress, apiToken, opts)
	return client
}
// WhoAmI returns a PrincipalReference for the currently authenticated
// principal by issuing GET v2/whoami; the response is decoded into ref.
func (a *authnClient) WhoAmI(ctx context.Context) (PrincipalReference, error) {
	ref := PrincipalReference{}
	return ref, a.ExecuteRequest(
		ctx,
		rm.OutboundRequest{
			Method:      http.MethodGet,
			Path:        "v2/whoami",
			SuccessCode: http.StatusOK,
			RespObj:     &ref,
		},
	)
}
// ServiceAccounts returns the pre-built ServiceAccount sub-client.
func (a *authnClient) ServiceAccounts() ServiceAccountsClient {
	return a.serviceAccountsClient
}
// Sessions returns the pre-built Session sub-client.
func (a *authnClient) Sessions() SessionsClient {
	return a.sessionsClient
}
// Users returns the pre-built User sub-client.
func (a *authnClient) Users() UsersClient {
	return a.usersClient
}
|
package user
import (
"fmt"
"github.com/10gen/realm-cli/internal/cli"
"github.com/10gen/realm-cli/internal/cli/user"
"github.com/10gen/realm-cli/internal/terminal"
"github.com/10gen/realm-cli/internal/utils/flags"
)
// CommandMetaCreate is the command meta for the `user create` command.
var CommandMetaCreate = cli.CommandMeta{
	Use:         "create",
	Display:     "user create",
	Description: "Create an application user for your Realm app",
	HelpText: `Adds a new User to your Realm app. You can create a User for the following
enabled Auth Providers: "Email/Password", or "API Key".`,
}
// CommandCreate is the `user create` command; its behavior is driven
// entirely by the resolved createInputs.
type CommandCreate struct {
	inputs createInputs
}
// Flags returns the command's flags: app/project/product selection plus
// the per-user-type inputs (--type, --name for API keys, --email and
// --password for email/password users).
func (cmd *CommandCreate) Flags() []flags.Flag {
	return []flags.Flag{
		cli.AppFlagWithContext(&cmd.inputs.App, "to create its users"),
		cli.ProjectFlag(&cmd.inputs.Project),
		cli.ProductFlag(&cmd.inputs.Products),
		// --type selects which kind of user is created.
		flags.CustomFlag{
			Value: &cmd.inputs.UserType,
			Meta: flags.Meta{
				Name: "type",
				Usage: flags.Usage{
					Description:  "Select the type of user to create",
					DefaultValue: "<none>",
					AllowedValues: []string{
						string(userTypeAPIKey),
						string(userTypeEmailPassword),
					},
				},
			},
		},
		// --name: only meaningful for API-key users.
		flags.StringFlag{
			Value: &cmd.inputs.APIKeyName,
			Meta: flags.Meta{
				Name: "name",
				Usage: flags.Usage{
					Description: "Specify the name of the new API Key",
				},
			},
		},
		// --email and --password: only meaningful for email/password users.
		flags.StringFlag{
			Value: &cmd.inputs.Email,
			Meta: flags.Meta{
				Name: "email",
				Usage: flags.Usage{
					Description: "Specify the email of the new user",
				},
			},
		},
		flags.StringFlag{
			Value: &cmd.inputs.Password,
			Meta: flags.Meta{
				Name: "password",
				Usage: flags.Usage{
					Description: "Specify the password of the new user",
				},
			},
		},
	}
}
// Inputs returns the command's input resolver.
func (cmd *CommandCreate) Inputs() cli.InputResolver {
	return &cmd.inputs
}
// Handler is the command handler: it resolves the target app and then
// creates either an API-key user or an email/password user, printing
// the created identity as JSON.
func (cmd *CommandCreate) Handler(profile *user.Profile, ui terminal.UI, clients cli.Clients) error {
	app, err := cli.ResolveApp(ui, clients.Realm, cmd.inputs.Filter())
	if err != nil {
		return err
	}
	switch cmd.inputs.UserType {
	case userTypeAPIKey:
		apiKey, err := clients.Realm.CreateAPIKey(app.GroupID, app.ID, cmd.inputs.APIKeyName)
		if err != nil {
			return fmt.Errorf("failed to create api key: %s", err)
		}
		ui.Print(terminal.NewJSONLog(
			"Successfully created api key",
			newUserAPIKeyOutputs{
				newUserOutputs: newUserOutputs{
					ID:      apiKey.ID,
					Enabled: !apiKey.Disabled,
				},
				Name: apiKey.Name,
				Key:  apiKey.Key,
			},
		))
	case userTypeEmailPassword:
		// FIX: renamed the local from "user" to "created" — the
		// original shadowed the imported "user" package inside this
		// case, a classic lint trap.
		created, err := clients.Realm.CreateUser(app.GroupID, app.ID, cmd.inputs.Email, cmd.inputs.Password)
		if err != nil {
			return fmt.Errorf("failed to create user: %s", err)
		}
		ui.Print(terminal.NewJSONLog(
			"Successfully created user",
			newUserEmailOutputs{
				newUserOutputs: newUserOutputs{
					ID:      created.ID,
					Enabled: !created.Disabled,
				},
				Email: created.Data["email"],
				Type:  created.Type,
			},
		))
	}
	return nil
}
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package main
import (
"errors"
"fmt"
"os"
"time"
"github.com/luci/luci-go/client/archiver"
"github.com/luci/luci-go/client/internal/common"
"github.com/luci/luci-go/client/isolatedclient"
"github.com/maruel/subcommands"
)
// cmdArchive is the "archive" subcommand definition: it builds an
// .isolated file and uploads the tree to an isolate server.
var cmdArchive = &subcommands.Command{
	UsageLine: "archive <options>...",
	ShortDesc: "creates a .isolated file and uploads the tree to an isolate server.",
	LongDesc:  "All the files listed in the .isolated file are put in the isolate server.",
	CommandRun: func() subcommands.CommandRun {
		c := archiveRun{}
		c.commonFlags.Init()
		c.Flags.Var(&c.dirs, "dirs", "Directory(ies) to archive")
		c.Flags.Var(&c.files, "files", "Individual file(s) to archive")
		c.Flags.Var(&c.blacklist, "blacklist",
			"List of regexp to use as blacklist filter when uploading directories")
		return &c
	},
}
// archiveRun holds the parsed flag values for one archive invocation.
type archiveRun struct {
	commonFlags
	dirs      common.Strings // directories to archive (repeatable)
	files     common.Strings // individual files to archive (repeatable)
	blacklist common.Strings // regexps filtering directory uploads
}
// Parse validates flag state; the archive command accepts no
// positional arguments.
func (c *archiveRun) Parse(a subcommands.Application, args []string) error {
	if err := c.commonFlags.Parse(); err != nil {
		return err
	}
	if len(args) != 0 {
		// FIX: corrected the error-message typo "position arguments".
		return errors.New("positional arguments not expected")
	}
	return nil
}
// main performs the archive: it pushes the requested files and
// directories to the isolate server, prints each item's digest (or
// error), waits for all uploads to finish, and reports cache-hit
// statistics unless quiet mode is on.
func (c *archiveRun) main(a subcommands.Application, args []string) error {
	start := time.Now()
	out := os.Stdout
	prefix := "\n"
	if c.defaultFlags.Quiet {
		out = nil
		prefix = ""
	}
	arch := archiver.New(isolatedclient.New(c.isolatedFlags.ServerURL, c.isolatedFlags.Namespace), out)
	common.CancelOnCtrlC(arch)
	futures := []archiver.Future{}
	names := []string{}
	for _, file := range c.files {
		futures = append(futures, arch.PushFile(file, file))
		names = append(names, file)
	}
	for _, d := range c.dirs {
		futures = append(futures, archiver.PushDirectory(arch, d, "", nil))
		names = append(names, d)
	}
	// Wait for each item to be hashed; uploads may still be in flight.
	for i, future := range futures {
		future.WaitForHashed()
		if err := future.Error(); err == nil {
			fmt.Printf("%s%s %s\n", prefix, future.Digest(), names[i])
		} else {
			fmt.Printf("%s%s failed: %s\n", prefix, names[i], err)
		}
	}
	// This waits for all uploads.
	err := arch.Close()
	if !c.defaultFlags.Quiet {
		duration := time.Since(start)
		stats := arch.Stats()
		fmt.Fprintf(os.Stderr, "Hits : %5d (%s)\n", stats.TotalHits(), stats.TotalBytesHits())
		fmt.Fprintf(os.Stderr, "Misses : %5d (%s)\n", stats.TotalMisses(), stats.TotalBytesPushed())
		fmt.Fprintf(os.Stderr, "Duration: %s\n", common.Round(duration, time.Millisecond))
	}
	return err
}
// Run parses flags, starts tracing, executes the archive operation,
// and maps any failure to exit code 1.
func (c *archiveRun) Run(a subcommands.Application, args []string) int {
	if err := c.Parse(a, args); err != nil {
		fmt.Fprintf(a.GetErr(), "%s: %s\n", a.GetName(), err)
		return 1
	}
	cl, err := c.defaultFlags.StartTracing()
	if err != nil {
		fmt.Fprintf(a.GetErr(), "%s: %s\n", a.GetName(), err)
		return 1
	}
	defer cl.Close()
	if err := c.main(a, args); err != nil {
		fmt.Fprintf(a.GetErr(), "%s: %s\n", a.GetName(), err)
		return 1
	}
	return 0
}
|
package bgControllers
import (
"github.com/astaxie/beego"
"strconv"
"GiantTech/models"
"GiantTech/controllers/tools"
)
// BgProjectFileController serves the back-office project-file pages.
type BgProjectFileController struct {
	beego.Controller
}
// Prepare runs before each action: it loads the logged-in user from
// the session and redirects to /login when no login is present.
// NOTE(review): "username" is a package-level variable (declared in
// another file) assigned per request — shared mutable state that is
// racy under concurrent requests; confirm and consider a field.
// NOTE(review): execution continues after Redirect; a StopRun/return
// may be intended here — confirm.
func (this *BgProjectFileController) Prepare() {
	s := this.StartSession()
	username = s.Get("login")
	beego.Informational(username)
	if username == nil {
		this.Ctx.Redirect(302, "/login")
	}
}
// Get renders the paginated file list for one project.
// NOTE(review): page/id conversion errors are silently ignored, so a
// missing "page" yields page=0 and a negative offset — confirm the
// models layer tolerates that. "username" and "prepage" are
// package-level and declared elsewhere.
func (this *BgProjectFileController) Get() {
	s := this.StartSession()
	p := this.Ctx.Request.FormValue("page")
	page, _ := strconv.Atoi(p)
	id := this.Ctx.Request.FormValue("id")
	s.Set("ProjectId", id)
	projectId, _ := strconv.Atoi(id)
	project, _ := models.GetTProjectsById(projectId)
	this.Data["ProjectName"] = project.ProjectName
	offset := (page - 1) * prepage
	user, _ := models.GetTUsersByName(username.(string))
	this.Data["User"] = user
	query := make(map[string]string)
	query["FileProjectID"] = id
	var order, sortBy []string
	order = append(order, "desc")
	sortBy = append(sortBy, "FileCreatedTime")
	// Fetches all matching rows only to count them for the paginator.
	i, _ := models.GetAllTProjectFile(query, nil, sortBy, order, 0, 0)
	if files, err := models.GetAllTProjectFile(query, nil, sortBy, order, int64(offset), int64(prepage)); err == nil {
		res := tools.Paginator(page, prepage, int64(len(i)))
		this.Data["paginator"] = res
		this.Data["files"] = files
		this.Data["ProjectId"] = id
	} else {
		res := tools.Paginator(page, prepage, 0)
		this.Data["paginator"] = res
		beego.Error(err)
	}
	this.TplName = "bgview/projectfile.html"
}
package resources
import "github.com/go-redis/redis"
// NewRedisResource builds a Redis-backed ResourceInterface from the
// given configuration; the connection itself is created lazily in Get.
func NewRedisResource(config *RedisConfig) (ResourceInterface, error) {
	resource := &RedisResource{config: config}
	return resource, nil
}
// RedisResource lazily manages a single Redis client connection.
type RedisResource struct {
	config *RedisConfig  // connection settings
	client *redis.Client // nil until Get creates the client
}

// RedisConfig holds the Redis connection settings.
type RedisConfig struct {
	Address string // host:port of the Redis server
}
// Get creates (and caches on the receiver) a Redis client for the
// configured address and returns it. Each call replaces the cached
// client with a fresh one.
func (this *RedisResource) Get() (interface{}, error) {
	// create redis session
	this.client = redis.NewClient(&redis.Options{
		Addr: this.config.Address,
	})
	return this.client, nil
}
// Close closes the cached Redis client if one exists and reports
// whether anything was closed.
func (this *RedisResource) Close() bool {
	if this.client == nil {
		return false
	}
	this.client.Close()
	return true
}
|
package dddshop
import (
"fmt"
"github.com/sueken5/golang-ddd/pkg/dddshop/interfaces/http"
)
// Execute wires up dependencies and runs the HTTP server, returning
// any run error to the caller.
func Execute() error {
	//di...
	srv := http.NewServer()
	if err := srv.Run(); err != nil {
		// BUG FIX: the original built this error with fmt.Errorf but
		// discarded the result and fell through to "return nil",
		// silently swallowing server failures.
		return fmt.Errorf("dddshop exec err: %v", err)
	}
	return nil
}
|
package lambdacalculus
import (
"testing"
)
// TestPair_First checks that applying the First projection to the pair
// built by Tuple2Struct(1)(2) yields its first element.
func TestPair_First(t *testing.T) {
	res := Tuple2Struct(1)(2)(First)
	if res != 1 {
		t.Errorf("First of pair(1)(2) should be 1 instead is %v", res)
	}
}
// TestPair_Second checks that applying the Second projection to the
// pair built by Tuple2Struct(1)(2) yields its second element.
func TestPair_Second(t *testing.T) {
	res := Tuple2Struct(1)(2)(Second)
	if res != 2 {
		t.Errorf("Second of pair(1)(2) should be 2 instead is %v", res)
	}
}
// TestPair_ChurchNumbers_First checks that a pair of Church numerals
// projects its first element, evaluated by applying the numeral to f
// and x.
func TestPair_ChurchNumbers_First(t *testing.T) {
	res := Tuple2Struct(one)(two)(First).(ChurchNumber)(f)(x)
	if res != 1 {
		t.Errorf("First of pair(one)(two) should be 1 instead is %v", res)
	}
}
// TestPair_ChurchNumbers_Second checks that a pair of Church numerals
// projects its second element, evaluated by applying the numeral to f
// and x.
func TestPair_ChurchNumbers_Second(t *testing.T) {
	res := Tuple2Struct(one)(two)(Second).(ChurchNumber)(f)(x)
	if res != 2 {
		t.Errorf("Second of pair(one)(two) should be 2 instead is %v", res)
	}
}
|
package student
// student holds a student's name, age and score; score is unexported
// and must be read through GetScore from other packages.
type student struct {
	Name  string
	Age   int
	score float64
}
// NewStu constructs a student from the given name, age and score.
// (Translated from the original Chinese comment.)
func NewStu(n string, a int, s float64) *student {
	return &student{
		Name:  n,
		Age:   a,
		score: s, // score is unexported, so other packages must read it via GetScore (translated comment)
	}
}
// GetScore returns the unexported score field.
func (stu *student) GetScore() float64 {
	return stu.score
}
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"os"
"strings"
)
// main reads a name (single token) and an address (full line) from
// stdin, then prints them as a JSON object.
func main() {
	var name string
	fmt.Print("Input the name: ")
	fmt.Scanln(&name)
	fmt.Print("Input the address: ")
	inputReader := bufio.NewReader(os.Stdin)
	address, _ := inputReader.ReadString('\n')
	// BUG FIX: TrimSuffix(address, "\r\n") left the trailing "\n" on
	// Unix, where lines end with a bare "\n". TrimRight strips both
	// Windows ("\r\n") and Unix ("\n") line endings.
	address = strings.TrimRight(address, "\r\n")
	data := map[string]string{
		"name":    name,
		"address": address,
	}
	jsonData, err := json.Marshal(data)
	if err != nil {
		panic(err)
	}
	fmt.Println("JSON data: ", string(jsonData))
}
|
package deploy
import (
"github.com/devspace-cloud/devspace/cmd"
"github.com/devspace-cloud/devspace/cmd/flags"
"github.com/devspace-cloud/devspace/e2e/utils"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/pkg/errors"
)
//Test 1 - default
//1. deploy (without profile & var)
//2. deploy --force-build & check if rebuild
//3. deploy --force-deploy & check NO build but deployed
//4. deploy --force-dependencies & check NO build & check NO deployment but dependencies are deployed
//5. deploy --force-deploy --deployments=default,test2 & check NO build & only deployments deployed
// RunDefault runs the "default" sub test of the deploy e2e test: five
// deploy variants (plain, --force-build, --force-deploy,
// --force-dependencies, and --force-deploy with an explicit deployment
// list), each with a postCheck asserting what was (or was not) built
// and deployed.
func RunDefault(f *customFactory, logger log.Logger) error {
	logger.Info("Run sub test 'default' of test 'deploy'")
	logger.StartWait("Run test...")
	defer logger.StopWait()
	client, err := f.NewKubeClientFromContext("", f.Namespace, false)
	if err != nil {
		return errors.Errorf("Unable to create new kubectl client: %v", err)
	}
	// The client is saved in the factory ONCE for each sub test
	f.Client = client
	ts := testSuite{
		test{
			name: "1. deploy (without profile & var)",
			deployConfig: &cmd.DeployCmd{
				GlobalFlags: &flags.GlobalFlags{
					Namespace: f.Namespace,
					NoWarn:    true,
				},
			},
			postCheck: func(f *customFactory, t *test) error {
				err := checkPortForwarding(f, t.deployConfig)
				if err != nil {
					return err
				}
				return nil
			},
		},
		test{
			name: "2. deploy --force-build & check if rebuild",
			deployConfig: &cmd.DeployCmd{
				GlobalFlags: &flags.GlobalFlags{
					Namespace: f.Namespace,
					NoWarn:    true,
				},
				ForceBuild: true,
			},
			postCheck: func(f *customFactory, t *test) error {
				// Exactly one image must have been rebuilt.
				imagesExpected := 1
				imagesCount := len(f.builtImages)
				if imagesCount != imagesExpected {
					return errors.Errorf("built images expected: %v, found: %v", imagesExpected, imagesCount)
				}
				return nil
			},
		},
		test{
			name: "3. deploy --force-deploy & check NO build but deployed",
			deployConfig: &cmd.DeployCmd{
				GlobalFlags: &flags.GlobalFlags{
					Namespace: f.Namespace,
					NoWarn:    true,
				},
				ForceDeploy: true, // Only forces to redeploy deployments
			},
			postCheck: func(f *customFactory, t *test) error {
				// No image rebuild, but the helm release must advance.
				imagesExpected := 0
				imagesCount := len(f.builtImages)
				if imagesCount != imagesExpected {
					return errors.Errorf("built images expected: %v, found: %v", imagesExpected, imagesCount)
				}
				wasDeployed, err := utils.LookForDeployment(f.Client, f.Namespace, "sh.helm.release.v1.root-app.v2")
				if err != nil {
					return err
				}
				if !wasDeployed {
					return errors.New("expected deployment 'sh.helm.release.v1.root-app.v2' was not found")
				}
				return nil
			},
		},
		test{
			name: "4. deploy --force-dependencies & check NO build & check NO deployment but dependencies are deployed",
			deployConfig: &cmd.DeployCmd{
				GlobalFlags: &flags.GlobalFlags{
					Namespace: f.Namespace,
					NoWarn:    true,
				},
				ForceDeploy:       true,
				ForceDependencies: true,
			},
			postCheck: func(f *customFactory, t *test) error {
				// No build
				imagesExpected := 0
				imagesCount := len(f.builtImages)
				if imagesCount != imagesExpected {
					return errors.Errorf("built images expected: %v, found: %v", imagesExpected, imagesCount)
				}
				// Both dependency releases must have been redeployed.
				deployedDependencies := []string{"sh.helm.release.v1.dependency1.v2", "sh.helm.release.v1.dependency2.v2"}
				wasDeployed, err := utils.LookForDeployment(f.Client, f.Namespace, deployedDependencies...)
				if err != nil {
					return err
				}
				if !wasDeployed {
					return errors.New("expected dependency deployment was not found")
				}
				return nil
			},
		},
		test{
			name: "5. deploy --force-deploy --deployments=default,test2 & check NO build & only deployments deployed",
			deployConfig: &cmd.DeployCmd{
				GlobalFlags: &flags.GlobalFlags{
					Namespace: f.Namespace,
					NoWarn:    true,
				},
				ForceDeploy: true,
				Deployments: "root-app",
			},
			postCheck: func(f *customFactory, t *test) error {
				// No build
				imagesExpected := 0
				imagesCount := len(f.builtImages)
				if imagesCount != imagesExpected {
					return errors.Errorf("built images expected: %v, found: %v", imagesExpected, imagesCount)
				}
				// Only the listed deployment may advance its release.
				shouldBeDeployed := "sh.helm.release.v1.root-app.v4"
				shouldNotBeDeployed := "sh.helm.release.v1.php-app.v5"
				wasDeployed, err := utils.LookForDeployment(f.Client, f.Namespace, shouldBeDeployed)
				if err != nil {
					return err
				}
				if !wasDeployed {
					return errors.Errorf("expected deployment '%v' was not found", shouldBeDeployed)
				}
				wasDeployed, err = utils.LookForDeployment(f.Client, f.Namespace, shouldNotBeDeployed)
				if err != nil {
					return err
				}
				if wasDeployed {
					return errors.Errorf("deployment '%v' should not be found", shouldNotBeDeployed)
				}
				return nil
			},
		},
	}
	err = beforeTest(f, logger, "tests/deploy/testdata/default")
	defer afterTest(f)
	if err != nil {
		return errors.Errorf("sub test 'default' of 'deploy' test failed: %s %v", f.GetLogContents(), err)
	}
	for _, t := range ts {
		err := runTest(f, &t)
		utils.PrintTestResult("default", t.name, err, logger)
		if err != nil {
			return errors.Errorf("sub test 'default' of 'deploy' test failed: %s %v", f.GetLogContents(), err)
		}
	}
	return nil
}
// checkPortForwarding loads the generated and resolved configs for the
// deploy command and verifies that port-forwarding works by pinging
// through the forwarded port.
func checkPortForwarding(f *customFactory, deployConfig *cmd.DeployCmd) error {
	// Load generated config
	generatedConfig, err := f.NewConfigLoader(nil, nil).Generated()
	if err != nil {
		return errors.Errorf("Error loading generated.yaml: %v", err)
	}
	// Add current kube context to context and resolve the full config.
	config, err := f.NewConfigLoader(deployConfig.ToConfigOptions(), f.GetLog()).Load()
	if err != nil {
		return err
	}
	// Port-forward and ping through the tunnel.
	return utils.PortForwardAndPing(config, generatedConfig, f.Client, f.GetLog())
}
|
/**
 * @Author xieed
 * @Description Version-number utility helpers (translated from Chinese)
 * @Date 2020/9/24 19:33
 **/
package utils
import "strings"
// Version models a version number as its numeric components.
// NOTE(review): not referenced within this file — confirm callers.
type Version struct {
	versions []int
}

// defaultVersionSeparator separates version components ("1.2.3").
var defaultVersionSeparator = "."
// IncrementVersion bumps the last component of a dotted version string
// (e.g. "1.2.3" -> "1.2.4"). (Translated from the original Chinese
// comment: "version number auto-increment".)
// NOTE(review): panics on an empty component array — assumes
// ToIntArrayBySeparator (defined elsewhere) returns at least one
// element; confirm.
func IncrementVersion(versionStr string) (res string) {
	versionArray := ToIntArrayBySeparator(versionStr, defaultVersionSeparator)
	lastIndex := len(versionArray) - 1
	versionArray[lastIndex] = versionArray[lastIndex] + 1
	return IntArray2String(versionArray, defaultVersionSeparator)
}
// BranchNameFormat expands the placeholders ${repo_name}, ${lang} and
// ${branch_version} in branchNameFmt with the supplied values, applied
// in that order. (Translated from the original Chinese comment:
// "branch name formatting".)
func BranchNameFormat(branchNameFmt string, repoName string, lang string, branchVersion string) (res string) {
	out := branchNameFmt
	for _, pair := range [][2]string{
		{"${repo_name}", repoName},
		{"${lang}", lang},
		{"${branch_version}", branchVersion},
	} {
		out = strings.ReplaceAll(out, pair[0], pair[1])
	}
	return out
}
|
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package runner
import (
"github.com/rcrowley/go-metrics"
log "github.com/sirupsen/logrus"
"github.com/streamsets/datacollector-edge/api/validation"
"github.com/streamsets/datacollector-edge/container/execution"
"github.com/streamsets/datacollector-edge/container/util"
"time"
)
// Metric-name suffixes appended to each stage's metrics key.
const (
	INPUT_RECORDS    = ".inputRecords"
	OUTPUT_RECORDS   = ".outputRecords"
	ERROR_RECORDS    = ".errorRecords"
	STAGE_ERRORS     = ".stageErrors"
	BATCH_PROCESSING = ".batchProcessing"
)
// Pipe is one stage of a running pipeline: it is initialized once,
// processes full pipe batches, and reports its role (source, processor
// or target).
type Pipe interface {
	Init() []validation.Issue
	Process(pipeBatch *FullPipeBatch) error
	Destroy()
	IsSource() bool
	IsProcessor() bool
	IsTarget() bool
}
// StagePipe wires a stage runtime into the pipeline together with the
// full set of per-stage metrics (counters, meters, histograms, a batch
// timer, and per-output-lane counters/meters) registered in Init.
type StagePipe struct {
	config                      execution.Config
	Stage                       StageRuntime
	InputLanes                  []string
	OutputLanes                 []string
	EventLanes                  []string
	inputRecordsCounter         metrics.Counter
	outputRecordsCounter        metrics.Counter
	errorRecordsCounter         metrics.Counter
	stageErrorsCounter          metrics.Counter
	inputRecordsMeter           metrics.Meter
	outputRecordsMeter          metrics.Meter
	errorRecordsMeter           metrics.Meter
	stageErrorsMeter            metrics.Meter
	inputRecordsHistogram       metrics.Histogram
	outputRecordsHistogram      metrics.Histogram
	errorRecordsHistogram       metrics.Histogram
	stageErrorsHistogram        metrics.Histogram
	processingTimer             metrics.Timer
	outputRecordsPerLaneCounter map[string]metrics.Counter
	outputRecordsPerLaneMeter   map[string]metrics.Meter
}
// Init initializes the stage and, when that succeeds (no issues),
// registers all per-stage metrics under "stage.<InstanceName>",
// including per-output-lane counters and meters.
func (s *StagePipe) Init() []validation.Issue {
	issues := s.Stage.Init()
	if len(issues) == 0 {
		metricRegistry := s.Stage.stageContext.GetMetrics()
		metricsKey := "stage." + s.Stage.config.InstanceName
		s.inputRecordsCounter = util.CreateCounter(metricRegistry, metricsKey+INPUT_RECORDS)
		s.outputRecordsCounter = util.CreateCounter(metricRegistry, metricsKey+OUTPUT_RECORDS)
		s.errorRecordsCounter = util.CreateCounter(metricRegistry, metricsKey+ERROR_RECORDS)
		s.stageErrorsCounter = util.CreateCounter(metricRegistry, metricsKey+STAGE_ERRORS)
		s.inputRecordsMeter = util.CreateMeter(metricRegistry, metricsKey+INPUT_RECORDS)
		s.outputRecordsMeter = util.CreateMeter(metricRegistry, metricsKey+OUTPUT_RECORDS)
		s.errorRecordsMeter = util.CreateMeter(metricRegistry, metricsKey+ERROR_RECORDS)
		s.stageErrorsMeter = util.CreateMeter(metricRegistry, metricsKey+STAGE_ERRORS)
		s.inputRecordsHistogram = util.CreateHistogram5Min(metricRegistry, metricsKey+INPUT_RECORDS)
		s.outputRecordsHistogram = util.CreateHistogram5Min(metricRegistry, metricsKey+OUTPUT_RECORDS)
		s.errorRecordsHistogram = util.CreateHistogram5Min(metricRegistry, metricsKey+ERROR_RECORDS)
		s.stageErrorsHistogram = util.CreateHistogram5Min(metricRegistry, metricsKey+STAGE_ERRORS)
		s.processingTimer = util.CreateTimer(metricRegistry, metricsKey+BATCH_PROCESSING)
		if len(s.Stage.config.OutputLanes) > 0 {
			s.outputRecordsPerLaneCounter = make(map[string]metrics.Counter)
			s.outputRecordsPerLaneMeter = make(map[string]metrics.Meter)
			for _, lane := range s.Stage.config.OutputLanes {
				s.outputRecordsPerLaneCounter[lane] =
					util.CreateCounter(metricRegistry, metricsKey+":"+lane+OUTPUT_RECORDS)
				s.outputRecordsPerLaneMeter[lane] =
					util.CreateMeter(metricRegistry, metricsKey+":"+lane+OUTPUT_RECORDS)
			}
		}
	}
	return issues
}
// Process runs one batch through this stage and updates its metrics.
// It starts/completes the stage on the pipe batch, executes the stage
// with the previous offset, advances the pipeline offset when the stage
// is the origin, and then records input/output/error counts in the
// counters, meters and histograms created by Init.
// Returns the stage's execution error, in which case no metrics are
// updated for this batch.
func (s *StagePipe) Process(pipeBatch *FullPipeBatch) error {
	log.WithField("stage", s.Stage.config.InstanceName).Debug("Processing Stage")
	start := time.Now()
	batchMaker := pipeBatch.StartStage(*s)
	batchImpl := pipeBatch.GetBatch(*s)
	newOffset, err := s.Stage.Execute(pipeBatch.GetPreviousOffset(), s.config.MaxBatchSize, batchImpl, batchMaker)
	if err != nil {
		return err
	}
	// Only the origin (source) stage moves the pipeline offset forward.
	if s.IsSource() {
		pipeBatch.SetNewOffset(newOffset)
	}
	pipeBatch.CompleteStage(batchMaker)
	// Update metric registry
	s.processingTimer.UpdateSince(start)
	instanceName := s.Stage.config.InstanceName
	errorSink := pipeBatch.GetErrorSink()
	stageErrorRecordsCount := int64(len(errorSink.GetStageErrorRecords(instanceName)))
	stageErrorMessagesCount := int64(len(errorSink.GetStageErrorMessages(instanceName)))
	inputRecordsCount := int64(len(batchImpl.records))
	outputRecordsCount := batchMaker.GetSize()
	if s.IsTarget() {
		// Assumption is that the target will not drop any record.
		// Records are sent to destination or to the error sink.
		outputRecordsCount = inputRecordsCount - stageErrorRecordsCount
	}
	s.inputRecordsCounter.Inc(inputRecordsCount)
	s.inputRecordsMeter.Mark(inputRecordsCount)
	s.inputRecordsHistogram.Update(inputRecordsCount)
	s.outputRecordsCounter.Inc(outputRecordsCount)
	s.outputRecordsMeter.Mark(outputRecordsCount)
	s.outputRecordsHistogram.Update(outputRecordsCount)
	s.errorRecordsCounter.Inc(stageErrorRecordsCount)
	s.errorRecordsMeter.Mark(stageErrorRecordsCount)
	s.errorRecordsHistogram.Update(stageErrorRecordsCount)
	s.stageErrorsCounter.Inc(stageErrorMessagesCount)
	s.stageErrorsMeter.Mark(stageErrorMessagesCount)
	s.stageErrorsHistogram.Update(stageErrorMessagesCount)
	// Per-lane output metrics mirror the overall output counters.
	if len(s.Stage.config.OutputLanes) > 0 {
		for _, lane := range s.Stage.config.OutputLanes {
			laneCount := int64(len(batchMaker.GetStageOutput(lane)))
			s.outputRecordsPerLaneCounter[lane].Inc(laneCount)
			s.outputRecordsPerLaneMeter[lane].Mark(laneCount)
		}
	}
	return nil
}
// Destroy delegates teardown to the wrapped stage runtime.
func (s *StagePipe) Destroy() {
	s.Stage.Destroy()
}
// IsSource reports whether the wrapped stage is a pipeline origin.
func (s *StagePipe) IsSource() bool {
	return s.Stage.stageBean.IsSource()
}
// IsProcessor reports whether the wrapped stage is a processor.
func (s *StagePipe) IsProcessor() bool {
	return s.Stage.stageBean.IsProcessor()
}
// IsTarget reports whether the wrapped stage is a destination.
func (s *StagePipe) IsTarget() bool {
	return s.Stage.stageBean.IsTarget()
}
// NewStagePipe builds a Pipe around the given stage runtime, copying the
// execution config and the stage's lane wiring onto the new pipe.
func NewStagePipe(stage StageRuntime, config execution.Config) Pipe {
	pipe := &StagePipe{}
	pipe.Stage = stage
	pipe.config = config
	pipe.InputLanes = stage.config.InputLanes
	pipe.OutputLanes = stage.config.OutputLanes
	pipe.EventLanes = stage.config.EventLanes
	return pipe
}
|
package main
import (
"bufio"
"encoding/csv"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strconv"
"strings"
)
// Column indexes into a play-by-play CSV row, plus the quarter length.
const (
	TIME_IDX int = 7 // game clock remaining in the period, "MM:SS"
	PERIOD_IDX int = 8 // period (quarter) number
	PLAYER_IDX int = 13 // player name
	HOME_DESC_IDX int = 5 // home-side play description
	AWAY_DESC_IDX int = 32 // away-side play description
	QUARTER_TIME_SEC int = 12 * 60 // seconds in one quarter
)
// Season holds one season's play-by-play data: the season identifier,
// the tracked player names (lower-cased), and per-game event lists
// keyed by the source CSV file path.
type Season struct {
	Id string
	Players []string
	Games map[string][]*GameEvent
}
// GameEvent is a single play-by-play event.
type GameEvent struct {
	Player string // lower-cased player name
	Desc string // concatenated lower-cased home+away descriptions
	TimeSec int // absolute seconds elapsed since the game start
	Period int // quarter number the event occurred in
	MinLeft int // minutes left in the period
	SecLeft int // seconds left in the period (modulo the minute)
}
// NewSeason loads every game CSV for the given season id from DATA_DIR,
// parses each row into a GameEvent, and reads the season's player list
// from players.dat. Returns an error if globbing, the player file, or
// any game file fails.
func NewSeason(seasonId string) (*Season, error) {
	fnGlob := path.Join(DATA_DIR, seasonId, "*.csv")
	playersFile := path.Join(DATA_DIR, seasonId, "players.dat")
	files, err := filepath.Glob(fnGlob)
	if err != nil {
		return nil, err
	}
	players, err := ReadPlayersFile(playersFile)
	if err != nil {
		return nil, err
	}
	season := &Season{
		Id:      seasonId,
		Games:   make(map[string][]*GameEvent, len(files)),
		Players: players,
	}
	for _, file := range files {
		game, err := readGameEvents(file)
		if err != nil {
			return nil, err
		}
		season.Games[file] = game
	}
	return season, nil
}

// readGameEvents parses a single game CSV into its events. Extracted so
// each file handle is closed when its game is done: the original used
// "defer f.Close()" inside the loop, keeping every file open until
// NewSeason returned.
func readGameEvents(file string) ([]*GameEvent, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	rows, err := csv.NewReader(f).ReadAll()
	if err != nil {
		return nil, err
	}
	game := make([]*GameEvent, 0, len(rows))
	for _, row := range rows {
		gd, err := NewGameEventFromRow(row)
		if err != nil {
			// Expected on the header row of every game .csv file.
			continue
		}
		game = append(game, gd)
	}
	return game, nil
}
func ReadPlayersFile(playersFile string) ([]string, error) {
f, err := os.Open(playersFile)
if err != nil {
return nil, err
}
defer f.Close()
players := make([]string, 0)
reader := bufio.NewReader(f)
for {
player, err := reader.ReadString('\n')
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
players = append(players, strings.ToLower(strings.Trim(player, " \n")))
}
return players, nil
}
// NewGameEventFromRow builds a GameEvent from one play-by-play CSV row.
// TimeSec is the absolute game time derived from the period number and
// the clock remaining in the period. Returns an error (rather than an
// event) for malformed rows such as the header line.
func NewGameEventFromRow(row []string) (*GameEvent, error) {
	// Guard against short rows so malformed lines produce an error
	// instead of an index-out-of-range panic (AWAY_DESC_IDX is the
	// largest column this function reads).
	if len(row) <= AWAY_DESC_IDX {
		return nil, fmt.Errorf("row too short: %d fields", len(row))
	}
	var err error
	gd := &GameEvent{
		Player: strings.ToLower(row[PLAYER_IDX]),
		Desc:   fmt.Sprintf("%v%v", strings.ToLower(row[HOME_DESC_IDX]), strings.ToLower(row[AWAY_DESC_IDX])),
	}
	timeSplit := strings.Split(row[TIME_IDX], ":")
	if len(timeSplit) != 2 {
		return nil, fmt.Errorf("Malformed time: '%v'", row[TIME_IDX])
	}
	gd.MinLeft, err = strconv.Atoi(timeSplit[0])
	if err != nil {
		return nil, err
	}
	gd.SecLeft, err = strconv.Atoi(timeSplit[1])
	if err != nil {
		return nil, err
	}
	gd.Period, err = strconv.Atoi(row[PERIOD_IDX])
	if err != nil {
		return nil, err
	}
	// Convert "time left in period" into absolute elapsed seconds.
	periodBaseSec := (gd.Period - 1) * QUARTER_TIME_SEC
	cumSecLeft := (gd.MinLeft * 60) + gd.SecLeft
	gd.TimeSec = QUARTER_TIME_SEC - cumSecLeft + periodBaseSec
	return gd, nil
}
// String renders the event as "Q<period> <min>:<sec> | <description>".
func (gd *GameEvent) String() string {
	return "Q" + strconv.Itoa(gd.Period) + " " + strconv.Itoa(gd.MinLeft) + ":" + strconv.Itoa(gd.SecLeft) + " | " + gd.Desc
}
// IsFGA reports whether the event is a field-goal attempt: a make
// ("pts") or a miss that is not a free throw.
func (gd *GameEvent) IsFGA() bool {
	if strings.Contains(gd.Desc, "free throw") {
		return false
	}
	return strings.Contains(gd.Desc, "pts") || strings.Contains(gd.Desc, "miss")
}
// IsFGM reports whether the event is a made field goal ("pts" in the
// description and not a free throw).
func (gd *GameEvent) IsFGM() bool {
	if strings.Contains(gd.Desc, "free throw") {
		return false
	}
	return strings.Contains(gd.Desc, "pts")
}
// Is3PA reports whether the event is a three-point attempt.
func (gd *GameEvent) Is3PA() bool {
	return strings.Index(gd.Desc, "3pt") >= 0
}
// Is3PM reports whether the event is a made three-pointer (a "3pt"
// event that is not marked as a miss).
func (gd *GameEvent) Is3PM() bool {
	if !strings.Contains(gd.Desc, "3pt") {
		return false
	}
	return !strings.Contains(gd.Desc, "miss")
}
// IsFTA reports whether the event is a free-throw attempt.
func (gd *GameEvent) IsFTA() bool {
	return strings.Index(gd.Desc, "free throw") != -1
}
// IsFTM reports whether the event is a made free throw.
func (gd *GameEvent) IsFTM() bool {
	isFT := strings.Contains(gd.Desc, "free throw")
	made := strings.Contains(gd.Desc, "pts")
	return isFT && made
}
// playerData accumulates a player's shooting tallies split into "cold"
// and "hot" situations, plus the derived percentages filled in by
// toRow / toRowAveragedOverSeasons.
type playerData struct {
	Name string
	NumSeasons float64 // number of seasons aggregated (used for averaging)
	TotalMakes, TotalChances float64
	ColdMakes, ColdChances float64
	HotMakes, HotChances float64
	TotalPct, ColdPct, HotPct float64 // percentages, 0-100
	HotColdDiff, HotMakeup float64 // hot-minus-cold pct; hot share of all chances
}
// toRow computes the percentage fields from the raw make/chance tallies
// (guarding the zero-chance cases) and renders the playerData as a CSV
// row of strings. All tally fields are already float64, so the
// original's float64(...) wrappers around them were redundant and have
// been removed; the arithmetic is unchanged.
func (pd *playerData) toRow() []string {
	if pd.TotalChances == 0 {
		pd.TotalPct = 0
		pd.ColdPct = 0
		pd.HotPct = 0
		pd.HotMakeup = 0
	} else if pd.HotChances == 0 {
		// NOTE(review): ColdChances can still be zero on this path, which
		// yields NaN — presumably impossible upstream; confirm.
		pd.TotalPct = (pd.TotalMakes / pd.TotalChances) * 100
		pd.ColdPct = (pd.ColdMakes / pd.ColdChances) * 100
		pd.HotMakeup = 0
	} else {
		pd.TotalPct = (pd.TotalMakes / pd.TotalChances) * 100
		pd.ColdPct = (pd.ColdMakes / pd.ColdChances) * 100
		pd.HotPct = (pd.HotMakes / pd.HotChances) * 100
		pd.HotMakeup = (pd.HotChances / pd.TotalChances) * 100
	}
	pd.HotColdDiff = pd.HotPct - pd.ColdPct
	row := []string{
		pd.Name,
		fmt.Sprintf("%v", pd.TotalMakes),
		fmt.Sprintf("%v", pd.TotalChances),
		fmt.Sprintf("%v", pd.TotalPct),
		fmt.Sprintf("%v", pd.ColdMakes),
		fmt.Sprintf("%v", pd.ColdChances),
		fmt.Sprintf("%v", pd.ColdPct),
		fmt.Sprintf("%v", pd.HotMakes),
		fmt.Sprintf("%v", pd.HotChances),
		fmt.Sprintf("%v", pd.HotPct),
		fmt.Sprintf("%v", pd.HotColdDiff),
		fmt.Sprintf("%v", pd.HotMakeup),
	}
	return row
}
// toRowAveragedOverSeasons divides the tallies by NumSeasons to get
// per-season averages, recomputes the percentage fields, and renders
// the playerData as a CSV row. Unlike toRow, a player with no hot
// chances gets HotMakeup = -1 as a sentinel. All tally fields are
// already float64, so the original's redundant float64(...) wrappers
// have been removed; the arithmetic is unchanged.
// NOTE(review): NumSeasons == 0 with non-zero chances would yield
// +Inf averages — presumably callers always set NumSeasons; confirm.
func (pd *playerData) toRowAveragedOverSeasons() []string {
	if pd.TotalChances != 0 {
		pd.TotalMakes = pd.TotalMakes / pd.NumSeasons
		pd.TotalChances = pd.TotalChances / pd.NumSeasons
		pd.ColdMakes = pd.ColdMakes / pd.NumSeasons
		pd.ColdChances = pd.ColdChances / pd.NumSeasons
	}
	if pd.HotChances != 0 {
		pd.HotMakes = pd.HotMakes / pd.NumSeasons
		pd.HotChances = pd.HotChances / pd.NumSeasons
	}
	if pd.TotalChances == 0 {
		pd.TotalPct = 0
		pd.ColdPct = 0
		pd.HotPct = 0
		pd.HotMakeup = 0
	} else if pd.HotChances == 0 {
		pd.TotalPct = (pd.TotalMakes / pd.TotalChances) * 100
		pd.ColdPct = (pd.ColdMakes / pd.ColdChances) * 100
		pd.HotMakeup = -1
	} else {
		pd.TotalPct = (pd.TotalMakes / pd.TotalChances) * 100
		pd.ColdPct = (pd.ColdMakes / pd.ColdChances) * 100
		pd.HotPct = (pd.HotMakes / pd.HotChances) * 100
		pd.HotMakeup = (pd.HotChances / pd.TotalChances) * 100
	}
	pd.HotColdDiff = pd.HotPct - pd.ColdPct
	row := []string{
		pd.Name,
		fmt.Sprintf("%v", pd.TotalMakes),
		fmt.Sprintf("%v", pd.TotalChances),
		fmt.Sprintf("%v", pd.TotalPct),
		fmt.Sprintf("%v", pd.ColdMakes),
		fmt.Sprintf("%v", pd.ColdChances),
		fmt.Sprintf("%v", pd.ColdPct),
		fmt.Sprintf("%v", pd.HotMakes),
		fmt.Sprintf("%v", pd.HotChances),
		fmt.Sprintf("%v", pd.HotPct),
		fmt.Sprintf("%v", pd.HotColdDiff),
		fmt.Sprintf("%v", pd.HotMakeup),
	}
	return row
}
|
package gonigsberg
import (
	"bufio"
	"errors"
	"io"
	"os"
	"strconv"
	"strings"

	"stringSet"
)
/*
ImmutableGraph is a fixed undirected graph stored as an adjacency list
over dense integer indexes, with maps converting between external
string node ids and internal indexes.
*/
type ImmutableGraph struct {
	adj [][]int // adj[i] lists the neighbor indexes of node i
	nodes map[string]int // node id -> internal index
	idxToID []string // internal index -> node id
}
/*
Creates a new Immutable graph from an edge list where edge list has the format:
# comment
nodeid nodeid
nodeid nodeid
...
Lines starting with '#' and blank lines are skipped; every other line
must contain exactly two whitespace-separated node ids. Each edge is
added in both directions. Returns an error if the path is invalid, a
read fails, or the file format is wrong.
*/
func NewImmutableGraphFromEdgeList(path string) (*ImmutableGraph, error) {
	g := new(ImmutableGraph)
	// open file
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	lines := make([]string, 0)
	r := bufio.NewReader(file)
	for {
		line, err := r.ReadString('\n')
		// ReadString can return a final unterminated line together with
		// io.EOF, so keep the line before checking the error (the
		// original dropped a last line with no trailing newline, and
		// silently treated any read error as end-of-file).
		if len(line) > 0 && line[0] != '\n' && line[0] != '#' {
			lines = append(lines, line)
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
	}
	//create set of node ids
	nodes := stringSet.NewStringSet()
	for _, line := range lines {
		vals := strings.Fields(line)
		if len(vals) != 2 {
			return nil, errors.New("Invalid file format")
		}
		nodes.Add(vals[0])
		nodes.Add(vals[1])
	}
	// allocate data for graph
	g.idxToID = make([]string, len(nodes))
	g.nodes = make(map[string]int, len(nodes))
	g.adj = make([][]int, len(nodes))
	for i := 0; i < len(nodes); i++ {
		g.adj[i] = make([]int, 0)
	}
	// create maps for conversion from id to idx and the reverse
	i := 0
	for k := range nodes {
		g.idxToID[i] = k
		g.nodes[k] = i
		i++
	}
	for _, line := range lines {
		fields := strings.Fields(line)
		src := g.nodes[fields[0]]
		dest := g.nodes[fields[1]]
		g.adj[src] = append(g.adj[src], dest)
		g.adj[dest] = append(g.adj[dest], src)
	}
	return g, nil
}
/*
NewImmutableGraphFromGraphML is a stub: GraphML loading is not yet
implemented and the returned graph is always empty.
*/
func NewImmutableGraphFromGraphML(path string) *ImmutableGraph {
	return new(ImmutableGraph)
}
/*
NewImmutableGraphFromDot is a stub: DOT loading is not yet implemented
and the returned graph is always empty.
*/
func NewImmutableGraphFromDot(path string) *ImmutableGraph {
	return new(ImmutableGraph)
}
// nbrsFromIdx returns the internal adjacency slice for a node index.
// The slice is shared with the graph, not copied.
func (g *ImmutableGraph) nbrsFromIdx(idx int) []int {
	return g.adj[idx]
}
/*
Neighbors returns the ids of all nodes adjacent to id, as a freshly
allocated slice of strings.
*/
func (g *ImmutableGraph) Neighbors(id string) []string {
	adjacent := g.adj[g.nodes[id]]
	ids := make([]string, 0, len(adjacent))
	for _, nbrIdx := range adjacent {
		ids = append(ids, g.idxToID[nbrIdx])
	}
	return ids
}
/*
CountPaths counts the number of paths of length depth from source to each
vertex reachable from source in depth steps, expanding one BFS level at a
time and never traversing the same edge twice.
*/
func (g *ImmutableGraph) CountPaths(source string, depth int) map[string]int {
	// create return map
	retMap := make(map[string]int)
	if depth < 1 {
		// no levels to expand; the original indexed levels[depth-1] and
		// panicked for depth < 1
		return retMap
	}
	// get source idx from source id
	sourceIdx := g.nodes[source]
	// visited nodes for each level
	levels := make([][]int, depth)
	// set for marking edges
	traversedEdges := stringSet.NewStringSet()
	levels[0] = make([]int, len(g.adj[sourceIdx]))
	copy(levels[0], g.adj[sourceIdx])
	// for each level
	for i := 1; i < depth; i++ {
		levels[i] = make([]int, 0, 16)
		for _, v := range levels[i-1] {
			for _, val := range g.adj[v] {
				// create forward and reverse edge keys; a ":" separator is
				// required because plain concatenation made edges such as
				// (1,23) and (12,3) share the key "123"
				edgeKey1 := strconv.Itoa(v) + ":" + strconv.Itoa(val)
				edgeKey2 := strconv.Itoa(val) + ":" + strconv.Itoa(v)
				// if we have not traversed this edge
				if (!traversedEdges.Contains(edgeKey1)) || (!traversedEdges.Contains(edgeKey2)) {
					// mark edge as traversed
					traversedEdges.Add(edgeKey1)
					traversedEdges.Add(edgeKey2)
					// visit node
					levels[i] = append(levels[i], val)
				}
			}
		}
	}
	// get counts of paths
	for _, v := range levels[depth-1] {
		retMap[g.idxToID[v]]++
	}
	return retMap
}
/*
GetConnectedComponents is a stub: connected-component computation is not
yet implemented, so the returned component list is always empty.
*/
func (g *ImmutableGraph) GetConnectedComponents() [][]string{
	components := make([][]string, 0)
	return components
}
package main
import (
"github.com/gin-gonic/gin"
"github.com/kataras/iris"
"github.com/iris-contrib/template/pug"
_ "github.com/youkyll/goat/app/endpoint"
"github.com/youkyll/goat/app/view"
"github.com/youkyll/goat/app/endpoint/api"
"os"
)
// main wires up the iris web app: pug/jade templates from
// clients/templates, an SPA catch-all under /app that always renders
// main.jade, static assets under /assets, the API routes, and then
// listens on :8000. The commented-out gin-based setup below is kept for
// reference.
func main() {
	iris.UseTemplate(pug.New()).Directory("clients/templates", ".jade")
	iris.Get("/app/*subpath", func (c *iris.Context) {
		c.MustRender("main.jade", iris.Map{})
	})
	iris.Static("/assets", "clients/public", 1)
	api.Routes()
	iris.Listen(":8000")
	// r := gin.Default()
	// template(r)
	// static(r)
	//
	// r.LoadHTMLGlob("clients/main.html")
	// r.GET("/", func (c *gin.Context) {
	// 	c.HTML(200, "main.html", gin.H{})
	// })
	//
	//// endpoint.Routes(r)
	// api.Routes(r)
	//
	// serve(r)
}
// serve starts the gin engine on the port named by $PORT, defaulting to
// 3000 when the variable is unset.
func serve(r *gin.Engine) {
	port := os.Getenv("PORT")
	if port == "" {
		port = "3000"
	}
	r.Run(":" + port)
}
// static registers the static asset routes on the engine.
func static(r *gin.Engine) {
	mounts := [][2]string{
		{"/css", "./templates/css"},
		{"/js", "./templates/js"},
		{"/vendor", "./templates/bower_components"},
		{"/static", "./static"},
	}
	for _, m := range mounts {
		r.Static(m[0], m[1])
	}
}
// template installs the HTML templates provided by the view package
// onto the gin engine.
func template(r *gin.Engine) {
	r.SetHTMLTemplate(view.Template())
}
|
package main
import (
"log"
"sync"
"time"
)
// Snowflake id layout constants: 41 timestamp bits, 2 datacenter bits,
// 7 worker bits and 12 sequence bits packed into one int64.
const (
	epoch = int64(1577808000000) // custom epoch (ms): 2020-01-01 00:00:00; ~69 years of range
	timestampBits = uint(41) // bits used by the timestamp
	datacenteridBits = uint(2) // bits used by the datacenter id
	workeridBits = uint(7) // bits used by the worker id
	sequenceBits = uint(12) // bits used by the per-millisecond sequence
	timestampMax = int64(-1 ^ (-1 << timestampBits)) // maximum timestamp value
	datacenteridMax = int64(-1 ^ (-1 << datacenteridBits)) // maximum datacenter id
	workeridMax = int64(-1 ^ (-1 << workeridBits)) // maximum worker id
	sequenceMask = int64(-1 ^ (-1 << sequenceBits)) // mask for sequence values
	workeridShift = sequenceBits // left shift of the worker id
	datacenteridShift = sequenceBits + workeridBits // left shift of the datacenter id
	timestampShift = sequenceBits + workeridBits + datacenteridBits // left shift of the timestamp
)
// Snowflake generates unique, roughly time-ordered 64-bit ids composed
// of timestamp | datacenter id | worker id | sequence. The embedded
// mutex serializes NextVal.
type Snowflake struct {
	sync.Mutex
	timestamp int64 // millisecond the last id was generated in
	workerid int64
	datacenterid int64
	sequence int64 // per-millisecond sequence counter
}
// NextVal returns the next unique id, or 0 (after a fatal log) once the
// custom epoch's 41-bit timestamp range is exhausted. Safe for
// concurrent use.
func (s *Snowflake) NextVal() int64 {
	s.Lock()
	defer s.Unlock()
	now := time.Now().UnixNano() / 1_000_000 // current time in ms
	// Clock moved backwards: wait until it catches up rather than risk
	// issuing duplicate ids (the original reset the sequence and reused
	// older timestamps).
	for now < s.timestamp {
		now = time.Now().UnixNano() / 1_000_000
	}
	if s.timestamp == now {
		s.sequence = (s.sequence + 1) & sequenceMask
		if s.sequence == 0 {
			// Sequence exhausted within this millisecond; spin to the next.
			for now <= s.timestamp {
				now = time.Now().UnixNano() / 1_000_000
			}
		}
	} else {
		s.sequence = 0
	}
	t := now - epoch
	if t > timestampMax {
		log.Default().Fatalf("epoch must be between 0 and %d", timestampMax-1)
		return 0
	}
	s.timestamp = now
	return t<<timestampShift | s.datacenterid<<datacenteridShift | s.workerid<<workeridShift | s.sequence
}
|
package main
import (
"fmt"
"net/http"
)
func httpSuccess(w http.ResponseWriter, req *http.Request) {
fmt.Fprintf(w, "{ \"code\": 200 }")
}
func httpAPIError(w http.ResponseWriter, req *http.Request) {
fmt.Fprintf(w, "{\"code\":401, \"message\": \"Invalid API key.\"}")
}
func httpWrongCity(w http.ResponseWriter, req *http.Request) {
fmt.Fprintf(w, "{\"code\":\"404\",\"message\":\"city not found\"}")
}
// startTestServer serves the three canned endpoints on :8080 using the
// default mux. Blocks until the server stops.
func startTestServer() {
	http.HandleFunc("/success", httpSuccess)
	http.HandleFunc("/apierror", httpAPIError)
	http.HandleFunc("/wrongcity", httpWrongCity)
	// ListenAndServe always returns a non-nil error when it stops; the
	// original silently discarded it (e.g. a port-in-use failure).
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("test server:", err)
	}
}
|
package main
import (
"flag"
"fmt"
"github.com/IMQS/updater/updater"
"os"
)
// usageTxt is the command summary printed by the custom flag.Usage.
const usageTxt = `commands:
  buildmanifest <dir>  Update manifest in <dir>
  run                  Run in foreground (in console)
  service              Run as a Windows Service
  download             Check for new content, and download
  apply                If an update is ready to be applied, then do so
`
// main parses the command line and dispatches the updater subcommands
// listed in usageTxt. The long if/else chain is replaced with a switch
// (same behavior), and the local closure formerly named "init" is
// renamed to avoid confusion with Go's package init functions.
func main() {
	flagConfig := flag.String("config", "", "JSON config file (must be specified)")
	flag.Usage = func() {
		os.Stderr.WriteString(usageTxt)
		fmt.Fprintf(os.Stderr, "options:\n")
		flag.PrintDefaults()
	}
	// helpDie prints msg — or the full usage when msg is empty — and exits.
	helpDie := func(msg string) {
		if msg != "" {
			fmt.Fprintf(os.Stderr, "%v\n", msg)
		} else {
			flag.Usage()
		}
		os.Exit(1)
	}
	errDie := func(err error) {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	flag.CommandLine.Parse(os.Args[1:])
	cmd := flag.Arg(0)
	upd := updater.NewUpdater()
	// initialize loads the config file and prepares the updater, dying
	// with a helpful message on any failure.
	initialize := func() {
		if *flagConfig == "" {
			helpDie("No config specified")
		} else if err := upd.Config.LoadFile(*flagConfig); err != nil {
			helpDie(err.Error())
		}
		if err := upd.Initialize(); err != nil {
			helpDie(err.Error())
		}
	}
	switch cmd {
	case "buildmanifest":
		if len(flag.Args()) != 2 {
			helpDie("no directory specified")
		}
		root := flag.Arg(1)
		if manifest, err := updater.BuildManifest(root); err != nil {
			errDie(err)
		} else if err := manifest.Write(root); err != nil {
			errDie(err)
		}
	case "run":
		initialize()
		upd.Run()
	case "download":
		initialize()
		upd.Download()
	case "apply":
		initialize()
		upd.Apply()
	case "service":
		initialize()
		if !upd.RunAsService() {
			fmt.Printf("Unable to run as service\n")
		}
	case "":
		helpDie("")
	default:
		helpDie("Unrecognized command: " + cmd)
	}
}
|
package router
import (
"github.com/bqxtt/book_online/api/auth"
"github.com/bqxtt/book_online/api/handler"
"github.com/bqxtt/book_online/api/router/middleware"
"github.com/gin-gonic/gin"
swaggerFiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
"log"
"net/http"
)
// Init builds the gin router with CORS and auth middleware, registers
// the user, admin, book and swagger route groups, the token-generation
// test endpoints, and then serves on :8080 (fatally logging if the
// server cannot start).
func Init() {
	router := gin.Default()
	router.Use(middleware.Cors(), middleware.Auth())
	// user account endpoints
	user := router.Group("/user")
	{
		user.POST("/register", handler.Register)
		user.POST("/login", handler.Login)
		user.GET("/info", handler.GetUserInfo)
		user.POST("/info/update", handler.UpdateUserInfo)
	}
	// admin-only management endpoints
	admin := router.Group("/admin")
	{
		adminUser := admin.Group("/user")
		{
			adminUser.POST("/list", handler.ListAllUsers)
			adminUser.DELETE("/delete/:userId", handler.DeleteUser)
		}
		adminBook := admin.Group("/book")
		{
			adminBook.DELETE("/delete/:bookId", handler.DeleteBook)
			adminBook.POST("/update", handler.UpdateBook)
			adminBook.POST("/create", handler.CreateBook)
			// all users' borrow/return records
			bookRecord := adminBook.Group("/record")
			{
				bookRecord.POST("/borrow", handler.ListAllBorrowedBook)
				bookRecord.POST("/return", handler.ListAllReturnedBook)
				bookRecord.POST("/all", handler.ListAllBookRecords)
			}
		}
	}
	// per-user book browsing and borrowing
	book := router.Group("/book")
	{
		book.POST("/list", handler.ListBooks)
		bookRecord := book.Group("/record")
		{
			bookRecord.POST("/borrow", handler.ListBorrowedBook)
			bookRecord.POST("/return", handler.ListReturnedBook)
			bookRecord.POST("/all", handler.ListBookRecords)
		}
		book.POST("/borrow", handler.BorrowBook)
		book.POST("/return", handler.ReturnBook)
	}
	router.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
	// temporary token generators for API testing
	router.GET("/token/admin", getAdminToken)
	router.GET("/token/user", getUserToken)
	if err := router.Run(":8080"); err != nil {
		log.Fatalf("router run error: %v", err)
	}
}
// getAdminToken generates an admin jwt token for api testing.
// @Tags temp
// @Summary Generate an admin token
// @Description Generate an admin token
// @Accept json
// @Produce json
// @Success 200 {string} string
// @Router /token/admin [get]
func getAdminToken(c *gin.Context) {
	token, err := auth.GenerateToken("10175101201", "admin")
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.String(http.StatusOK, token)
}
// getUserToken generates an ordinary-user jwt token for api testing.
// @Tags temp
// @Summary Generate a user token
// @Description Generate a user token
// @Accept json
// @Produce json
// @Success 200 {string} string
// @Router /token/user [get]
func getUserToken(c *gin.Context) {
	token, err := auth.GenerateToken("10175101201", "user")
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.String(http.StatusOK, token)
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/beta/compute_beta_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta"
)
// Server implements the gRPC interface for NetworkEndpointGroup.
// The type is stateless; a DCL client is created per request from the
// request's service account file.
type NetworkEndpointGroupServer struct{}
// ProtoToNetworkEndpointGroupNetworkEndpointTypeEnum converts a NetworkEndpointGroupNetworkEndpointTypeEnum enum from its proto representation.
// The proto zero value is treated as unset and maps to nil; otherwise the
// DCL enum value is recovered by stripping the proto type-name prefix.
func ProtoToComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum(e betapb.ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum) *beta.NetworkEndpointGroupNetworkEndpointTypeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum_name[int32(e)]; ok {
		e := beta.NetworkEndpointGroupNetworkEndpointTypeEnum(n[len("ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToNetworkEndpointGroupCloudRun converts a NetworkEndpointGroupCloudRun resource from its proto representation.
// A nil proto maps to nil; string fields are converted with dcl.StringOrNil.
func ProtoToComputeBetaNetworkEndpointGroupCloudRun(p *betapb.ComputeBetaNetworkEndpointGroupCloudRun) *beta.NetworkEndpointGroupCloudRun {
	if p == nil {
		return nil
	}
	obj := &beta.NetworkEndpointGroupCloudRun{
		Service: dcl.StringOrNil(p.Service),
		Tag:     dcl.StringOrNil(p.Tag),
		UrlMask: dcl.StringOrNil(p.UrlMask),
	}
	return obj
}
// ProtoToNetworkEndpointGroupAppEngine converts a NetworkEndpointGroupAppEngine resource from its proto representation.
// A nil proto maps to nil; string fields are converted with dcl.StringOrNil.
func ProtoToComputeBetaNetworkEndpointGroupAppEngine(p *betapb.ComputeBetaNetworkEndpointGroupAppEngine) *beta.NetworkEndpointGroupAppEngine {
	if p == nil {
		return nil
	}
	obj := &beta.NetworkEndpointGroupAppEngine{
		Service: dcl.StringOrNil(p.Service),
		Version: dcl.StringOrNil(p.Version),
		UrlMask: dcl.StringOrNil(p.UrlMask),
	}
	return obj
}
// ProtoToNetworkEndpointGroupCloudFunction converts a NetworkEndpointGroupCloudFunction resource from its proto representation.
// A nil proto maps to nil; string fields are converted with dcl.StringOrNil.
func ProtoToComputeBetaNetworkEndpointGroupCloudFunction(p *betapb.ComputeBetaNetworkEndpointGroupCloudFunction) *beta.NetworkEndpointGroupCloudFunction {
	if p == nil {
		return nil
	}
	obj := &beta.NetworkEndpointGroupCloudFunction{
		Function: dcl.StringOrNil(p.Function),
		UrlMask:  dcl.StringOrNil(p.UrlMask),
	}
	return obj
}
// ProtoToNetworkEndpointGroup converts a NetworkEndpointGroup resource from its proto representation.
// Scalar fields go through dcl.*OrNil; nested messages and the enum go
// through their dedicated converters above.
func ProtoToNetworkEndpointGroup(p *betapb.ComputeBetaNetworkEndpointGroup) *beta.NetworkEndpointGroup {
	obj := &beta.NetworkEndpointGroup{
		Id:                  dcl.Int64OrNil(p.Id),
		SelfLink:            dcl.StringOrNil(p.SelfLink),
		SelfLinkWithId:      dcl.StringOrNil(p.SelfLinkWithId),
		Name:                dcl.StringOrNil(p.Name),
		Description:         dcl.StringOrNil(p.Description),
		NetworkEndpointType: ProtoToComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum(p.GetNetworkEndpointType()),
		Size:                dcl.Int64OrNil(p.Size),
		Location:            dcl.StringOrNil(p.Location),
		Network:             dcl.StringOrNil(p.Network),
		Subnetwork:          dcl.StringOrNil(p.Subnetwork),
		DefaultPort:         dcl.Int64OrNil(p.DefaultPort),
		CloudRun:            ProtoToComputeBetaNetworkEndpointGroupCloudRun(p.GetCloudRun()),
		AppEngine:           ProtoToComputeBetaNetworkEndpointGroupAppEngine(p.GetAppEngine()),
		CloudFunction:       ProtoToComputeBetaNetworkEndpointGroupCloudFunction(p.GetCloudFunction()),
		Project:             dcl.StringOrNil(p.Project),
	}
	return obj
}
// NetworkEndpointGroupNetworkEndpointTypeEnumToProto converts a NetworkEndpointGroupNetworkEndpointTypeEnum enum to its proto representation.
// nil or an unknown enum string maps to the proto zero value (unset).
func ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnumToProto(e *beta.NetworkEndpointGroupNetworkEndpointTypeEnum) betapb.ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum {
	if e == nil {
		return betapb.ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum(0)
	}
	if v, ok := betapb.ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum_value["NetworkEndpointGroupNetworkEndpointTypeEnum"+string(*e)]; ok {
		return betapb.ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum(v)
	}
	return betapb.ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnum(0)
}
// NetworkEndpointGroupCloudRunToProto converts a NetworkEndpointGroupCloudRun resource to its proto representation.
// nil maps to nil; string pointers are flattened with dcl.ValueOrEmptyString.
func ComputeBetaNetworkEndpointGroupCloudRunToProto(o *beta.NetworkEndpointGroupCloudRun) *betapb.ComputeBetaNetworkEndpointGroupCloudRun {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaNetworkEndpointGroupCloudRun{
		Service: dcl.ValueOrEmptyString(o.Service),
		Tag:     dcl.ValueOrEmptyString(o.Tag),
		UrlMask: dcl.ValueOrEmptyString(o.UrlMask),
	}
	return p
}
// NetworkEndpointGroupAppEngineToProto converts a NetworkEndpointGroupAppEngine resource to its proto representation.
// nil maps to nil; string pointers are flattened with dcl.ValueOrEmptyString.
func ComputeBetaNetworkEndpointGroupAppEngineToProto(o *beta.NetworkEndpointGroupAppEngine) *betapb.ComputeBetaNetworkEndpointGroupAppEngine {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaNetworkEndpointGroupAppEngine{
		Service: dcl.ValueOrEmptyString(o.Service),
		Version: dcl.ValueOrEmptyString(o.Version),
		UrlMask: dcl.ValueOrEmptyString(o.UrlMask),
	}
	return p
}
// NetworkEndpointGroupCloudFunctionToProto converts a NetworkEndpointGroupCloudFunction resource to its proto representation.
// nil maps to nil; string pointers are flattened with dcl.ValueOrEmptyString.
func ComputeBetaNetworkEndpointGroupCloudFunctionToProto(o *beta.NetworkEndpointGroupCloudFunction) *betapb.ComputeBetaNetworkEndpointGroupCloudFunction {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaNetworkEndpointGroupCloudFunction{
		Function: dcl.ValueOrEmptyString(o.Function),
		UrlMask:  dcl.ValueOrEmptyString(o.UrlMask),
	}
	return p
}
// NetworkEndpointGroupToProto converts a NetworkEndpointGroup resource to its proto representation.
// Pointer scalars are flattened with dcl.ValueOrEmpty*; nested messages
// and the enum go through their dedicated converters above.
func NetworkEndpointGroupToProto(resource *beta.NetworkEndpointGroup) *betapb.ComputeBetaNetworkEndpointGroup {
	p := &betapb.ComputeBetaNetworkEndpointGroup{
		Id:                  dcl.ValueOrEmptyInt64(resource.Id),
		SelfLink:            dcl.ValueOrEmptyString(resource.SelfLink),
		SelfLinkWithId:      dcl.ValueOrEmptyString(resource.SelfLinkWithId),
		Name:                dcl.ValueOrEmptyString(resource.Name),
		Description:         dcl.ValueOrEmptyString(resource.Description),
		NetworkEndpointType: ComputeBetaNetworkEndpointGroupNetworkEndpointTypeEnumToProto(resource.NetworkEndpointType),
		Size:                dcl.ValueOrEmptyInt64(resource.Size),
		Location:            dcl.ValueOrEmptyString(resource.Location),
		Network:             dcl.ValueOrEmptyString(resource.Network),
		Subnetwork:          dcl.ValueOrEmptyString(resource.Subnetwork),
		DefaultPort:         dcl.ValueOrEmptyInt64(resource.DefaultPort),
		CloudRun:            ComputeBetaNetworkEndpointGroupCloudRunToProto(resource.CloudRun),
		AppEngine:           ComputeBetaNetworkEndpointGroupAppEngineToProto(resource.AppEngine),
		CloudFunction:       ComputeBetaNetworkEndpointGroupCloudFunctionToProto(resource.CloudFunction),
		Project:             dcl.ValueOrEmptyString(resource.Project),
	}
	return p
}
// ApplyNetworkEndpointGroup handles the gRPC request by passing it to the underlying NetworkEndpointGroup Apply() method.
// The request resource is converted to its DCL form, applied with the
// given client, and the applied result converted back to proto.
func (s *NetworkEndpointGroupServer) applyNetworkEndpointGroup(ctx context.Context, c *beta.Client, request *betapb.ApplyComputeBetaNetworkEndpointGroupRequest) (*betapb.ComputeBetaNetworkEndpointGroup, error) {
	p := ProtoToNetworkEndpointGroup(request.GetResource())
	res, err := c.ApplyNetworkEndpointGroup(ctx, p)
	if err != nil {
		return nil, err
	}
	r := NetworkEndpointGroupToProto(res)
	return r, nil
}
// ApplyNetworkEndpointGroup handles the gRPC request by passing it to the underlying NetworkEndpointGroup Apply() method.
// A client is created from the request's service account file before
// delegating to applyNetworkEndpointGroup.
func (s *NetworkEndpointGroupServer) ApplyComputeBetaNetworkEndpointGroup(ctx context.Context, request *betapb.ApplyComputeBetaNetworkEndpointGroupRequest) (*betapb.ComputeBetaNetworkEndpointGroup, error) {
	cl, err := createConfigNetworkEndpointGroup(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyNetworkEndpointGroup(ctx, cl, request)
}
// DeleteNetworkEndpointGroup handles the gRPC request by passing it to the underlying NetworkEndpointGroup Delete() method.
// On success an empty proto is returned alongside the delete error, if any.
func (s *NetworkEndpointGroupServer) DeleteComputeBetaNetworkEndpointGroup(ctx context.Context, request *betapb.DeleteComputeBetaNetworkEndpointGroupRequest) (*emptypb.Empty, error) {
	cl, err := createConfigNetworkEndpointGroup(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteNetworkEndpointGroup(ctx, ProtoToNetworkEndpointGroup(request.GetResource()))
}
// ListComputeBetaNetworkEndpointGroup handles the gRPC request by passing it to the underlying NetworkEndpointGroupList() method.
// Each listed DCL resource is converted back to its proto form for the response.
func (s *NetworkEndpointGroupServer) ListComputeBetaNetworkEndpointGroup(ctx context.Context, request *betapb.ListComputeBetaNetworkEndpointGroupRequest) (*betapb.ListComputeBetaNetworkEndpointGroupResponse, error) {
	cl, err := createConfigNetworkEndpointGroup(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListNetworkEndpointGroup(ctx, request.Project, request.Location)
	if err != nil {
		return nil, err
	}
	var protos []*betapb.ComputeBetaNetworkEndpointGroup
	for _, r := range resources.Items {
		rp := NetworkEndpointGroupToProto(r)
		protos = append(protos, rp)
	}
	return &betapb.ListComputeBetaNetworkEndpointGroupResponse{Items: protos}, nil
}
// createConfigNetworkEndpointGroup creates a beta compute client
// authenticated with the given service account file. The parameter is
// renamed from snake_case (service_account_file) to camelCase per Go
// naming convention; behavior is unchanged.
func createConfigNetworkEndpointGroup(ctx context.Context, serviceAccountFile string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return beta.NewClient(conf), nil
}
|
package util
import (
"encoding/json"
"log"
"net"
"net/rpc"
"os"
"github.com/fatih/color"
)
// Contains reports whether aVal is present in vals.
func Contains(vals []string, aVal string) bool {
	for i := range vals {
		if vals[i] == aVal {
			return true
		}
	}
	return false
}
// Remove returns a new slice containing vals with every occurrence of
// aVal filtered out; the input slice is left untouched.
func Remove(vals []string, aVal string) []string {
	kept := []string{}
	for _, candidate := range vals {
		if candidate != aVal {
			kept = append(kept, candidate)
		}
	}
	return kept
}
// JSONMarshel marshals val to a JSON string, returning "" when val
// cannot be marshaled. The original ignored the error with "_", which
// produced the same "" result implicitly; the error path is now explicit.
func JSONMarshel(val interface{}) string {
	str, err := json.Marshal(val)
	if err != nil {
		// best-effort helper: callers treat "" as "no JSON available"
		return ""
	}
	return string(str)
}
// GetEnvMust get env, crashes if env key not set
func GetEnvMust(key string) string {
val := os.Getenv(key)
if val == "" {
log.Fatal("env key ", key, " missing")
}
return val
}
// NewRPC make an rpc server: resolves url as a TCP address, listens on
// it, and registers rcvr on a fresh rpc.Server. Returns the listener
// and the server; the caller owns both.
func NewRPC(url string, rcvr interface{}) (*net.TCPListener, *rpc.Server, error) {
	addy, err := net.ResolveTCPAddr("tcp", url)
	if err != nil {
		return nil, nil, err
	}
	inbound, err := net.ListenTCP("tcp", addy)
	if err != nil {
		return nil, nil, err
	}
	// rcp server
	rpcServer := rpc.NewServer()
	if err := rpcServer.Register(rcvr); err != nil {
		// don't leak the bound socket when registration fails — the
		// original returned with the listener still open
		inbound.Close()
		return nil, nil, err
	}
	return inbound, rpcServer, nil
}
// LogColor returns a colored Println for the given index. Indexes wrap
// around the five-color palette; the original indexed the slice
// directly and panicked for idx > 4 or negative idx.
func LogColor(idx int) func(a ...interface{}) (n int, err error) {
	palette := []color.Attribute{
		color.FgGreen,
		color.FgYellow,
		color.FgBlue,
		color.FgMagenta,
		color.FgCyan,
	}
	i := idx % len(palette)
	if i < 0 {
		i += len(palette)
	}
	return color.New(palette[i]).Println
}
|
package main
import (
"github.com/adampresley/webframework/server"
"github.com/gobucket/gobucketserver/application"
)
// setupMiddleware attaches the application's logging, access-control
// and OPTIONS-request middleware to the HTTP listener, in that order.
func setupMiddleware(httpListener *server.HTTPListener, app *application.Application) {
	httpListener.
		AddMiddleware(app.Logger).
		AddMiddleware(app.AccessControl).
		AddMiddleware(app.OptionsHandler)
}
|
package nodenormal
import (
"fmt"
"time"
"github.com/fananchong/go-xserver/common"
"github.com/fananchong/go-xserver/common/utils"
nodecommon "github.com/fananchong/go-xserver/internal/components/node/common"
"github.com/fananchong/go-xserver/internal/db"
"github.com/fananchong/go-xserver/internal/protocol"
"github.com/fananchong/go-xserver/internal/utility"
)
// Session : network session class for this node's link to the Mgr Server.
type Session struct {
	*nodecommon.SessionBase
}
// NewSession : constructor; wires the SessionBase's derived-session
// callback back to the new Session.
func NewSession(ctx *common.Context) *Session {
	sess := &Session{}
	sess.SessionBase = nodecommon.NewSessionBase(ctx, sess)
	return sess
}
// Start : begins connecting to the Mgr Server. Always returns true,
// since connectMgrServer itself retries until the connection succeeds.
func (sess *Session) Start() bool {
	sess.connectMgrServer()
	return true
}
func (sess *Session) connectMgrServer() {
TRY_AGAIN:
addr, port := getMgrInfoByBlock(sess.Ctx)
if sess.Connect(fmt.Sprintf("%s:%d", addr, port), sess) == false {
time.Sleep(1 * time.Second)
goto TRY_AGAIN
}
sess.Verify()
sess.registerSelf()
}
// registerSelf announces this node to the management server: it fills
// MSG_MGR_REGISTER_SERVER with our server ID, node type, inner/outer
// IPs, configured ports and the intranet token, then sends it.
func (sess *Session) registerSelf() {
	msg := &protocol.MSG_MGR_REGISTER_SERVER{}
	msg.Data = &protocol.SERVER_INFO{}
	msg.Data.Id = utility.NodeID2ServerID(sess.GetID())
	msg.Data.Type = uint32(sess.Ctx.Node.GetType())
	msg.Data.Addrs = []string{utils.GetIPInner(sess.Ctx), utils.GetIPOuter(sess.Ctx)}
	msg.Data.Ports = sess.Ctx.Config.Network.Port
	// TODO: support these fields later
	// msg.Data.Overload
	// msg.Data.Version
	msg.Token = sess.Ctx.Config.Common.IntranetToken
	sess.Info = msg.GetData()
	sess.SendMsg(uint64(protocol.CMD_MGR_REGISTER_SERVER), msg)
	sess.Ctx.Log.Infoln("Register your information with the management server, info:", msg.GetData())
}
// DoRegister : invoked when some other node registers with the manager.
// It caches that node's info locally and, when message relay is enabled
// and the node is a Gateway, starts the direct interconnection.
func (sess *Session) DoRegister(msg *protocol.MSG_MGR_REGISTER_SERVER, data []byte, flag byte) {
	sess.Ctx.Log.Infoln("The service node registers information with me with ID ", utility.ServerID2UUID(msg.GetData().GetId()).String())
	// Save the other node's information locally.
	targetSess := NewIntranetSession(sess.Ctx)
	targetSess.Info = msg.GetData()
	nodecommon.GetSessionMgr().Register(targetSess.SessionBase)
	// If an interconnection relationship exists, begin connecting.
	if sess.IsEnableMessageRelay() && targetSess.Info.GetType() == uint32(common.Gateway) {
		targetSess.Start()
	}
}
// DoVerify : called on verification with our own registration message.
// Intentionally a no-op for this session type.
func (sess *Session) DoVerify(msg *protocol.MSG_MGR_REGISTER_SERVER, data []byte, flag byte) {
}
// DoLose : invoked when a service node is lost. It closes any direct
// TCP connection to the node (Gateway + relay enabled), removes it from
// the session manager, and logs the remaining nodes of the same type.
func (sess *Session) DoLose(msg *protocol.MSG_MGR_LOSE_SERVER, data []byte, flag byte) {
	sess.Ctx.Log.Infoln("Service node connection lost, ID is", utility.ServerID2UUID(msg.GetId()).String(), "type:", msg.GetType())
	// If an interconnection relationship exists, close the TCP link.
	if sess.IsEnableMessageRelay() && msg.GetType() == uint32(common.Gateway) {
		targetSess := nodecommon.GetSessionMgr().GetByID(utility.ServerID2NodeID(msg.GetId()))
		if targetSess != nil {
			targetSess.Close()
		}
	}
	nodecommon.GetSessionMgr().Lose2(msg.GetId(), common.NodeType(msg.GetType()))
	sess.Ctx.Log.Infof("Remaining list of service nodes of this type[%d]:\n", msg.GetType())
	nodecommon.GetSessionMgr().ForByType(common.NodeType(msg.GetType()), func(sessbase *nodecommon.SessionBase) {
		sess.Ctx.Log.Infof("\t%s\n", utility.ServerID2UUID(sessbase.GetSID()).String())
	})
}
// DoClose : invoked when the connection to the manager closes; after a
// short delay a goroutine re-establishes the connection.
func (sess *Session) DoClose(sessbase *nodecommon.SessionBase) {
	go func() {
		time.Sleep(1 * time.Second)
		sess.connectMgrServer()
	}()
}
// Ping : sends a keep-alive MSG_MGR_PING to the management server.
func (sess *Session) Ping() {
	msg := &protocol.MSG_MGR_PING{}
	sess.SendMsg(uint64(protocol.CMD_MGR_PING), msg)
}
// getMgrInfoByBlock blocks until the management server's record can be
// loaded from the DB, then returns its address and port.
func getMgrInfoByBlock(ctx *common.Context) (string, int32) {
	ctx.Log.Infoln("Try to get management server information ...")
	data := db.NewMgrServer(ctx.Config.DbMgr.Name, 0)
	for {
		err := data.Load()
		if err == nil {
			break
		}
		// Keep retrying once a second until the record appears.
		ctx.Log.Errorln(err)
		time.Sleep(time.Second)
	}
	ctx.Log.Infoln("The address of the management server is", data.GetAddr())
	ctx.Log.Infoln("The port of the management server is", data.GetPort())
	return data.GetAddr(), data.GetPort()
}
|
package manager
import(
"download"
"stockdb"
"parser"
"handler"
"config"
//"fmt"
"os"
//"encoding/json"
)
// StockListManager wires together the pieces needed to refresh the
// stock list: configuration, a page downloader, and the stock-list DB.
type StockListManager struct {
	config   config.StockListConfig  // site/category configuration
	download *download.StockDownloader // fetches listing pages
	db       *stockdb.StockListDB     // persistence target
}
// Init loads configuration and constructs the downloader and database.
// NOTE(review): the config path is hard-coded and relative to the
// working directory — running from another directory will break it.
func (s *StockListManager) Init() {
	const filename = "../config/stocklist.json"
	s.config = config.Parse(filename)
	s.download = download.NewDownloader()
	s.db = stockdb.NewStockListDB("chinastock")
}
// Process downloads every configured category page, parses the stocks
// out of it, stamps each stock with the category's exchange, and bulk
// inserts them into the database.
func (s *StockListManager) Process() {
	baseUrl := s.config.Sites.BaseUrl
	categories := s.config.Sites.Categories
	//fmt.Println(baseUrl)
	//fmt.Println(len(categories))
	for _, c := range categories {
		//fmt.Println(i,c)
		pageStr := s.download.GetPage(baseUrl, c.Type, c.Class)
		//s.parser.ParseStr(pageStr)
		h := handler.NewStockHandler()
		// NOTE(review): the local `parser` shadows the imported parser
		// package for the rest of this iteration.
		parser := parser.NewTextParser(h)
		parser.ParseStr(pageStr)
		//fmt.Println(len(h.Stocks))
		//stockstr := h.ToJson()
		//s.WriteFile(c.Type + c.Class, stockstr)
		// Copy-modify-writeback because ranging yields value copies.
		for k, v := range h.Stocks {
			v.Exchange = c.Exchange
			h.Stocks[k] = v
		}
		s.db.TranInsert(h.Stocks)
		//exchange := c.Exchange
		//for _, st := range s.h.Stocks {
		//	fmt.Println(id)
		//	s.db.DeleteStock(st)
		//	s.db.InsertStock(exchange, st)
		//}
	}
}
// WriteFile writes content to filename, creating or truncating it.
// Failures panic, matching the method's original error strategy.
func (s *StockListManager) WriteFile(filename string, content string) {
	file, err := os.Create(filename)
	if err != nil {
		panic(err)
	}
	// Bug fix: Close was previously deferred before the error check,
	// deferring a Close on a nil handle when Create failed.
	defer file.Close()
	// Bug fix: the WriteString error was silently discarded.
	if _, err := file.WriteString(content); err != nil {
		panic(err)
	}
}
// NewStockListManager constructs and initializes a StockListManager
// (config parsed, downloader and DB ready).
func NewStockListManager() *StockListManager {
	manager := &StockListManager{}
	manager.Init()
	return manager
}
|
package main
import (
"context"
"goimpulse/conf"
"net/http"
"goimpulse/lib"
"goimpulse/sender"
"time"
"fmt"
"io/ioutil"
"github.com/coreos/etcd/client"
"github.com/facebookgo/grace/gracehttp"
"github.com/labstack/echo"
log "github.com/sirupsen/logrus"
)
var masterHost string
// main runs the node_manager: an HTTP front that proxies /getid calls
// to the current master (tracked via etcd in watchMasterHost) and
// serves gracefully via gracehttp.
// NOTE(review): the error from the initial sender.GetMaster() is
// discarded; until the watcher fires, masterHost may be empty.
func main() {
	masterHost, _ = sender.GetMaster()
	go watchMasterHost()
	lib.OnReload()
	e := echo.New()
	e.GET("/getid", func(c echo.Context) error {
		// Default payload; "id" stays -1 only on the error paths below,
		// since success returns the master's body verbatim.
		result := map[string]interface{}{
			"id":   -1,
			"code": 0,
			"msg":  "success",
		}
		typeName := c.QueryParam("type")
		httpClient := &http.Client{}
		// Forward the caller's basic-auth credentials to the master.
		req, _ := http.NewRequest("GET", fmt.Sprintf("http://%s/getid?type=%s", masterHost, typeName), nil)
		user, pass, _ := c.Request().BasicAuth()
		req.SetBasicAuth(user, pass)
		if !lib.CheckAuth(req) {
			result["code"] = -1
			result["msg"] = "auth failed"
			return c.JSON(http.StatusForbidden, result)
		}
		res, err := httpClient.Do(req)
		if err != nil {
			// Retry up to 5 times before giving up.
			pass := false
			for i := 0; i < 5; i++ {
				res, err = httpClient.Do(req)
				if err == nil {
					pass = true
					break
				}
				time.Sleep(800 * time.Millisecond)
			}
			if !pass {
				result["code"] = -1
				result["msg"] = "error"
				return c.JSON(http.StatusInternalServerError, result)
			}
		}
		// Relay the master's JSON response unchanged.
		data, _ := ioutil.ReadAll(res.Body)
		res.Body.Close()
		return c.JSONBlob(http.StatusOK, data)
	})
	log.Info("node_manager running...")
	e.Server.Addr = conf.Cfg.NodeManager.Host
	e.Server.SetKeepAlivesEnabled(false)
	e.Logger.Fatal(gracehttp.Serve(e.Server))
}
// watchMasterHost follows the etcd master-node key and refreshes the
// package-level masterHost whenever the key expires or is recreated.
// Runs forever; intended to be launched as a goroutine from main.
func watchMasterHost() {
	ec := lib.GetEtcd()
	kapi := client.NewKeysAPI(ec)
	watcher := kapi.Watcher(lib.MasterNode, nil)
	for {
		resp, err := watcher.Next(context.Background())
		if err != nil {
			// Bug fix: the original ignored this error and then
			// dereferenced a nil resp, crashing the watcher goroutine.
			log.Error(err)
			time.Sleep(100 * time.Millisecond)
			continue
		}
		if resp.Action == "expire" || resp.Action == "create" {
			// The master moved; re-resolve it with a few retries.
			for i := 0; i < 3; i++ {
				host, err := sender.GetMaster()
				if err == nil {
					masterHost = host
					break
				}
				time.Sleep(100 * time.Millisecond)
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
}
|
package main
import (
"flag"
"log"
"runtime"
nc "github.com/gered/nats-cli"
"github.com/nats-io/nats"
)
// usage prints the command-line synopsis and exits via log.Fatalf.
func usage() {
	log.Fatalf("nats-sub [-s server] [-ts] [-tls] [-tlscert CERT_FILE] [-tlskey KEY_FILE] [-tlscacert CA_FILE] [-tlsverify] <subject>")
}
// main parses NATS/TLS flags, connects, subscribes to the single
// positional subject argument, and logs every received message until
// the process is killed (runtime.Goexit keeps other goroutines alive).
func main() {
	log.SetFlags(0)
	var url = flag.String("s", nats.DefaultURL, "NATS comma-separate server URL list")
	var ts = flag.Bool("ts", false, "Display timestamp on logging output")
	var tls = flag.Bool("tls", false, "Enable TLS")
	var tlsCertPath = flag.String("tlscert", "", "Certificate file")
	var tlsKeyPath = flag.String("tlskey", "", "Private key file for certificate")
	var tlsCACertPath = flag.String("tlscacert", "", "Client certificate CA file")
	var tlsVerify = flag.Bool("tlsverify", false, "Enable TLS connection verification")
	flag.Usage = usage
	flag.Parse()
	if flag.NArg() < 1 {
		usage()
		return
	}
	var subject = flag.Arg(0)
	// Optionally timestamp each log line.
	if *ts {
		log.SetFlags(log.LstdFlags)
	} else {
		log.SetFlags(0)
	}
	conn, err := nc.Connect(*url, *tls, *tlsCertPath, *tlsKeyPath, *tlsCACertPath, *tlsVerify)
	if err != nil {
		log.Fatalf("Failed to connect to NATS: %s", err)
	}
	conn.Subscribe(subject, func(msg *nats.Msg) {
		log.Printf("[%s]: %s\n", msg.Subject, string(msg.Data))
	})
	// Flush then check LastError to surface any subscription failure.
	conn.Flush()
	err = conn.LastError()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Listening on subject: %s\n", subject)
	runtime.Goexit()
}
|
package chapter4
import (
"fmt";
"strconv"
"strings"
"unicode/utf8"
)
//注意 单个字符仍与C++一样使用单引号
//字符串是UTF-8字符的一个序列
//当字符是ASCII码时占用一个字节,其他字符根据需要占用2-4个字节
//UTF-8是被广泛使用的编码格式,包括xml JSON
//与C++ Python等不同,Java的字符始终使用2个字节(UTF-16)
//GO不仅减少内存与硬盘的空间占用,并且不需要对UTF-8进行编码解码
//
//字符串是一种值类型,且值不可变,即创建某个文本后无法再次修改这个文本的内容
//这与Java有点类似,字符串是字节的数组
//
//Go支持以下2种形式的字面值
//1。解释字符串
//该类字符串使用双引号括起来,其中的相关的转义字符将被替换
//转义字符包括
//\n:换行符
//\r:回车符
//\t:tab 键
//\u 或 \U:Unicode 字符
//\\:反斜杠自身
//2。非解释字符串
//该类字符串使用反引号括起来,支持换行,例如
//`This is a raw string \n` 中的 `\n\` 会被原样输出。
//与C/C++不同,Go中的字符串是根据长度限定的,而不是特殊字符\0 [这是指的是C风格的字符串]
//string类型的零值是长度为0的字符串,即空字符串""
//一般的比较运算符(== !=。。。)通过在内存中按字节比较来实现字符串的对比
//可以通过len()来获取字符串所占的字节长度:len(str)
//字符串的内容(纯字节)可以通过标准索引法来获取,在中括号[]内写入索引,从0开始
//字符串 str 的第 1 个字节:str[0]
//第 i 个字节:str[i - 1]
//最后 1 个字节:str[len(str)-1]
//注意:这种转换方案只对纯ASCII码有效
//获取字符中某个字节的地址的行为是非法的例如:&str[i]
//字符串拼接符+
//s:= s1+s2
//注意:由于编译器行尾自动补全分号的原因,加号必须放在第一行
//也可使用+=
//在循环中使用+号拼接字符串并不是最高效的方法
//更好的办法是使用函数 strings.Join()
//还有更好的方法,使用字节缓冲bytes.Buffer的拼接
//
//通过将字符串看作是字节(byte)的切片(slice)来实现对其使用索引法的操作
// ExConutCharaters prints the byte length and the rune count of a
// sample string that mixes ASCII with multi-byte UTF-8 characters,
// demonstrating that len() counts bytes, not characters.
func ExConutCharaters() {
	sample := `asSASA ddd dsjkdsjsこん dk`
	fmt.Println("原数量是:", len(sample))
	encoded := []byte(sample)
	runeTotal := utf8.RuneCount(encoded)
	fmt.Println("数量是:", runeTotal)
	// Go performs no implicit conversions, so a string can never be
	// added to an integer; Println takes comma-separated arguments.
}
//作为一种基本数据类型,每种语言都有一些对于字符串的预定义处理函数,Go中使用strings包来完成
//对字符串的主要操作
//
// TestStrFunc demonstrates the main helpers of the strings and strconv
// packages. All results are printed to stdout; nothing is returned.
func TestStrFunc(){
	s := "01大1234aabbccddeeff"
	str := "1234aabbccdd"
	//HasPrefix reports whether str starts with the given prefix.
	res1:=strings.HasPrefix(str,"12")
	//HasSuffix reports whether str ends with the given suffix.
	res2:=strings.HasSuffix(str,"cdd")
	//
	fmt.Println("前",res1," 后",res2)
	//Substring containment:
	//Contains reports whether str contains substr.
	fmt.Println("包含",strings.Contains(str,"bbc"))
	//Position (index) of a substring or character in its parent string.
	//Index returns the byte index of str's first character inside s,
	//or -1 when s does not contain str.
	fmt.Println("包含的位置",strings.Index(s,str)) //5
	fmt.Println("此位置的字符:", string(s[5]))
	//LastIndex returns the last occurrence's index, -1 when absent.
	//For non-ASCII characters, prefer the rune-based variant below.
	//Note: the second argument is a single rune.
	fmt.Println("包含的位置",strings.IndexRune(s,rune('大'))) //2
	fmt.Println("此位置的字符:", string(s[2]))
	//String replacement:
	//Replace substitutes the first n occurrences of old with new and
	//returns a new string; n = -1 replaces all occurrences.
	//Go strings are immutable value types (like Java), so the original
	//string is never modified in place.
	fmt.Println("字符替换:",strings.Replace(str,"1234","oo",2))
	fmt.Println("原字符串:",str) //unchanged
	//Counting occurrences:
	//Count returns the number of NON-overlapping occurrences of a
	//substring within s.
	fmt.Println("统计次数:",strings.Count(str,"a"))
	//Repeating:
	//Repeat returns a new string consisting of count copies of s.
	fmt.Println("重复字符串:",strings.Repeat(str,3))
	//Case conversion (results are returned, not applied in place).
	strings.ToLower(str)
	strings.ToUpper(str)
	//Trimming:
	//strings.TrimSpace(s) strips leading/trailing whitespace.
	//strings.Trim(s, "cut") strips any of the given characters from
	//both ends; TrimLeft/TrimRight restrict this to one end.
	//For characters in the middle, use Replace with an empty new value.
	//Splitting:
	//strings.Fields(s) splits on runs of whitespace and returns a
	//slice; a whitespace-only input yields a zero-length slice.
	//
	//strings.Split(s, sep) splits on a custom separator, also
	//returning a slice.
	//
	//Since both return slices, a for-range loop is the usual consumer.
	for c := range str{
		fmt.Print(c,' ')
	}
	fmt.Println()
	//Note: ranging over a string yields byte indexes here, so Print
	//without formatting emits the numeric values.
	//Joining a slice into a string:
	//Join concatenates a []string using the given separator.
	var test_join []string
	test_join = []string{"aa","bb","cc"}
	fmt.Println("测试Join:",strings.Join(test_join," - "))
	//Reading from a string:
	//strings.NewReader(str) returns a *Reader over the string's
	//contents; related read methods include
	//Read() into a []byte, and
	//ReadByte()/ReadRune() for the next byte or rune.
	r_str:="我的天!"
	reader_r := strings.NewReader(r_str)
	for curr_c,i:=rune(0),0;i<5;i++{
		curr_c,_,_ = reader_r.ReadRune()
		fmt.Print(curr_c," ")
	}
	//Conversions between strings and other types live in strconv,
	//which also exposes platform facts such as strconv.IntSize
	//(the bit width of int on this platform).
	//Converting any type T to a string always succeeds; e.g.
	//strconv.Itoa(i int) string returns i as a decimal string, and
	//strconv.FormatFloat(f float64, fmt byte, prec int, bitSize int) string formats a 64-bit float, where fmt selects the format ('b', 'e', 'f' or 'g'), prec the precision, and bitSize is 32 for float32 or 64 for float64.
	//Converting a string to another type may fail at runtime:
	//strconv.Atoi(s string) (i int, err error) parses an int;
	//strconv.ParseFloat(s string, bitSize int) (f float64, err error) parses a float64.
	//
	//Thanks to multiple return values these functions return (value, err).
	fmt.Println()
	fmt.Println("计算机的IntSize",strconv.IntSize)
}
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package executor
import (
"fmt"
"github.com/koderover/zadig/pkg/microservice/predator/core/service"
)
// Run drives the predator lifecycle: construction, BeforeExec, Exec,
// and AfterExec. Each failure is returned wrapped with the step that
// produced it (errors.Is/As still reach the underlying error) instead
// of being printed and returned separately, so the caller decides how
// to log exactly once.
func Run() error {
	pred, err := service.NewPredator()
	if err != nil {
		return fmt.Errorf("start predator: %w", err)
	}
	if err := pred.BeforeExec(); err != nil {
		return fmt.Errorf("before exec step: %w", err)
	}
	if err := pred.Exec(); err != nil {
		return fmt.Errorf("exec step: %w", err)
	}
	if err := pred.AfterExec(); err != nil {
		return fmt.Errorf("after exec step: %w", err)
	}
	return nil
}
|
package pipeline
import (
"log"
"testing"
)
// TestPipelineProcess feeds a message through a pipeline process whose
// transform increments the int payload, and checks that after each of
// five round trips the payload has grown by exactly one.
func TestPipelineProcess(t *testing.T) {
	// Transform: add 1 to the message's int content.
	inFunc := func(proc PipelineProcess, msg PipelineMessage) PipelineMessage {
		val, ok := msg.Content.(int)
		if ok {
			msg.Content = val + 1
		} else {
			t.Error("Message Content is not int")
		}
		return msg
	}
	outChannel := make(PipelineChannel)
	proc, _ := NewPipelineProcess("test_proc", inFunc)
	msg := PipelineMessage{
		Direction: PipelineInDirection{},
		Content:   0,
	}
	go proc.InProcess(outChannel)
	// Each iteration re-sends the previous result, so the expected
	// content equals the iteration count + 1.
	for i := 0; i < 5; i++ {
		log.Printf("sending msg %+v", msg)
		proc.InQueue(msg)
		log.Println("Waiting for result..")
		res := <-outChannel
		log.Printf("Got result %+v", res)
		val, ok := res.Content.(int)
		if ok {
			if val != i+1 {
				t.Errorf("msg Content: %d != %d", val, i+1)
			}
		} else {
			t.Errorf("Invalid msg Content")
		}
		msg = res
	}
	proc.Close()
}
|
package card
// Card is a single playing card.
type Card struct {
	CardType int // suit / card category
	CardNo   int // rank: 1 (A) .. 13 (K), 14 for jokers
	CardId   int // unique identifier of this physical card
}

// SameCardTypeAs reports whether both cards share the same suit.
// Safe to call on nil receivers or arguments.
func (card *Card) SameCardTypeAs(other *Card) bool {
	if other == nil || card == nil {
		return false
	}
	return other.CardType == card.CardType
}

// SameCardNoAs reports whether both cards share the same rank.
func (card *Card) SameCardNoAs(other *Card) bool {
	if other == nil || card == nil {
		return false
	}
	return other.CardNo == card.CardNo
}

// SameAs reports whether both cards have the same suit and rank
// (CardId is deliberately ignored).
func (card *Card) SameAs(other *Card) bool {
	if other == nil || card == nil {
		return false
	}
	if other.CardType != card.CardType {
		return false
	}
	if other.CardNo != card.CardNo {
		return false
	}
	return true
}

// MakeKey packs rank, suit and id into one int64: CardNo in bits 48+,
// CardType in bits 32..47, CardId in the low 32 bits.
func (card *Card) MakeKey() int64 {
	// Bug fix: widen to int64 BEFORE shifting. The original shifted the
	// plain int (card.CardNo<<48), which overflows on 32-bit platforms.
	return int64(card.CardNo)<<48 | int64(card.CardType)<<32 | int64(card.CardId)
}

// MakeID derives a compact decimal id: rank*100 + suit*10 + num.
func (card *Card) MakeID(num int) int {
	return card.CardNo*100 + card.CardType*10 + num
}

// Next returns the card one rank higher in the same suit, or nil for a
// nil receiver or a King (rank 13).
func (card *Card) Next() *Card {
	if card == nil {
		return nil
	}
	if card.CardNo == 13 {
		return nil
	}
	return &Card{
		CardType: card.CardType,
		CardNo:   card.CardNo + 1,
	}
}

// Prev returns the card one rank lower in the same suit, or nil for a
// nil receiver or an Ace (rank 1).
func (card *Card) Prev() *Card {
	if card == nil {
		return nil
	}
	if card.CardNo == 1 {
		return nil
	}
	return &Card{
		CardType: card.CardType,
		CardNo:   card.CardNo - 1,
	}
}
// String returns the human-readable Chinese name of the card, or a
// diagnostic string for a nil card or an unmapped type/rank.
func (card *Card) String() string {
	if card == nil {
		return "nil"
	}
	cardNameMap := cardNameMap()
	noNameMap, ok1 := cardNameMap[card.CardType]
	if !ok1 {
		// Bug fix: corrected "unknow" -> "unknown" in both messages.
		return "unknown card type"
	}
	name, ok2 := noNameMap[card.CardNo]
	if !ok2 {
		return "unknown card no"
	}
	return name
}
// GetValue returns the card's comparison value for ranking hands:
// Ace (1) maps to 100 so it beats everything, ranks 2..13 map to
// themselves, and any other rank (e.g. jokers, 14) maps to 1.
func (card *Card) GetValue() int {
	no := card.CardNo
	switch {
	case no == 1:
		return 100 // Ace ranks highest
	case no >= 2 && no <= 13:
		return no
	default:
		return 1
	}
}
// GetScore returns the card's point score: ranks 1..10 score their
// face value, J/Q/K (11..13) score 10, and any other rank scores 1.
func (card *Card) GetScore() int {
	no := card.CardNo
	switch {
	case no >= 1 && no <= 10:
		return no
	case no >= 11 && no <= 13:
		return 10
	default:
		return 1
	}
}
// cardNameMap returns the display-name lookup table: suit -> rank ->
// Chinese card name. Jokers live under their own card types with
// rank 14. A fresh map is allocated on every call.
func cardNameMap() map[int]map[int]string {
	return map[int]map[int]string{
		CardType_Fangpian: {
			1:  "A方片",
			2:  "2方片",
			3:  "3方片",
			4:  "4方片",
			5:  "5方片",
			6:  "6方片",
			7:  "7方片",
			8:  "8方片",
			9:  "9方片",
			10: "10方片",
			11: "J方片",
			12: "Q方片",
			13: "K方片",
		},
		CardType_Meihua: {
			1:  "A梅花",
			2:  "2梅花",
			3:  "3梅花",
			4:  "4梅花",
			5:  "5梅花",
			6:  "6梅花",
			7:  "7梅花",
			8:  "8梅花",
			9:  "9梅花",
			10: "10梅花",
			11: "J梅花",
			12: "Q梅花",
			13: "K梅花",
		},
		CardType_Hongtao: {
			1:  "A红桃",
			2:  "2红桃",
			3:  "3红桃",
			4:  "4红桃",
			5:  "5红桃",
			6:  "6红桃",
			7:  "7红桃",
			8:  "8红桃",
			9:  "9红桃",
			10: "10红桃",
			11: "J红桃",
			12: "Q红桃",
			13: "K红桃",
		},
		CardType_Heitao: {
			1:  "A黑桃",
			2:  "2黑桃",
			3:  "3黑桃",
			4:  "4黑桃",
			5:  "5黑桃",
			6:  "6黑桃",
			7:  "7黑桃",
			8:  "8黑桃",
			9:  "9黑桃",
			10: "10黑桃",
			11: "J黑桃",
			12: "Q黑桃",
			13: "K黑桃",
		},
		CardType_Xiaowang: {
			14: "小王",
		},
		CardType_Dawang: {
			14: "大王",
		},
	}
}
|
package main
import (
"context"
"log"
"net"
"google.golang.org/grpc"
"github.com/GreatLaboratory/go-grpc-example/data"
postpb "github.com/GreatLaboratory/go-grpc-example/protos/v1/post"
userpb "github.com/GreatLaboratory/go-grpc-example/protos/v1/user"
user_client "github.com/GreatLaboratory/go-grpc-example/simple-client-server"
)
const portNumber = "9001"
// postServer implements the Post gRPC service; it resolves author
// names through the embedded user-service client.
type postServer struct {
	postpb.PostServer
	userCli userpb.UserClient // client for the user service (author lookup)
}
// ListPostsByUserId returns post messages by user_id, with each post's
// Author filled in from the user service.
// NOTE(review): setting p.Author mutates entries of the shared
// data.UserPosts fixture — presumably Posts holds pointers; confirm.
func (s *postServer) ListPostsByUserId(ctx context.Context, req *postpb.ListPostsByUserIdRequest) (*postpb.ListPostsByUserIdResponse, error) {
	userID := req.UserId
	resp, err := s.userCli.GetUser(ctx, &userpb.GetUserRequest{UserId: userID})
	if err != nil {
		return nil, err
	}
	var postMessages []*postpb.PostMessage
	for _, up := range data.UserPosts {
		if up.UserID != userID {
			continue
		}
		// Stamp the author's display name onto every post.
		for _, p := range up.Posts {
			p.Author = resp.UserMessage.Name
		}
		postMessages = up.Posts
		break
	}
	return &postpb.ListPostsByUserIdResponse{
		PostMessages: postMessages,
	}, nil
}
// ListPosts returns all post messages across all users, resolving each
// author's name via the user service.
// NOTE(review): this issues one GetUser call per user (N+1 pattern);
// fine for the demo dataset, costly at scale.
func (s *postServer) ListPosts(ctx context.Context, req *postpb.ListPostsRequest) (*postpb.ListPostsResponse, error) {
	var postMessages []*postpb.PostMessage
	for _, up := range data.UserPosts {
		resp, err := s.userCli.GetUser(ctx, &userpb.GetUserRequest{UserId: up.UserID})
		if err != nil {
			return nil, err
		}
		for _, p := range up.Posts {
			p.Author = resp.UserMessage.Name
		}
		postMessages = append(postMessages, up.Posts...)
	}
	return &postpb.ListPostsResponse{
		PostMessages: postMessages,
	}, nil
}
// main starts the post gRPC server on portNumber, wiring it to the
// user service at localhost:9000 for author resolution.
func main() {
	lis, err := net.Listen("tcp", ":"+portNumber)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	userCli := user_client.GetUserClient("localhost:9000")
	grpcServer := grpc.NewServer()
	postpb.RegisterPostServer(grpcServer, &postServer{
		userCli: userCli,
	})
	log.Printf("start gRPC server on %s port", portNumber)
	// Serve blocks until the server stops or fails.
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %s", err)
	}
}
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logtail
import (
"context"
"testing"
"time"
)
// TestFastShutdown verifies that Shutdown returns promptly when given
// an already-canceled context, rather than blocking on the upload URL.
func TestFastShutdown(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	l := Log(Config{
		BaseURL: "http://localhost:1234",
	}, t.Logf)
	l.Shutdown(ctx)
}
// sink keeps each encode result live so the compiler cannot optimize
// the call away during the allocation measurement below.
var sink []byte

// TestLoggerEncodeTextAllocs checks that encodeText performs exactly
// one allocation per call (only logged, not failed, when it doesn't).
func TestLoggerEncodeTextAllocs(t *testing.T) {
	lg := &logger{timeNow: time.Now}
	inBuf := []byte("some text to encode")
	n := testing.AllocsPerRun(1000, func() {
		sink = lg.encodeText(inBuf, false)
	})
	if int(n) != 1 {
		t.Logf("allocs = %d; want 1", int(n))
	}
}
|
package payment
import "context"
// ReceiptRepository persists receipts.
// Put stores src; storage and overwrite semantics are defined by the
// implementation.
type ReceiptRepository interface {
	Put(ctx context.Context, src *Receipt) error
}
|
package dict
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"github.com/Kretech/xgo/encoding"
)
var (
ErrNotDict = errors.New(`parsed object is not map[string]interface{}`)
)
// MapDict is a nested string-keyed dictionary supporting dotted-path
// access (e.g. "a.b.c") over map[string]interface{} values.
type MapDict struct {
	data map[string]interface{} // backing storage; nested maps form the tree
}
// String implements fmt.Stringer.
// NOTE(review): always returns "" — this looks like a stub; printing a
// MapDict yields nothing. Consider returning d.Json() — confirm intent.
func (d *MapDict) String() string {
	return ""
}
// NewMapDict returns an empty, ready-to-use MapDict.
func NewMapDict() *MapDict {
	return &MapDict{data: newMap()}
}
// IsEmpty reports whether the dict holds no top-level entries.
func (d *MapDict) IsEmpty() bool {
	return len(d.data) == 0
}
// Len returns the number of top-level entries (nested keys not counted).
func (d *MapDict) Len() int {
	return len(d.data)
}
// Get resolves the dotted path k (e.g. "a.b.c") through nested maps
// and returns the value at the final segment. Missing or non-map
// intermediate nodes yield nil rather than an error.
func (d *MapDict) Get(k interface{}) interface{} {
	segments := strings.Split(toString(k), `.`)
	last := len(segments) - 1
	var node interface{} = toMap(d.data)
	for _, segment := range segments[:last] {
		node = toMap(node)[segment]
	}
	return toMap(node)[segments[last]]
}
// Set stores v at the dotted path k, creating (or replacing) any
// intermediate node that is not already a map[string]interface{}.
func (d *MapDict) Set(k interface{}, v interface{}) {
	paths := strings.Split(toString(k), `.`)
	parent := d.data
	size := len(paths)
	// Walk/create every segment except the last.
	for idx := 0; idx < size-1; idx++ {
		//fmt.Println(idx, d.provider, parent, &d.provider, &parent)
		seq := paths[idx]
		i := parent[seq]
		if _, ok := i.(map[string]interface{}); !ok {
			// Non-map (or missing) node: replace it with a fresh map.
			parent[seq] = newMap()
			parent = parent[seq].(map[string]interface{})
		} else {
			parent = i.(map[string]interface{})
		}
	}
	parent[paths[size-1]] = v
}
// Forget clears the value at dotted path k.
// NOTE(review): it stores nil rather than deleting the key, so Len,
// Keys and iteration still see the entry — confirm this is intended.
func (d *MapDict) Forget(k interface{}) {
	d.Set(k, nil)
}
// ParseJsonString replaces the dict's contents with the JSON object in
// data. On error the previous contents are still overwritten with
// whatever JsonToMap returned alongside the error.
func (d *MapDict) ParseJsonString(data []byte) (err error) {
	d.data, err = JsonToMap(data)
	return
}
// JsonToMap decodes data as JSON and returns it as a string-keyed map.
// Invalid JSON returns an empty map plus the decode error; valid JSON
// that is not an object returns a nil map plus ErrNotDict.
func JsonToMap(data []byte) (map[string]interface{}, error) {
	out := make(map[string]interface{})
	var decoded interface{}
	if err := json.Unmarshal(data, &decoded); err != nil {
		return out, err
	}
	asMap, ok := decoded.(map[string]interface{})
	if !ok {
		// asMap is nil here, matching the original behavior.
		return asMap, ErrNotDict
	}
	return asMap, nil
}
// Keys returns the top-level keys in unspecified (map iteration) order.
// Returns nil for an empty dict.
func (d *MapDict) Keys() (keys []string) {
	for k := range d.data {
		keys = append(keys, k)
	}
	return
}
// Values returns the top-level values in unspecified (map iteration)
// order. Returns nil for an empty dict.
func (d *MapDict) Values() (values []interface{}) {
	for _, v := range d.data {
		values = append(values, v)
	}
	return
}
// Filter returns a new MapDict containing only the top-level entries
// for which fn(value, key) reports true. The receiver is unchanged.
func (d *MapDict) Filter(fn func(interface{}, string) bool) *MapDict {
	out := NewMapDict()
	for key, value := range d.data {
		if fn(value, key) {
			out.Set(key, value)
		}
	}
	return out
}
// Each calls fn(value, key) for every top-level entry, in unspecified
// (map iteration) order.
func (d *MapDict) Each(fn func(interface{}, string)) {
	for k, v := range d.data {
		fn(v, k)
	}
}
// Data exposes the backing map directly (no copy); mutations through
// the returned map are visible to the dict.
func (d *MapDict) Data() map[string]interface{} {
	return d.data
}
// SetData replaces the backing map wholesale (no copy is made).
func (d *MapDict) SetData(data map[string]interface{}) {
	d.data = data
}
// Json renders the dict's contents as a JSON string via the project's
// encoding helper.
func (d *MapDict) Json() string {
	return encoding.JsonEncode(d.data)
}
// toString converts an arbitrary key to its string form: Stringer
// values use String(), strings pass through, ints format as decimal,
// and everything else falls back to fmt.Sprintf("%v", ...).
func toString(k interface{}) string {
	switch v := k.(type) {
	case fmt.Stringer:
		return v.String()
	case string:
		return v
	case int:
		return strconv.Itoa(v)
	default:
		return fmt.Sprintf("%v", v)
	}
}
// newMap returns a fresh, non-nil, empty node for the dict tree.
func newMap() map[string]interface{} {
	return map[string]interface{}{}
}
// toMap coerces i to map[string]interface{}; any other type (including
// nil) yields a fresh empty map so callers can index without checks.
func toMap(i interface{}) map[string]interface{} {
	if m, ok := i.(map[string]interface{}); ok {
		return m
	}
	//fmt.Printf("%v,%v\n", i, m)
	return make(map[string]interface{})
}
|
package gohs
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
)
var hsAPIkey string
var errThreshold = 1
var cl = &http.Client{}
const coreURL = "https://api.hubapi.com"
//SetAPIKey sets the HubSpot API key used by all subsequent requests
//(stored in the package-level hsAPIkey; not safe for concurrent use).
func SetAPIKey(key string) {
	hsAPIkey = key
}
//SetErrorThresholds sets how many consecutive request errors are
//tolerated before Do/DoLoopRequest give up (default 1).
func SetErrorThresholds(et int) {
	errThreshold = et
}
//checkKeyPresence reports whether an API key has been configured via
//SetAPIKey.
func checkKeyPresence() bool {
	return hsAPIkey != ""
}
//HSRequest is the request handler object for HubSpot. Populate it with
//Load, then execute with Do (which dispatches to DoLoopRequest when
//offset paging is configured).
type HSRequest struct {
	//loaded guards Do: it is true only after a successful Load
	loaded bool
	//HTTP Method Type: GET, POST, PUT etc.
	httpMethod string
	//Request headers: Content-Type etc.
	headers []requestHeader
	//URL query parameters
	paramURL map[string]string
	URL       *url.URL
	paramsURL url.Values
	//Raw request body (nil when none)
	body []byte
	//HTTP status code treated as success for this request
	successCode int
	//Offset-based paging configuration
	hasOffset                bool
	offsetIdentifier         string // query-param name used to send the offset
	returnedOffsetIdentifier string // response field holding the next offset
	useLimitAsOffset         bool   // page by offset+limit instead of returned offsets
	limit                    string
	limitIdentifier          string
	totalIndentifier         string // response field holding the total count
}
//requestHeader is a single HTTP header name/value pair to apply to
//outgoing requests.
type requestHeader struct {
	name  string
	value string
}
//addHeader sets this header on req (replacing any existing value).
func (rH *requestHeader) addHeader(req *http.Request) {
	req.Header.Set(rH.name, rH.value)
}
//Load passes the parameters required to perform the request: it
//validates the API key, method, success code and URL body, configures
//offset/limit paging, and stores headers, parameters and body on the
//receiver. On success the request is marked loaded for Do.
func (hsR *HSRequest) Load(method, bodyURL, body, offsetIdentifier, returnedOffsetIdentifier, totalIndentifier, limitIdentifier, limit string, headerNames, headerValues, parameterNames, parameterValues []string, successCode int, useLimitAsOffset bool) error {
	if !checkKeyPresence() {
		return errors.New("no API key present")
	}
	if successCode == 0 || successCode >= 600 {
		return errors.New("http success code invalid")
	}
	hsR.successCode = successCode
	if method == "" {
		return errors.New("no http method set")
	}
	hsR.httpMethod = method
	//Offset paging: remember how to send and read back offsets.
	if offsetIdentifier != "" {
		hsR.offsetIdentifier = offsetIdentifier
		hsR.returnedOffsetIdentifier = returnedOffsetIdentifier
		hsR.hasOffset = true
		hsR.useLimitAsOffset = useLimitAsOffset
	}
	//Limit-as-offset paging: page by adding limit to the offset.
	if hsR.useLimitAsOffset {
		hsR.limit = limit
		hsR.limitIdentifier = limitIdentifier
		hsR.totalIndentifier = totalIndentifier
		hsR.useLimitAsOffset = useLimitAsOffset
	}
	//Load headers (cleanup: the original wrapped this in a redundant
	//doubled `if err != nil` check).
	if err := hsR.loadRequestHeaders(headerNames, headerValues); err != nil {
		return createError("loadHeaders: ", err)
	}
	//Construct the URL.
	hsR.URL, _ = url.Parse(coreURL)
	if bodyURL == "" {
		return errors.New("no url body set")
	}
	hsR.URL.Path += bodyURL
	//Define parameters, always including the API key.
	hsR.paramsURL = url.Values{}
	hsR.paramsURL.Add("hapikey", hsAPIkey)
	if err := hsR.loadURLParameters(parameterNames, parameterValues); err != nil {
		return createError("loadURLParameters: ", err)
	}
	if body != "" {
		hsR.body = []byte(body)
	} else {
		hsR.body = nil
	}
	hsR.loaded = true
	return nil
}
//loadRequestHeaders appends one requestHeader per name/value pair,
//failing when the two slices have different lengths.
func (hsR *HSRequest) loadRequestHeaders(names, headers []string) error {
	if len(names) != len(headers) {
		return errors.New("header name/value array lengths do not match")
	}
	for i, name := range names {
		hsR.headers = append(hsR.headers, requestHeader{name, headers[i]})
	}
	return nil
}
//loadURLParameters adds one URL query parameter per name/value pair,
//failing when the two slices have different lengths.
func (hsR *HSRequest) loadURLParameters(names, parameters []string) error {
	if len(names) != len(parameters) {
		return errors.New("header name/parameter array lengths do not match")
	}
	for i, name := range names {
		hsR.paramsURL.Add(name, parameters[i])
	}
	return nil
}
//Do will utilise the loaded parameters to perform the request. Paged
//requests are delegated to DoLoopRequest; otherwise a single request
//is attempted, retrying each failing stage until errThreshold
//consecutive errors have accumulated.
//NOTE(review): resp.Body is never closed on these paths, which
//prevents connection reuse — confirm and fix separately.
func (hsR *HSRequest) Do() (interface{}, error) {
	//Check that config was loaded
	if !hsR.loaded {
		return nil, errors.New("no config loaded")
	}
	var (
		data interface{}
		err  error
	)
	if hsR.hasOffset {
		if data, err = hsR.DoLoopRequest(); err != nil {
			return nil, createError("hsR.DoLoopRequest: ", err)
		}
	} else {
		//No offset is present, therefore only a single request is done
		var (
			req        *http.Request
			resp       *http.Response
			retry      = true
			dataoutput = make(map[string]interface{})
			errcount   int
		)
		for retry {
			hsR.URL.RawQuery = hsR.paramsURL.Encode()
			fmt.Println(hsR.URL.String())
			if req, err = http.NewRequest(hsR.httpMethod, hsR.URL.String(), bytes.NewBuffer(hsR.body)); err != nil {
				fmt.Println(err)
				errcount++
				if errcount >= errThreshold {
					return nil, createError("http.NewRequest: ", err)
				}
			} else {
				//Apply the headers stored on hsR to the request
				applyHeadersToRequest(hsR, req)
				//Do the request
				if resp, err = cl.Do(req); err != nil {
					fmt.Println(err)
					errcount++
					if errcount >= errThreshold {
						return nil, createError("cl.Do: ", err)
					}
				} else {
					//Passes response code to handler
					if retry, err = hsR.ErrorCodeHandler(resp); err != nil {
						fmt.Println(err)
						errcount++
						if errcount >= errThreshold {
							return nil, createError("hsR.ErrorCodeHandler: ", err)
						}
					} else {
						//Handling the body of the response
						body, _ := ioutil.ReadAll(resp.Body)
						//Checking if the body has data inside
						if len(body) > 0 {
							if err = json.Unmarshal(body, &dataoutput); err != nil {
								fmt.Println(err)
								errcount++
								if errcount >= errThreshold {
									return nil, createError("json.Unmarshal: ", err)
								}
							} else {
								retry = false
								data = dataoutput
							}
						} else {
							//Empty body counts as success with no data.
							retry = false
						}
					}
				}
			}
		}
	}
	return data, nil
}
//DoLoopRequest pages through an offset-based HubSpot endpoint,
//collecting one decoded response map per page. Depending on
//useLimitAsOffset, the next offset is either read back from each
//response or computed by repeatedly adding limit; paging stops when
//the offset repeats/resets or when offset >= total.
//NOTE(review): resp.Body is never closed, and the offset is appended
//to the URL by raw string concatenation rather than url.Values.
func (hsR *HSRequest) DoLoopRequest() ([]map[string]interface{}, error) {
	var (
		data       []map[string]interface{}
		req        *http.Request
		resp       *http.Response
		err        error
		errcount   int
		offset     = "0"
		pastoffset string
		total      = -1
		hasmore    = true
		retry      bool
	)
	hsR.URL.RawQuery = hsR.paramsURL.Encode()
	for hasmore {
		retry = true
		for retry {
			var (
				dataoutput   = make(map[string]interface{})
				offsetString string
			)
			fmt.Println(hsR.paramsURL, len(hsR.paramsURL))
			//Choose '&' vs '?' depending on whether a query exists.
			if len(hsR.paramsURL) > 0 {
				offsetString = "&" + hsR.offsetIdentifier + "=" + offset
			} else {
				offsetString = "?" + hsR.offsetIdentifier + "=" + offset
			}
			fmt.Println(hsR.URL.String() + offsetString)
			if req, err = http.NewRequest(hsR.httpMethod, hsR.URL.String()+offsetString, bytes.NewBuffer(hsR.body)); err != nil {
				fmt.Println(err)
				errcount++
				if errcount >= errThreshold {
					return nil, createError("http.NewRequest: ", err)
				}
			} else {
				applyHeadersToRequest(hsR, req)
				if resp, err = cl.Do(req); err != nil {
					fmt.Println(err)
					errcount++
					if errcount >= errThreshold {
						return nil, createError("cl.Do: ", err)
					}
				} else {
					//Passes response code to handler
					if retry, err = hsR.ErrorCodeHandler(resp); err != nil {
						fmt.Println(err)
						errcount++
						if errcount >= errThreshold {
							return nil, createError("hsR.ErrorCodeHandler: ", err)
						}
					} else {
						//Handling the body of the response
						body, _ := ioutil.ReadAll(resp.Body)
						//Checking if the body has data inside
						if len(body) > 0 {
							if err = json.Unmarshal(body, &dataoutput); err != nil {
								fmt.Println(err)
								errcount++
								if errcount >= errThreshold {
									return nil, createError("json.Unmarshal: ", err)
								}
							} else {
								//shift past offset
								pastoffset = offset
								if !hsR.useLimitAsOffset {
									//get the offset value from the response
									offset = iterateSearchForOffset(dataoutput, hsR.returnedOffsetIdentifier)
									fmt.Println(offset)
									//handle offset
									if offset == "" {
										//fmt.Println(string(body))
										return nil, errors.New("offset not found")
									}
								} else {
									//Lazily read the total once, then advance
									//offset by limit each page.
									if total < 0 {
										totalT := iterateSearchForOffset(dataoutput, hsR.totalIndentifier)
										total, err = strconv.Atoi(totalT)
									}
									if offset, err = addIntegerToStringInteger(offset, hsR.limit); err != nil {
										return nil, createError("addIntegerToStringInteger: ", err)
									}
								}
								retry = false
								data = append(data, dataoutput)
							}
						} else {
							return nil, errors.New("request body not found")
						}
					}
				}
			}
		}
		//true 0 250 false 0 1
		fmt.Println("before: ", hasmore, pastoffset, offset, retry, total, len(data))
		if !hsR.useLimitAsOffset {
			//Stop when the offset resets to "0" or stops advancing.
			if (pastoffset != "0" && offset == "0") || pastoffset == offset {
				hasmore = false
			}
		} else {
			var offsetNum int
			if offsetNum, err = strconv.Atoi(offset); err != nil {
				return nil, createError("unable to convert limit offset to int", err)
			}
			if offsetNum >= total {
				hasmore = false
			}
		}
		fmt.Println("after: ", hasmore, pastoffset, offset, retry, total, len(data))
	}
	return data, nil
}
//iterateSearchForOffset recursively searches obj (including nested
//maps) for a numeric property named pname and returns it formatted as
//an integer string, or "" when not found.
func iterateSearchForOffset(obj map[string]interface{}, pname string) string {
	//Search the current map for the property.
	if valueT, exists := obj[pname]; exists {
		//Bug fix: JSON numbers decode as float64, but the original
		//asserted unconditionally and panicked on any other type.
		//Non-numeric matches are now skipped instead.
		if f, ok := valueT.(float64); ok {
			return fmt.Sprintf("%.0f", f)
		}
	}
	//Recurse into any embedded maps.
	for _, v := range obj {
		if nobj, ok := v.(map[string]interface{}); ok {
			if value := iterateSearchForOffset(nobj, pname); value != "" {
				return value
			}
		}
	}
	return ""
}
//applyHeadersToRequest copies every header stored on hsR onto req.
func applyHeadersToRequest(hsR *HSRequest, req *http.Request) {
	for _, header := range hsR.headers {
		req.Header.Set(header.name, header.value)
	}
}
/*
Error Handler
*/
var (
//ErrExcessRequest Avaliable error type that indicates that you have exceeded rate limits
ErrExcessRequest = errors.New("429 rate limit requests exceeded")
)
//ErrorCodeHandler returns an informative error based on the response
//status code. The bool result indicates whether the caller should
//retry: auth/permission/media errors are fatal (false), 5xx codes
//sleep briefly and request a retry (true), and unknown codes also
//retry. A matching success code returns (true, nil) — note callers
//rely on the nil error, not the bool, to detect success.
func (hsR *HSRequest) ErrorCodeHandler(resp *http.Response) (bool, error) {
	if resp.StatusCode != hsR.successCode {
		switch resp.StatusCode {
		case 401:
			return false, errors.New("401 authentication invalid")
		case 403:
			return false, errors.New("403 authentication permissions insufficient")
		case 404:
			return false, errors.New("404 unkown endpoint")
		case 415:
			return false, errors.New("415 unsupported media type")
		case 429:
			return false, ErrExcessRequest
		case 502, 504:
			//Gateway timeouts: back off before the retry.
			time.Sleep(2 * time.Second)
			return true, errors.New(strconv.Itoa(resp.StatusCode) + " timeout")
		case 500:
			time.Sleep(1 * time.Second)
			return true, errors.New("internal server error")
		default:
			body, _ := ioutil.ReadAll(resp.Body)
			fmt.Println(string(body))
			return true, errors.New("unhandled code: " + strconv.Itoa(resp.StatusCode))
		}
	}
	return true, nil
}
/*
Additional Convenience Functions
*/
func createError(s string, err error) error {
return errors.New(s + err.Error())
}
//addIntegerToStringInteger parses both strings as base-10 integers and
//returns their sum as a string; the first parse failure is returned.
func addIntegerToStringInteger(s1, s2 string) (string, error) {
	left, err := strconv.Atoi(s1)
	if err != nil {
		return "", err
	}
	right, err := strconv.Atoi(s2)
	if err != nil {
		return "", err
	}
	return strconv.Itoa(left + right), nil
}
// compareStringIntGreaterEqual reports whether the integer encoded in s1 is
// greater than or equal to the integer encoded in s2. It returns a parse
// error when either string is not a valid base-10 integer.
func compareStringIntGreaterEqual(s1, s2 string) (bool, error) {
	n1, err := strconv.Atoi(s1)
	if err != nil {
		return false, err
	}
	n2, err := strconv.Atoi(s2)
	if err != nil {
		return false, err
	}
	// Idiom fix: return the comparison directly instead of branching to
	// explicit true/false returns.
	return n1 >= n2, nil
}
// SimplifyInterface walks every map in data along the key path given by
// route and concatenates the []interface{} found at the end of each path
// into one flat slice. An error from any element aborts the whole operation.
func SimplifyInterface(data []map[string]interface{}, route []string) ([]interface{}, error) {
	var combined []interface{}
	for _, entry := range data {
		items, err := iterateInterface(entry, route, 0)
		if err != nil {
			return nil, errors.New("iterateInterface: " + err.Error())
		}
		combined = append(combined, items...)
	}
	return combined, nil
}
// iterateInterface recursively descends data following route starting at
// route[index]. Every intermediate value must be a map[string]interface{},
// and the value at the final key must be an []interface{}, which is returned.
func iterateInterface(data map[string]interface{}, route []string, index int) ([]interface{}, error) {
	value, exists := data[route[index]]
	if !exists {
		return nil, errors.New(route[index] + " does not exist in map")
	}
	// Last key on the route: the value must be the target array.
	if index == len(route)-1 {
		arr, ok := value.([]interface{})
		if !ok {
			return nil, errors.New("unable to convert " + route[index] + " index to []interface{}")
		}
		return arr, nil
	}
	next, ok := value.(map[string]interface{})
	if !ok {
		// Bug fix: report the key whose value failed the conversion. The
		// original incremented index before this check, so the error named
		// the NEXT key on the route instead of the offending one.
		return nil, errors.New("unable to convert " + route[index] + " index to map[string]interface{}")
	}
	return iterateInterface(next, route, index+1)
}
// ConvertIFCArrayToIFCMap converts an []interface{} whose elements are all
// expected to be map[string]interface{} into a typed slice of maps. If any
// element has a different dynamic type, the maps converted so far are
// returned together with an error.
func ConvertIFCArrayToIFCMap(in []interface{}) (out []map[string]interface{}, err error) {
	for _, elem := range in {
		m, ok := elem.(map[string]interface{})
		if !ok {
			return out, errors.New("ConvertIFCArrayToIFCMap: unable to convert to IFC Array")
		}
		out = append(out, m)
	}
	return out, nil
}
|
package timeout_test
import (
"testing"
"time"
"github.com/etf1/kafka-transformer/internal/timeout"
)
// TestWithoutTimeout verifies that a task finishing before the deadline
// yields a non-nil result.
func TestWithoutTimeout(t *testing.T) {
	task := func() interface{} {
		time.Sleep(2 * time.Second)
		return true
	}
	if res := timeout.WithTimeout(5*time.Second, task); res == nil {
		t.Errorf("unexpected result, should NOT be nil, got %v", res)
	}
}
// TestWithTimeout verifies that a task exceeding the deadline yields a nil
// result.
func TestWithTimeout(t *testing.T) {
	task := func() interface{} {
		time.Sleep(5 * time.Second)
		return true
	}
	if res := timeout.WithTimeout(2*time.Second, task); res != nil {
		t.Errorf("unexpected result, should be nil, got %v", res)
	}
}
|
//
// Copyright 2020 The AVFS authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package osidm implements an identity manager using os functions.
//
// Copied from standard library os/user/*_unix.go with few modifications.
package osidm
import "github.com/avfs/avfs"
// CurrentUser returns the current user.
// It delegates to the package's platform-specific currentUser helper.
func (idm *OsIdm) CurrentUser() avfs.UserReader {
	return currentUser()
}
// group

// Gid returns the group ID.
func (g *Group) Gid() int {
	return g.gid
}
// Name returns the group name.
func (g *Group) Name() string {
	return g.name
}
// user

// Gid returns the primary group ID of the user.
func (u *User) Gid() int {
	return u.gid
}
// IsRoot returns true if the user has root privileges.
// NOTE(review): this treats EITHER uid 0 OR gid 0 as root; membership in the
// root group alone is not the same as being the root user — confirm intended.
func (u *User) IsRoot() bool {
	return u.uid == 0 || u.gid == 0
}
// Name returns the user name.
func (u *User) Name() string {
	return u.name
}
// Uid returns the user ID.
func (u *User) Uid() int {
	return u.uid
}
// CurrentUser returns the current user of the OS.
// Package-level convenience wrapper around the same platform-specific helper
// used by (*OsIdm).CurrentUser.
func CurrentUser() avfs.UserReader {
	return currentUser()
}
|
package arrays
// findDisappearedNumbers returns, in ascending order, every value in
// [1, len(nums)] that does not appear in nums (LeetCode 448). All elements
// of nums are assumed to lie within that range.
func findDisappearedNumbers(nums []int) []int {
	// Mark every value that occurs; index 0 is unused padding so values map
	// directly to indices.
	seen := make([]bool, len(nums)+1)
	for _, v := range nums {
		seen[v] = true
	}
	// Collect the unmarked candidates.
	missing := make([]int, 0)
	for candidate := 1; candidate <= len(nums); candidate++ {
		if !seen[candidate] {
			missing = append(missing, candidate)
		}
	}
	return missing
}
|
package main
import (
"fmt"
)
// Gorra models a cap with its brand, colour, price and whether it has a flat
// visor. (Custom data type demo; field order matters — main initializes it
// positionally.)
type Gorra struct {
	marca string
	color string
	precio float32
	plana bool
}
// main demonstrates basic Go features: variables and constants, string
// concatenation, custom struct types, multiple return values, closures,
// variadic parameters, and arrays/slices.
func main() {
	//time.Sleep(time.Second * 5)
	user := "Diego Abanto"
	pais := "Rusia"
	var suma = 8 + 9
	var resta = 6 - 4
	var nombre = "Diego "
	var apellidos = "Abanto Arroyo "
	var prueba = true
	var flotante = 12.34
	const year = 2018
	fmt.Println(year)
	fmt.Println(prueba)
	fmt.Println(flotante)
	fmt.Println("Hola Mundo desde Go con", user)
	fmt.Println("Hola Mundo desde GO con " + nombre + apellidos + pais)
	fmt.Println(suma)
	fmt.Println(resta)
	var numero1 float32 = 10
	var numero2 float32 = 6
	fmt.Println("Calculadora 1")
	calculadora(numero1, numero2)
	fmt.Println("-------------")
	var numero3 float32 = 44
	var numero4 float32 = 7
	fmt.Println("Calculadora 2")
	calculadora(numero3, numero4)
	// Using the custom Gorra type (the keyed-literal form is shown below,
	// commented out; the positional form is used instead).
	/*
		var gorraNegra = Gorra{
			marca: "Nike",
			color: "Negro",
			precio: 25.05,
			plana: false}
	*/
	var gorraNegra = Gorra{"Adidas", "Roja", 25.05, false}
	//fmt.Println(gorraNegra)
	fmt.Println(gorraNegra.marca)
	holaMundo()
	// Multiple return values
	fmt.Println(devolverTexto())
	//Closures
	fmt.Print("Pedido 1 --->")
	fmt.Println(gorras(45, "EUR"))
	fmt.Println("--------------")
	fmt.Print("Pedido 2 --->")
	fmt.Println(gorras(20, "USD"))
	// Variadic parameters
	pantalon("rojo", "largo", "sin bolsillos", "nike")
	// Arrays (commented-out examples)
	/*
		var peliculas [3]string
		peliculas[0] = "La verdad duele"
		peliculas[1] = "Ciudadano ejemplar"
		peliculas[2] = "Gran Torino"
	*/
	/*
		peliculas := [3]string{
			"La verdad Duele",
			"Ciudadano Ejemplar",
			"Batman"}
	*/
	// Multidimensional array (commented-out example)
	/*
		var peliculas [3][2]string
		peliculas[0][0] = "La verdad duele"
		peliculas[0][1] = "Ciudadano ejemplar"
		peliculas[1][0] = "Gran Torino"
		peliculas[1][1] = "El Senior de los Anillos"
		peliculas[2][0] = "Harry Potter"
		peliculas[2][1] = "Mientras duermes"
	*/
	//Slices
	peliculas := []string{
		"La verdad Duele",
		"Ciudadano Ejemplar",
		"Batman",
		"Superman"}
	peliculas = append(peliculas, "Snowden")
	fmt.Println(peliculas[0:3])
}
// pantalon prints each supplied trouser feature on its own line.
// Demonstrates variadic parameters.
func pantalon(caracteristicas ...string) {
	for i := 0; i < len(caracteristicas); i++ {
		fmt.Println(caracteristicas[i])
	}
}
// gorras computes the price of an order of caps as the ordered quantity
// multiplied by a fixed unit factor of 7, returning a label, the price and
// the currency code. Demonstrates closures.
func gorras(pedido float32, moneda string) (string, float32, string) {
	calcular := func(unidades float32) float32 {
		return unidades * 7
	}
	return "El precio del pedido es:", calcular(pedido), moneda
}
// devolverTexto returns a fixed name/age pair. Demonstrates named return
// values.
func devolverTexto() (dato1 string, dato2 int) {
	return "Diego", 27
}
// holaMundo prints a hello-world greeting followed by a newline.
func holaMundo() {
	fmt.Print("Hola mundo!\n")
}
// operacion applies the arithmetic operator named by op ("+", "-", "/", "*")
// to n1 and n2 and returns the result. An unrecognized operator yields 0.
func operacion(n1 float32, n2 float32, op string) float32 {
	switch op {
	case "+":
		return n1 + n2
	case "-":
		return n1 - n2
	case "/":
		return n1 / n2
	case "*":
		return n1 * n2
	}
	// Unknown operator: same zero-value result as the original.
	return 0
}
// calculadora prints the sum, difference, product and quotient of the two
// numbers, computing each with operacion.
func calculadora(numero1 float32, numero2 float32) {
	operaciones := []struct {
		etiqueta string
		simbolo  string
	}{
		{"La suma es: ", "+"},
		{"La resta es: ", "-"},
		{"La multiplicacion es: ", "*"},
		{"La division es: ", "/"},
	}
	for _, o := range operaciones {
		fmt.Print(o.etiqueta)
		fmt.Println(operacion(numero1, numero2, o.simbolo))
	}
}
|
package lcd
import (
"fmt"
"net/http"
"github.com/gorilla/mux"
"github.com/irisnet/irishub/app/protocol"
"github.com/irisnet/irishub/app/v2/coinswap"
"github.com/irisnet/irishub/client/context"
"github.com/irisnet/irishub/client/utils"
"github.com/irisnet/irishub/codec"
)
// queryLiquidity builds an HTTP handler that queries the coinswap module for
// the liquidity of the pool identified by the "id" path variable and writes
// the result as JSON.
// NOTE(review): the cdc parameter is unused — marshalling goes through
// cliCtx.Codec; it is kept for signature compatibility with the router setup.
func queryLiquidity(cliCtx context.CLIContext, cdc *codec.Codec, endpoint string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		poolID := mux.Vars(r)["id"]
		params := coinswap.QueryLiquidityParams{
			Id: poolID,
		}
		bz, err := cliCtx.Codec.MarshalJSON(params)
		if err != nil {
			utils.WriteErrorResponse(w, http.StatusBadRequest, err.Error())
			return
		}
		route := fmt.Sprintf("custom/%s/%s", protocol.SwapRoute, coinswap.QueryLiquidity)
		res, err := cliCtx.QueryWithData(route, bz)
		if err != nil {
			utils.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
			return
		}
		utils.PostProcessResponse(w, cliCtx.Codec, res, cliCtx.Indent)
	}
}
|
package dummy
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"github.com/harriklein/pBE/pBEServer/log"
"github.com/harriklein/pBE/pBEServer/utils"
)
// swagger:route GET /dummies Dummy dummyList
// Return a dummy list from the database
// responses:
//	200: dummyListResponse

// handleRead returns the dummy list (filtered to a single record when an
// "id" path variable is present) serialized as JSON.
func handleRead(aResponse http.ResponseWriter, aRequest *http.Request) {
	log.Log.Debugln("Get dummies.")
	aResponse.Header().Add("Content-Type", "application/json")
	// The x-id-braces header controls whether IDs are rendered with brackets.
	_withBrackets := aRequest.Header.Get("x-id-braces") != ""
	// region GET PARAMS ---------------------------------------
	_id := mux.Vars(aRequest)["id"]
	// endregion -----------------------------------------------
	var _dummies TDummies
	dbRead(&_dummies, _id, _withBrackets)
	if _error := utils.ToJSON(_dummies, aResponse); _error != nil {
		utils.NewResponseError(http.StatusInternalServerError, "Deserializing Dummy - "+_error.Error()).ToJSON(aResponse)
		return
	}
}
// CreateOrApplyUpdates handles a single or bulk create(post) request
// In case of a bulk request using Action fields, it will be an Apply Update procedure
func handleCreateOrApplyUpdates(aResponse http.ResponseWriter, aRequest *http.Request) {
	log.Log.Debug("CreateOrApplyUpdates dummies")
	// region GET PARAMS ---------------------------------------
	_params := mux.Vars(aRequest)
	var _id string
	_id = _params["id"]
	// endregion -----------------------------------------------
	// Read all body and put it into _body. It is necessary because we have to read only one time,
	_body, _error := ioutil.ReadAll(aRequest.Body)
	if _error != nil {
		utils.NewResponseError(http.StatusInternalServerError, "Reading body - "+_error.Error()).ToJSON(aResponse)
		return
	}
	// log.Log.Debugln(string(_body)) // TODO: REMOVE IT
	// Check if it is an array in order to identify an ApplyUpdates request.
	// Otherwise, it is a single create request
	_isArray := utils.IsArray(&_body)
	// region VALIDATE ID: 2 = Array is not allowed in request with ID in URL
	if _id != "" {
		if _isArray {
			utils.NewResponseError(http.StatusBadRequest, "Invalid content: Array not allowed for this request").ToJSON(aResponse)
			return
		}
	}
	// endregion -----------------------------------------------
	if !_isArray {
		// If it is not an array, attempt to Insert
		log.Log.Debugln("Deserializing Object")
		_dummy := &TDummy{}
		_error := json.Unmarshal(_body, _dummy)
		if _error != nil {
			utils.NewResponseError(http.StatusInternalServerError, "Deserializing Dummy - "+_error.Error()).ToJSON(aResponse)
			return
		}
		// region VALIDATE ID: 3 = Consistency between URL and Body
		if _id != "" {
			if _dummy.ID == "" {
				// ID present only in the URL: adopt it into the record.
				_error := _dummy.ID.UnmarshalText([]byte(_id))
				if _error != nil {
					utils.NewResponseError(http.StatusBadRequest, "unable to convert ID").ToJSON(aResponse)
					return
				}
			} else if _id != _dummy.ID.String() {
				// ID present in both URL and body: they must match.
				utils.NewResponseError(http.StatusBadRequest, "Mismatch IDs").ToJSON(aResponse)
				return
			}
		}
		// endregion -----------------------------------------
		_respError := dbCreate(_dummy)
		if _respError != nil {
			_respError.ToJSON(aResponse)
			return
		}
		utils.NewResponse(http.StatusCreated, fmt.Sprintf("Created %s", _dummy.ID), nil).ToJSON(aResponse)
	} else {
		// If it is an array, get the action in Action field and attempt to Insert, Update or Delete
		// If there is any error, rollback all actions
		log.Log.Debugln("Deserializing Array")
		_dummies := &TDummies{}
		_error := json.Unmarshal(_body, _dummies)
		if _error != nil {
			utils.NewResponseError(http.StatusInternalServerError, "Deserializing Dummies - "+_error.Error()).ToJSON(aResponse)
			return
		}
		// "I" is the default action when a record specifies none.
		_cIns, _cUpd, _cDel, _respError := dbApplyUpdates(_dummies, "I", false)
		if _respError != nil {
			_respError.ToJSON(aResponse)
			return
		}
		utils.NewResponse(http.StatusOK, fmt.Sprintf("%d requests >>> %d update(s) applied: %d insert(s), %d update(s) and %d delete(s)", len(*_dummies), (_cIns+_cUpd+_cDel), _cIns, _cUpd, _cDel), nil).ToJSON(aResponse)
	}
}
// Update handles a single or bulk update(put) request
func handleUpdate(aResponse http.ResponseWriter, aRequest *http.Request) {
	log.Log.Debug("Edit dummies")
	// region GET PARAMS ---------------------------------------
	_params := mux.Vars(aRequest)
	var _id string
	_id = _params["id"]
	// endregion -----------------------------------------------
	// Read all body and put it into _body. It is necessary because we have to read only one time,
	_body, _error := ioutil.ReadAll(aRequest.Body)
	if _error != nil {
		utils.NewResponseError(http.StatusInternalServerError, "Reading body - "+_error.Error()).ToJSON(aResponse)
		return
	}
	// Check if it is an array in order to identify an ApplyUpdates request.
	// Otherwise, it is a single request
	_isArray := utils.IsArray(&_body)
	// region VALIDATE ID: 2 = Array is not allowed in request with ID in URL
	if _id != "" {
		if _isArray {
			utils.NewResponseError(http.StatusBadRequest, "Invalid content: Array not allowed for this request").ToJSON(aResponse)
			return
		}
	}
	// endregion -----------------------------------------------
	if !_isArray {
		log.Log.Debugln("Deserializing Object")
		_dummy := &TDummy{}
		_error := json.Unmarshal(_body, _dummy)
		if _error != nil {
			utils.NewResponseError(http.StatusInternalServerError, "Deserializing Dummy - "+_error.Error()).ToJSON(aResponse)
			return
		}
		// region VALIDATE ID: 3 = Consistency between URL and Body
		if _id != "" {
			if _dummy.ID == "" {
				// ID present only in the URL: adopt it into the record.
				_error := _dummy.ID.UnmarshalText([]byte(_id))
				if _error != nil {
					utils.NewResponseError(http.StatusBadRequest, "unable to convert ID").ToJSON(aResponse)
					return
				}
			} else if _id != _dummy.ID.String() {
				// ID present in both URL and body: they must match.
				utils.NewResponseError(http.StatusBadRequest, "Mismatch IDs").ToJSON(aResponse)
				return
			}
		}
		// endregion -----------------------------------------
		_respError := dbUpdate(_dummy)
		if _respError != nil {
			_respError.ToJSON(aResponse)
			return
		}
		utils.NewResponse(http.StatusOK, fmt.Sprintf("Edited %s", _dummy.ID), nil).ToJSON(aResponse)
	} else {
		// If it is an array, edit all records
		// If there is any error, rollback all actions
		log.Log.Debugln("Deserializing Array")
		_dummies := &TDummies{}
		_error := json.Unmarshal(_body, _dummies)
		if _error != nil {
			utils.NewResponseError(http.StatusInternalServerError, "Deserializing Dummies - "+_error.Error()).ToJSON(aResponse)
			return
		}
		_cIns, _cUpd, _cDel, _respError := dbApplyUpdates(_dummies, "U", true) // Force UPDATE action in all records
		if _respError != nil {
			_respError.ToJSON(aResponse)
			return
		}
		utils.NewResponse(http.StatusOK, fmt.Sprintf("%d requests >>> %d update(s) applied: %d insert(s), %d update(s) and %d delete(s)", len(*_dummies), (_cIns+_cUpd+_cDel), _cIns, _cUpd, _cDel), nil).ToJSON(aResponse)
	}
}
// Delete handles a single or bulk delete request
func handleDelete(aResponse http.ResponseWriter, aRequest *http.Request) {
	log.Log.Debug("Remove dummies")
	// region GET PARAMS ---------------------------------------
	_params := mux.Vars(aRequest)
	var _id string
	_id = _params["id"]
	// endregion -----------------------------------------------
	// Read all body and put it into _body. It is necessary because we have to read only one time,
	_body, _error := ioutil.ReadAll(aRequest.Body)
	if _error != nil {
		utils.NewResponseError(http.StatusInternalServerError, "Reading body - "+_error.Error()).ToJSON(aResponse)
		return
	}
	// Check if it is an array in order to identify an ApplyUpdates request.
	// Otherwise, it is a single request
	_isArray := utils.IsArray(&_body)
	// region VALIDATE ID: 2 = Array is not allowed in request with ID in URL
	if _id != "" {
		if _isArray {
			utils.NewResponseError(http.StatusBadRequest, "Invalid content: Array not allowed for this request").ToJSON(aResponse)
			return
		}
	}
	// endregion -----------------------------------------------
	if !_isArray {
		log.Log.Debugln("Deserializing Object")
		_dummy := &TDummy{}
		_error := json.Unmarshal(_body, _dummy)
		if _error != nil {
			utils.NewResponseError(http.StatusInternalServerError, "Deserializing Dummy - "+_error.Error()).ToJSON(aResponse)
			return
		}
		// region VALIDATE ID: 3 = Consistency between URL and Body
		if _id != "" {
			if _dummy.ID == "" {
				// ID present only in the URL: adopt it into the record.
				_error := _dummy.ID.UnmarshalText([]byte(_id))
				if _error != nil {
					utils.NewResponseError(http.StatusBadRequest, "unable to convert ID").ToJSON(aResponse)
					return
				}
			} else if _id != _dummy.ID.String() {
				// ID present in both URL and body: they must match.
				utils.NewResponseError(http.StatusBadRequest, "Mismatch IDs").ToJSON(aResponse)
				return
			}
		}
		// endregion -----------------------------------------
		_respError := dbDelete(_dummy)
		if _respError != nil {
			_respError.ToJSON(aResponse)
			return
		}
		utils.NewResponse(http.StatusOK, fmt.Sprintf("Deleted %s", _dummy.ID), nil).ToJSON(aResponse)
	} else {
		// If it is an array, delete all records
		// If there is any error, rollback all actions
		log.Log.Debugln("Deserializing Array")
		_dummies := &TDummies{}
		_error := json.Unmarshal(_body, _dummies)
		if _error != nil {
			utils.NewResponseError(http.StatusInternalServerError, "Deserializing Dummies - "+_error.Error()).ToJSON(aResponse)
			return
		}
		_cIns, _cUpd, _cDel, _respError := dbApplyUpdates(_dummies, "D", true) // Force DELETE action in all records
		if _respError != nil {
			_respError.ToJSON(aResponse)
			return
		}
		utils.NewResponse(http.StatusOK, fmt.Sprintf("%d requests >>> %d update(s) applied: %d insert(s), %d update(s) and %d delete(s)", len(*_dummies), (_cIns+_cUpd+_cDel), _cIns, _cUpd, _cDel), nil).ToJSON(aResponse)
	}
}
|
package main // import github.com/HuiOnePos/flysnow
import (
"net/http"
_ "net/http/pprof"
"github.com/HuiOnePos/flysnow/fly"
"github.com/HuiOnePos/flysnow/tmp"
"github.com/HuiOnePos/flysnow/utils"
"github.com/sirupsen/logrus"
)
// main configures debug-level logging, loads configuration, initialises the
// tmp package, exposes pprof endpoints on :7777 from a background goroutine,
// and finally starts the fly server (which blocks).
func main() {
	logrus.SetLevel(logrus.DebugLevel)
	// NOTE(review): "LoacConfig" looks like a typo for "LoadConfig" in the
	// utils package; fixing it requires a change in that package as well.
	utils.LoacConfig()
	tmp.Init()
	go func() {
		// pprof handlers are registered on the default mux by the blank
		// net/http/pprof import.
		logrus.Println(http.ListenAndServe(":7777", nil))
	}()
	fly.StartServer()
}
|
package main
var input = `6-10 p: ctpppjmdpppppp
17-19 l: llllllllllllllllllll
14-19 z: zrzzzzzztzzzzwzzzzk
1-8 k: qkkkkkkxkkkkkkkkk
5-6 x: xxxxvxx
8-14 n: nnnnnnnnnnnnkfnnnnnn
18-19 t: ttttttttfttttttttwtt
3-13 w: wwwqwwwwrqwtzwvw
1-3 b: bbrbb
8-14 q: mqwmqvfqqqsqqqqqwb
5-7 c: lxrvdcch
1-5 v: mvdrkmrrcjnjpv
2-8 j: jwbdjjjcjjjjj
4-6 w: bzcwrznhhkw
3-4 t: lttccfqlrvh
4-6 m: qmjmrmmmnmm
4-5 j: jjzjj
6-9 w: wwwwwwwbz
6-12 c: ccccxzhpfzhccdd
3-8 j: kjpbcjhsj
4-8 d: dtdvlbsdh
11-13 s: ssssssssssrsqs
5-6 k: tdkknvkp
7-9 q: nqqqqqbqd
2-5 l: qlvpl
10-11 q: qqjsqqcntrl
2-5 l: smlsl
8-9 n: tvkqjnwnt
2-4 j: jjtjm
6-8 x: xjxmxvppxgdx
2-7 p: lvpfgfpf
3-5 b: bbbbbbbbbb
2-4 z: ztdz
5-6 v: vmgmvh
3-4 q: xqzq
7-11 s: fsscssslnpsvsfdbssbs
11-13 m: mmmmnmmgmmmmmmm
2-5 j: jjkjjslmgrvjxjkj
3-9 x: rfwqjqsfxsnwqngs
8-10 g: dggggggggrggb
5-12 w: vkwfvmwldztwt
4-5 j: jfrjj
5-6 g: ggwgvqglhnggxwlzfrg
1-3 r: jrmr
6-9 h: hwhrhhhhkhh
12-13 w: wwclqxhghsfgtt
6-7 w: wwwwwlwwwwwnwww
2-7 z: hztlxbhz
3-8 n: vxnmjslfxngwtjds
4-5 h: hxhhx
4-7 k: kkkbkkk
2-3 b: brbs
2-5 s: csslqwg
1-2 z: kcdbz
6-13 w: nnwwpjfvfmwfbvpwzrcn
2-3 g: vgqgdcqcw
16-19 h: hhnhhhhhhhhhhhhhhhrh
3-10 s: ssbsbsssssssss
1-5 x: nxxxqx
9-15 q: qqnqqqqqqjqqwqqq
3-7 q: qqlqqqxq
6-7 z: zszzzgzzzzz
3-16 s: jssssrdsvsskmdssksss
7-16 f: hfffffffffffffffff
8-9 r: rrrrrnrrrrrr
4-5 h: hhhdqmjnvvdndfz
3-4 p: wppp
15-16 k: kmkkkkkkhkkkkkzjk
14-17 l: llnrlrljlttlrklrh
5-7 t: ttdgttfttvptp
5-6 q: jdqqqqqfqrqxz
7-9 l: llrllllvllclkll
14-18 m: mmmwmmmmmmmmmdmmmpm
5-11 n: nnnngnnnnncfn
2-5 d: ddrvd
7-9 f: xfjkhcsfff
1-5 j: njplmjjjjj
7-10 j: kjjdjsjnvvjj
1-5 q: vqkqq
11-13 b: bbbbbbbbbbvbtb
3-6 h: hmxphh
1-4 k: jkkb
13-15 k: kkkkkkkkkkkkbkkkkk
2-3 q: qnqq
12-17 k: kkkkkkkkkkkhkkkkkkk
7-8 p: ppphpppwppp
3-9 h: vxhgwzhkl
3-7 d: dsgddcwk
9-14 j: kjllchjnmpmjtjhzncd
8-11 t: ttztxwtlvfrwg
5-6 b: btnqnbdsbbnbf
12-15 l: lllllsxlxllvlflmllp
9-12 z: vbckpqzzzzpthsskdjx
5-6 m: smgmcmdcm
5-6 k: kkkkkjkk
7-8 l: lllllllrl
7-10 n: kgnllnqnnnckp
1-10 n: wnnknnnnwnnln
12-17 h: hhhwhhhbfhbzfhhmhhhh
3-10 q: qqjqqqqqqqqqqqqqq
14-17 x: xxxxxxxxxxtmxxxxxxx
4-5 c: mtzcb
2-7 q: smlnsqqqwkvlldj
2-11 h: ghxhnmhfprnmc
11-17 f: fffbffffffqfffffbn
12-14 s: cfsssssssssssspss
6-10 h: hhhhvwhhhh
8-9 v: vvzvlnvvvvw
7-8 k: kkwkkkmk
6-8 j: jjmjjrjjj
1-2 d: qwddd
10-17 x: xxxxvxxkfzxvxxlxxx
9-11 z: szzzzzzzzzqszzhs
4-6 g: ggsggnpfgfglglds
7-13 t: ttttttbtttttmtttttmt
3-4 r: rrrpr
4-5 c: zzcvc
9-11 k: xxpkndnhkkllx
13-14 m: jtbxmzgvmmmxmm
6-9 n: nnnnnnnnnn
2-3 z: fzzkqpzskj
15-17 t: tttttttttttttttttt
3-5 h: hhkqwgxrp
7-12 h: jhghcmhhmcpbxzhh
5-8 c: cdjccrcc
7-10 g: ggggkggggqwg
5-8 b: fttshqjghjkbfb
13-14 d: cdkmddcqdddddzdddd
9-14 g: ggggggggdggggxg
1-6 p: fkpppl
3-4 z: zfzrz
8-11 s: ldsjssnmsssssgsgs
8-9 z: zzfzhxztqzzlhgl
5-12 n: pgnvncfdnnwnlkvndt
1-3 r: lrrr
9-12 z: zzlzzqzzzzzzzzv
4-8 x: klhxtqpv
11-17 k: jkckvxckkjkkkxzrk
2-5 m: hknqmxh
12-18 n: nnnnnnrnddjnsnnnnrnn
2-6 r: ltgrwrrcxrrl
1-4 l: wllllll
14-16 t: tttttjttcttttttv
7-8 k: bkjdmbxf
1-12 v: vvvvvvvvvvvd
5-18 n: nqdlvbngztlkqnshcn
7-9 w: fwhqptwmwlkgf
15-18 v: vvvvvvvvvvvvvvvvvmv
6-8 g: grggxdhgbbrtmg
3-5 w: wtzwwv
3-4 d: ddrddt
2-4 c: cfccc
1-2 p: mgps
6-10 v: vvvvvjvvvvdv
1-6 w: wwdwwwkm
17-18 b: bbbbbbbbbbhbbbbbmbbb
4-11 s: swzshhfggxqp
4-15 b: bbbwbbbbbbbbbbbbb
9-14 h: hhhxhhhhmhhhzhqhfq
9-10 n: nknnnnnndn
15-17 q: qbqqqgtqqzzlklrmq
4-12 x: xxxxxxmxxxwwdxgxx
1-2 l: llcllqlzlhc
4-11 z: frrzhmvlwjm
8-9 f: ctsfbqdft
3-5 l: lwkmlflqlsl
7-11 x: cfkscxpmxjxtxl
4-7 l: bfxhlml
1-5 k: drkrxzkkxhtkbq
4-9 v: gvvvvqvvwvtv
2-6 k: kkkqklk
11-14 r: rrqdvdgrrbrrhb
19-20 h: hhhhhhhhhhhhhhmhhhhl
3-4 z: zrzz
1-5 x: xxxxxxx
18-19 k: kkkkkkkkkkkkkkkkkmk
7-14 b: bbbbbbwbbbbbbbbbbb
12-15 p: pbppppppwpkppphpwcpp
4-5 c: ccczjc
10-17 n: nnnnnknnndnnnnnnnnn
6-8 j: jzjjjjjk
7-10 g: gwggvggtgg
14-18 r: rrxrrwrhhrrcrqsdrr
10-15 p: pmjjpdvxwprlhhxbcv
4-9 h: bxchmcfxhhbfhvfsdxn
7-15 w: gfwczdrhwqmmmvw
3-9 d: zhttdhdvd
1-4 p: pppmpp
12-13 z: zfzzzlzzzzzzzzzz
4-5 p: ppppqwpxppp
3-20 x: ccxwrgxhjjkfnsdnjjnj
9-10 j: wjdjjhrzqq
17-19 t: tttttpttttzttttwtqtr
12-14 r: rrrrrtkrrrrlcdrr
2-13 b: bbbbbbbbbbbbgb
9-15 g: tgrphjgdgsglgmkm
10-14 c: pwvrltccjccczccbt
4-7 f: lffxfffz
7-8 r: rdrcvrrc
3-4 t: xtftv
10-13 r: rnrfrrrwrftkrrr
11-12 r: rrrrrrrrrrlrmrr
14-18 w: wwwwwwwwwwwwwwwwwwwr
1-8 b: bbbstbrbbbmbbb
2-3 x: xxxl
3-4 r: rrntr
2-5 k: pljkb
7-11 x: vzxxqxfhxgxqxxkxx
11-13 c: ccccflcccjmcv
9-11 l: nlvllllllll
10-11 t: ttttttlttdttttxtt
14-18 w: wwwwwwwwwwmwwgwwwz
8-10 g: grtxrzlscxtkjjwk
7-19 x: hkrxxxxxhxbvxxxxxkd
10-12 c: cccxppcfccmcnccccc
17-18 l: llllllllllllljllml
7-16 v: vvhbvvxvvvqvvvvpvvvv
12-18 q: qqqqqqqqqqqkqqqqqqq
1-3 v: crvd
11-13 b: zbbbbbbgbbhbb
9-11 v: vvvgvhvvlvv
3-4 r: rlrv
9-16 m: mmmmmmlnnmmmmmrmmmk
10-18 s: ngxsssxkxfmfswssvsfc
6-10 g: ggzgwngggggggtqgg
9-11 m: mmmmmmrmmmmmmm
3-4 l: jlrl
8-10 k: kkkfqkghbkzkkkkk
3-7 b: jwbhbhc
4-15 r: vklggxjgtgmzrlrw
16-18 f: fgnfvzxffprlpxwjrf
5-8 p: pprdhbzpp
10-12 r: rqmrkrfrkrrrrl
8-9 b: bbjkbbtbc
10-14 q: hqqqqqqvqmqzfqqqqqq
3-4 q: pwgq
10-11 m: mmmmmmmmmwm
9-11 d: tqrdldddddrjcddcdd
3-7 b: pbskmbb
12-13 b: fwbbbhbzbbfbrf
3-7 p: lppbnxvpphd
15-17 g: hpccghbgqtxgvggxg
2-7 m: sfwxfjmmrt
3-20 d: rgdbhfmhqqqfzbnjrddx
1-3 q: nqlmqnd
12-17 p: pppppppwfppppppppp
2-11 c: gctbkccdhqrgchsvccc
3-6 k: kxkkfk
9-10 n: nnnnnnnntf
2-5 z: zgzrzzz
5-6 g: gggggv
9-14 z: szzzzzzspzzczjzz
15-18 f: ppfvfhfffxfffpffrv
8-15 n: nnnqvnmkmndnddnb
2-3 z: vkzngkxsszvlvjk
5-9 q: qqqqwwqqqqqqq
3-13 m: mmhmmmmmmmmmgmmmmm
4-5 s: xssss
2-3 n: vnfn
8-9 d: drdddlsdndx
3-4 v: vvvn
2-8 c: ccgbxccfclcwc
1-4 b: mbbb
7-15 p: pnpqxnmmfkxqvppkpp
6-7 g: pkdgdpz
1-3 t: pxkttttjhttt
14-19 j: jkjjfjjjjjjjjjbjjjj
1-5 h: bwhhhlhh
16-17 z: xnzsgxztwbwnrlzzz
5-13 p: cspppppppzprkv
8-9 x: xsxnxwcxjxjbxxxp
13-14 w: wwwwwwwwwwwwrtww
3-15 w: wwbwwwwwwwwwwwdwwwww
6-7 d: dddbbdd
4-9 z: zzzzzzzzqzz
4-5 j: jjrwd
12-15 k: kkkkkkkhkkkkkkxkk
10-11 j: vjmjwjjxgjjhjm
8-9 v: skspbvwvp
1-14 n: drnwljkphztfcn
9-12 s: ssssssjswsshss
2-7 x: hxpcbdwjxs
3-7 n: bgnpmrm
4-15 d: dddfddddddddvdd
15-16 p: pppppjpppppppppxpp
8-14 r: rrfvrkbrgpqrplnvhxzr
2-5 c: cccccc
1-6 g: ptggggjl
11-12 g: gbgpfwwggsgh
1-2 z: wzjcmw
1-7 m: rmmmmmtmn
16-17 q: qqqqqqqqqcqqqqqql
1-3 l: lllhdwldl
7-8 k: gbhkkvkl
1-7 j: jmjjrdt
3-6 s: spsssxbsssss
7-13 v: vvvvvgvlvvvvbv
11-12 h: khhghhjffmvhk
17-18 s: sssssssssssssssssmss
8-16 t: ktcpqlrrtdttttxtkxn
9-11 b: bsbbqwbbcbbbwhv
5-10 k: kbtktnxdkzkkkrxtck
4-8 r: vrwwrvrr
18-19 x: lkxxqprxxxzrwkqxxgx
6-14 x: xxxxxxxxxxxxxdxx
10-13 p: fkpprwplppppbp
12-16 c: vcvgccccclbccgcctccc
2-8 c: cscclccczcgccnc
13-15 r: rrrrrrvrcrrrrrrrrrr
6-7 f: cfvfhbf
4-5 h: bcqbknlhwfm
3-18 g: chglkvkxgczqpnvgzp
1-3 h: fhbhh
1-2 v: vqtv
7-14 g: jgscbrgwgbggvglgkhd
2-8 p: plppppphp
11-13 c: ccsccclcccrcsccc
12-13 r: rkrmrsrrprrrrr
1-6 m: ndmmqg
1-3 k: jkskk
6-9 p: htffzfvrppwln
10-11 s: vksxsssssscdssw
5-8 s: svfcdwss
10-19 t: ttttwtttvttmttttttft
6-8 h: hhhhhhhgh
5-18 m: rcchmqmzlpptkcnhdh
1-3 m: mmdmzxtlm
1-7 l: lllllllbll
16-18 t: tttttttttttttttttttt
7-9 t: tttdjrtttkthjd
5-19 v: xthgjgvrdwtqbrsbgkv
2-6 k: whkgxk
11-13 q: qqqjqqvlqqqqdq
6-12 g: gggggfggggjncgg
9-14 q: qhqqqjkttqqqqw
6-9 n: nnnbnnnfgn
4-16 d: ddjldddpvddddrdd
13-14 r: rrrrrrrrrtrrkj
1-3 q: qqqq
3-6 s: hgtssssqgflb
10-14 h: hghhhghhrhhhhshhhh
3-5 s: pvsssgs
3-4 g: ggzbgxgggggggqgggg
2-4 d: dqdddddd
1-11 b: bbbxbdbrbbbbbbbbbbn
8-12 p: gppwpgpxpvpppbs
3-7 p: dpzppppp
7-20 r: zbczqrrpjpgrkrzdvmrn
9-15 w: wpwtwwwwwwwwwwvrkwr
14-16 p: pwpbpppppgpppmppp
8-9 q: ngzdqqjmqvj
4-9 t: tltltttktttttttt
2-4 w: kzmw
3-6 r: rrmrxqr
4-5 z: zzzzjz
4-8 w: wkwxbwwcwww
8-10 k: kkbqnkkqktxzkk
6-7 v: vvvvvnvvvvvvvvv
8-9 s: dmmgsmxssffbmtncj
7-9 f: ffdwfzqfffffms
11-18 b: jbcjdgppcjrhpbcswvsd
3-9 d: ckdcmmdhdldgsx
2-3 d: cqxvd
1-11 t: tftttstmftttttttttdt
2-3 d: ddtd
1-9 k: kkkkkkrkmkkkkg
6-11 d: dxkdthdxgxdslgsd
9-10 g: gggggfggkxg
6-7 c: ccccccccccc
15-18 z: zzzzzzzzzzzzzzxzzm
10-11 n: xnnlnrqnvnj
3-4 d: hdrz
4-5 h: wrtgh
2-4 n: nnnnn
11-12 g: ggggggngggggg
9-10 b: bbbbbbbbbn
2-10 j: vjgbvjlwxwts
3-11 t: jcttqqhtdkttcnttdfx
9-10 w: gzwwtwwwwwwwnw
12-14 t: ttttlljzpmwrcl
6-12 b: pzmbrbbvbkbbdctbbqb
1-3 m: mmmmh
13-15 r: rrrnrrrrtrrrrrl
9-12 l: psscllxhtllw
7-8 b: bbbbbbbbb
8-9 s: hmsztljsm
11-14 x: xxxxxbxxvxdxnx
3-6 b: cbhsbb
14-17 v: vvvvfvvvvvvvvvzvz
7-8 k: kkhvbwgk
18-20 h: vqnpzsvhplfhmmqjhhkl
4-9 n: nnnnnnnnjnnn
13-14 t: ttgttttjttttktttvqt
9-17 z: hwzlczpjzzbqcdgzj
9-11 c: cccccccczcv
2-8 s: skssssss
6-12 g: vtfrgjgvzhngd
10-11 v: vvvvtzjzvvbvvvvvvvv
5-7 j: jfjjzjgjjx
4-7 m: mhmmmnrmlmqw
7-11 q: jcntkgqrqcr
18-19 c: ccccccccmcxcccccccpc
5-6 m: gmmntmmpdhqmbfvcck
6-17 g: qgqgdwdvggfzrlbvgg
4-8 k: fbzqkmhh
11-12 c: vccccccccvmc
1-5 z: zzgzqzz
11-17 f: ffffffffffffffffff
8-9 v: jrcbrvslv
1-4 w: rwwww
10-12 x: xxnzxxxxxxxfxx
3-5 b: bzbcq
3-6 x: rndsxxr
11-13 n: svqnnnznnnnml
2-5 t: ttqwx
4-11 c: cfdfcsgnccc
12-13 s: sksssssvsssstss
2-4 s: sszhr
18-19 x: xxxqxxmxxxflxxxxxxx
4-14 b: nbbjbbbblbbbbb
15-17 q: qqrzmqqqqlktlhqrh
2-5 j: cjxstjtdmvz
1-18 s: ssssskssgxssshssst
12-16 m: hzkmmmwmmmmwmhmmmbm
5-7 m: mmmmmvv
1-4 r: rnln
19-20 k: kkkkkkkkkkkkkkkkkksk
12-18 t: ttlvttqrtbbpttztttt
9-15 w: jvqtwzwnhrmgjvwl
9-13 l: lrgdllkgbjvjljdmvmjt
3-5 g: ggtgggg
4-7 q: lrlqqkqmjqqwfqq
6-8 q: qqvqqchr
1-3 q: qltqr
13-18 w: bpwpwwgwwgwpfddwfwww
7-15 t: wttttthtttttttkttbt
1-6 n: tnnnnnnrnnnnnnlfn
6-7 p: kpwdppv
3-5 b: bmbzb
4-5 h: hqgqpbhj
2-12 k: fsxfrnjkmwgkw
1-14 g: ggggggxggggvgwg
2-16 d: dddddddddddddddmdddd
12-15 z: zzzzzzzzzzzzzzsz
13-19 z: zzzztzzzxvzzzzzzzzzz
10-11 d: dtlddhddsjf
3-11 x: xxxxxxxxxxxx
9-14 w: wwdwwwkwdwzwwwf
14-19 v: cpzcvwkqknscrvqtmfv
4-18 g: ggggggggggggggggggg
1-3 w: mwqwwwwwww
2-8 m: dmnwmjmzw
9-11 x: xxxxxxjvxxtfx
2-4 g: ggzb
6-9 k: kkksskkkx
9-10 m: kmmrmvvmmm
2-4 s: ssspsss
17-18 w: twjlvjbmhpxgchtnmcs
13-14 m: mmmmmmmmmmmmlmm
4-7 g: gggvggvgdgg
11-12 c: zbcchcjtcmhcbhcpcc
5-8 l: hlbhlmflk
1-5 s: sssssss
1-5 f: sfnffwfffffk
7-8 j: jjjjsjjj
2-3 x: rxlxvb
6-14 s: sqsksnsssdsdfds
16-17 v: vsvlpcvpkvvjvkvbj
1-12 r: hrrrrrrrrrrdr
2-3 m: mmqmmdm
10-13 h: hhqhhhhhhfhhbh
1-6 k: gkkkkkk
6-7 z: zzzzzzz
2-14 z: zqtzzzzdszszzzvzpnz
9-15 t: twczgnntkttghtvtbht
5-7 g: gfpvvkgk
10-15 z: bzzzszzztkzzzzzzzzz
4-5 z: zzfzzzjwxzcw
12-15 f: fffffffffffpffff
2-4 w: twwsft
3-9 b: srbwfkbcw
7-8 g: brwgmgjgdm
5-7 v: vvvvvvvvvv
7-14 n: mcznzntcndsqhrvmg
2-9 k: kvbpxbrjrkqqrzvkdvsk
4-6 x: xxxvxx
7-10 k: lkdmtlkrpk
1-6 b: bpxsqbbbbfk
3-6 c: cccccxcccccc
11-13 k: kkkkkkkkkkhkkk
6-7 q: fxnqtpqnxllqqg
13-19 t: dtxtthttttttttttztvt
4-5 h: dcvkh
2-5 v: rvstj
12-13 c: cccccccccccht
4-8 g: gvgggggvggggggg
1-4 g: gggg
13-15 x: xqxdxqtmxzfxxrq
4-5 c: ccphw
16-18 m: mmmmmmmmmmmmmmmmmh
12-18 c: cccccccccccrccccccc
4-6 h: zlkhfh
6-8 f: fffvsfblfbff
9-14 h: khddwkfzlpmqfhv
2-6 m: mcmmmm
11-13 d: dddrdddddldds
12-13 s: sqwhcxsqnskmsmtqvgr
2-10 b: hblmkzhlgjcb
1-10 h: bzwxpxqtjbhbmmf
15-16 r: rrrrrrrrrrjrrrrsr
6-12 s: sssssnssssssss
1-9 n: nnmnkngnw
4-6 x: xxxxxwxxjx
5-6 p: ppbktpjzdppj
13-17 s: sssssdssvsmjqfstpss
3-7 t: ctwtbkwjt
13-14 x: vxxxxxjflxnxtn
4-17 j: qmjjnjlqjjgpcjlwz
5-7 p: cvscpsvbcpvbfplqhpp
11-13 z: zzzzzzzzzzzzz
3-4 c: bqgkch
8-9 h: hhhhhhhld
10-13 p: bpppppppvpppkppn
5-6 l: lllllzlkqll
4-13 t: xbkssqrtvjthtx
2-3 q: vqfqr
7-9 l: llllnlllgllllll
4-5 h: hhhdh
3-5 z: zzzzrptzzxz
10-13 q: qjqkqqqzqqqqwq
3-5 v: vvbvvvv
7-16 j: nbbzpjmqmzfhjcjjz
3-7 s: swvssss
13-15 s: sskxmfsfsvpsssps
2-5 m: mmmmwm
5-7 b: hbbbbbsb
1-2 d: dfwffsnttmmn
4-17 g: gggvggfgggmgggggg
6-7 w: fwfnwwwv
3-5 c: ccccc
5-6 v: vxvvvvvvvv
6-8 p: zppppppppphh
3-9 f: nkfqxsffb
6-7 n: cnnjnnh
12-14 c: cvccccccccckcc
13-14 x: mxxxsxxxxxxxmxxx
2-3 w: wwsr
6-7 h: hhhhhdhh
9-11 b: wtbqzbbbbbcbnbbbbb
1-2 b: blbb
1-5 m: lwlmmxjx
2-6 k: kkkkkxk
2-10 j: fwcjktmrxmp
2-5 b: bbbblh
14-16 n: nnnnnnnnnnnnnnrnnnm
2-3 h: hzhxt
1-2 m: mmmb
12-13 b: bblbbmzqbbbbcbbnbb
15-17 g: xvggggqgmjwmgrhgg
3-4 t: tszs
2-5 l: lnllplll
16-17 p: pmpppppppppppppnplp
16-17 j: jkjjrjjjjjjjjjjxtjjz
6-11 j: jjjxjhjjjljjjjjj
7-16 n: xndnnljsnnggnfnqnnhn
3-5 n: wnnnbnnkj
9-13 r: rrrrrrrrjnrrrrr
1-6 h: jhhhhxh
12-16 w: qlwwgttwvwtfswwk
1-5 d: wbdgdglmdm
4-15 m: xwvxmvncxdvjbhq
4-6 h: hmmhhhns
4-13 n: nnncnnnnnnnnnnn
1-5 c: cpcvcc
1-14 h: hjdqjwffkrkxsnhh
1-5 d: dkdhf
3-7 c: mcbbvvc
7-10 p: xpppwpnppp
17-19 r: mrnrvpjtgttxppwwrkzj
1-7 q: qqtvqvqqfqn
6-17 s: qcsrpwspszcssssscxp
1-3 x: dtxlnlbqjqsnxxfxr
13-16 g: gggggggggggwgggg
2-8 j: xfjpwsqj
4-9 j: jjjnjjjjpjjj
2-7 j: gjfllhcswc
7-20 b: wphrpjmzfpxfcbpbmbfv
6-10 c: fclccchhrc
7-13 k: pvjmksdkjkdtkkxck
2-5 r: rhrrqrr
10-11 b: bbbbbbbbbbd
7-8 f: qfsjhfln
5-7 m: mmmmfmmm
5-7 d: xmdgwbdvd
7-9 t: tcttttntttt
5-6 j: khhjjd
10-11 l: llllflllllj
9-12 z: zzzzzzzmzzzzxzz
2-8 f: qvfnffff
8-12 v: nvvvvvvczhfvvvvvkk
8-10 x: xxtxxxxxxb
2-3 r: xrrw
4-14 q: nrszcbwqwzqjfqb
2-4 v: vqvq
4-5 l: llllml
15-16 k: kkkkkkkkkkkkkkkg
6-7 g: gggggggggg
14-15 s: ssssssssssdsscs
6-12 m: tsmmpmwfbwmmx
1-7 l: jqsspll
4-17 h: cprhghxcdvbvkkjfhdp
11-12 z: zqzzzgzzzzqz
4-5 x: bxpxx
2-4 n: knknnfnptzqdqbc
1-4 p: lppwp
10-15 m: txlmhfqmzcldstmpjx
4-6 w: jwjfcw
7-9 q: zqdqqqmwqqqvj
2-3 t: pttdkz
5-9 b: hlbbbnbmbtxcbnnhbgrb
4-6 d: dddddtldddrdnldd
5-6 x: xqxxxzxxx
4-6 n: ndnwjn
9-17 f: ffxffdpqfhfffftfmff
6-16 p: zmxqkgdwplcpnpmpcfr
1-15 p: lnpmpvktfpppnppmckz
5-13 n: lnnnnncnpnhntkknzcnn
10-14 k: kkkkkkkkkpkkkskk
4-5 p: ppppp
13-18 n: nnnnnnnnnnnnnnnnnn
4-5 g: fvsvsfhgpgngl
1-2 v: fvcvp
9-10 b: hbkgnddkbmk
11-14 h: xhhhhhhmhhhzhf
3-4 w: lgzw
4-8 l: ldnmhsblcqgsxdqjzgr
3-7 k: lskkkpzkk
8-9 j: jjjjjjjdjjjjjjj
7-13 r: rrxrprccwrhrrrlj
5-8 z: bvzrzzvzczzzdp
1-4 l: vlnll
4-6 k: kktkwkgdxb
1-2 n: qnhn
8-10 j: jjjjrsjbrljmsjj
10-12 h: pqhdkxhlchhg
10-11 p: phppppppnpkrpwp
1-6 k: tkkkkkkkkk
4-6 f: fcfffwnjkffwfffffff
5-8 h: qhghhhgjhhh
1-4 g: zggggggtbgg
4-6 k: kkwkkh
1-4 g: hkgmtd
5-7 c: cfhcckcl
8-11 f: fffcfxrfffbtfffwf
4-8 s: dssbsssnsss
8-18 t: clwqmjvttmkgxfdpxt
4-5 s: sscsjsc
3-4 q: zqqx
5-7 t: ftmttnxttz
9-11 m: mmmmmmmmrmmmmmm
9-14 m: hmkmmzmmlmvmmqmmmm
2-3 z: zzzz
4-5 m: mmmqp
7-14 n: xnkknkntrjtlgn
7-12 x: wrzfsmxxhtkxskdxxx
10-12 j: jjjjjjjjjjjrj
11-13 m: mmmmmmvmthmmz
12-15 z: pzzszlncbpzzmjzzwmjz
5-10 v: vvvvtvvvthv
6-8 j: jjjjjfjqfj
3-4 v: vkdvlv
4-5 c: cccfccccc
10-11 l: llklllllllll
13-15 m: mmmmmmmmmnbmmmmmm
2-4 w: jqvswww
4-5 w: wwqwlmww
8-9 b: fbkbbbbbb
15-16 v: vvvvvvvvvvvvvvdnvv
7-8 g: ggggggzh
8-11 x: mxxxxxxfxxx
1-2 f: fkff
1-3 k: kgjk
2-5 g: jgckspgbdfm
2-8 g: mcsbglgqtfzgfgln
7-11 t: tttttttttttttt
2-7 h: hhvphvhhghhrh
15-16 b: bbbbbbbbbbbbbbbb
10-11 b: bbhswbbvbrbbhkbbbwb
5-6 l: lbzhsrls
3-5 f: bcbffdvfkf
11-16 t: bztjnbvtkttktwttttt
8-17 d: pdpswddddddhtdfkdd
12-13 h: hhhznhqphxhhnhhhhs
1-6 k: shhkqkhtd
2-3 h: vhnj
11-14 z: zzzzzzzzzzszzzzzzzzz
5-7 v: vbvvpvv
1-3 g: gvgsgb
8-12 t: tlhfxnvgxkpt
4-7 t: krnhttl
16-17 t: tftttttttttqtttqhttb
17-18 z: wczzzzzvzdzzkzzshgg
12-20 p: pppppppppppwpppppppp
13-14 z: zzzzzlzzzzzzslzb
3-5 b: sfblqvzqgpkwq
8-9 r: rrrrrrrzr
3-10 h: fhkdhpslchccxx
8-9 r: vrrrrjrvrprr
2-4 r: rrxsgdhj
11-12 s: ssssssstsslr
4-8 q: pfqlqqplqtlfrh
3-12 l: wflllcfllvlwlqktlll
3-6 g: pmvvggggqgjg
19-20 w: wwwwwwwwwwwwwwwwwhlv
7-14 t: ttttlttttttttt
3-11 b: jbbwbhzpbbg
15-19 t: fbjvntgrrhdtbltzjkc
3-4 b: cqbbb
10-11 l: lllllllllts
1-8 z: xzzzzzzxzzzzzzzz
11-15 b: scbsrhqwjptfxfbjz
5-8 q: qqkqqmqld
12-17 b: bhbbbgsdbbbhncbzb
2-7 k: kkkkbkktzgk
7-14 m: mdmsmmgmmmmcmmmm
16-18 b: zqzlllfbdfrkwrwrslb
12-15 t: mzzztkqvcmvpdmtmft
1-4 c: ncqcdc
10-11 k: kkkkkkzkkkjkq
4-7 w: wwwswww
4-13 f: lnrcvxznlcfxtnfz
1-2 r: rprr
2-4 b: cbtxpjbbf
6-12 j: jjcjjjhjhzjg
16-17 x: xxxxxxzxxxmxxxxxxx
7-8 l: kwllgltm
1-6 c: cccckzcccl
1-6 k: nkkkkk
10-11 d: dddddgddjdddddddd
7-11 d: dddddddrddn
4-6 s: pshmskjsszcxw
4-7 x: zwnmxmxxxxxxbxnx
3-6 l: gbszlgm
4-14 h: hphhhnhqhghhhh
4-7 n: sbnnnwzkgzxvnnrn
5-8 h: hhhhhhhhh
11-13 j: jnjjjjqjjjvjjj
7-9 d: dddddcwdd
2-10 k: kmkkkkkkkd
5-7 j: jpdcczgzjjjnhtjdhzc
10-16 f: fffffffbfbftwjfj
2-3 c: nncc
1-7 z: dbzmkzzhrjrjgjkl
4-5 v: qzwvr
1-3 w: rwwnr
1-4 f: hffmff
5-6 w: wwwwtwww
4-12 r: vtfbqqfrnmqk
7-10 q: qqwqwqqqrc
1-2 d: dndd
5-10 c: rmrpcwcckcvf
1-2 d: vdfnl
2-11 j: jjjjjjjjjjvq
8-11 k: kkbkxkjtkxzksk
2-7 m: bcmxqdmcw
2-4 d: rdtdddt
11-12 p: zpppppzpxpvpzpp
3-8 f: fgvbvrbflh
10-11 b: bbfbbbbbbqdbbb
3-4 d: lkdn
6-7 b: bbbbsbbbr
3-5 b: bbbbbb
8-9 b: kbbbbbbbh
1-6 q: zgbbkjwdwprjlzcqtxl
5-12 p: ppppcpppppplp
4-5 s: zfscs
3-6 z: fszzcqrlwqtblhgzg
1-3 l: nswlxp
6-10 r: lkrrrxrnqzr
10-11 l: lllllllllll
5-6 x: xgrxpxxx
2-3 n: lmwn
5-6 d: bddnddsd
1-8 x: xxxxxxxrx
15-19 b: bbbbqbbbbbbbkbvbbbbb
12-14 h: mhhhqphghhhfnhbhczr
2-4 f: ffjfkk
1-2 m: mpsx
2-5 g: gpplg
1-2 v: pvvv
4-5 z: zzzbz
8-13 q: qqkkgnqttwgqknqdj
3-15 c: cmlfccwcbxcztzcx
7-9 q: hcqqqqqqqqssm
1-10 p: lppnpdpdll
6-7 f: bxjpxffjbfzqhrccts
4-5 j: cqljj
4-5 s: srsjjss
10-16 d: dcdddddbddhdzdcldddt
2-9 c: vcqhwjctthq
2-4 r: jvdbj
3-4 h: shxnz
1-2 c: ccccgcc
3-5 l: flhlllklwllgf
4-5 t: xqfctf
1-9 m: mmmmmmmmmmmmmmm
1-2 b: brbx
10-16 x: jjlgdbxhxxhvvvqb
2-3 s: spssss
10-11 x: xxvxxxxxxxx
2-15 x: qwbhmmbxwkflmqm
16-17 s: sssssssssssssssts
12-13 c: nxhfcctzkxgkcfcl
15-16 w: wwwwwwwwwwwdwwww
7-8 x: fmxlxtsmx
12-15 w: wwcwwwwwwwswwwnwmwk
10-11 m: xwkpzpvxgmm
1-5 l: plvllrlvlcllnlqll
2-3 m: prmrsns
2-4 w: rwwh
15-16 g: gggggnggggggfggng
9-11 s: tlghtdpfmsc
11-12 r: jttrrtcwkkdx
3-10 m: bqqqmmfrlm
1-8 v: nvdcvvjv
17-19 l: lllllllllllllllllvl
10-16 t: ttjtttkntttxtttktt
8-14 h: hhhhhdhrhhhhhhhr
2-8 f: ftfffffn
5-13 q: qxqqqqsmqdqqd
9-11 z: zzzzzjzbkzzz
10-11 r: rrrrrrrrrhlr
2-5 p: lpjqpkfhtdxm
12-13 j: jjjjjjjjjjjkbjj
6-12 l: zrghbwlcdxllq
11-13 w: wwwwwwwwwwwwtcl
2-6 v: jvvrvh
5-9 p: pvmpzpmdpp
2-9 k: kbkjkzkkxkfdkbf
8-17 z: jmxcvbjjgmjzzwzbzzz
6-8 k: kkklkkkjk
6-11 n: qdnknnnnmncnt
6-7 n: nnnnnjq
3-4 v: gncvr
8-9 q: wqkslqtqz
1-5 k: ljdjkk
6-7 w: kpzhwww
5-7 r: rrrrrrr
12-14 v: vvvvvvvvvvvvfm
2-4 h: htfh
4-14 t: qqgtbtdbftmmxtm
2-5 p: pjwmp
4-5 t: jjptqvpdm
17-19 w: wwwwwwwwwwwwwwwwwwx
10-11 g: mqdxmvggggg
13-18 w: lwqfhxvvvspcwqmpmwb
2-4 p: qxdr
6-8 b: bxrrxkkxv
1-4 h: mbmghhh
5-9 f: ptmffcjqcnfhd
4-10 d: bsbttdvdpl
1-3 n: znxn
1-5 q: qqtgq
1-10 q: nqqqqqqqqbq
3-4 r: rrtrm
12-13 d: fdfgtrdbdbjdj
8-9 c: ccccccczs
3-5 d: nqgjdv
10-11 q: qqqqqqqqqpq
9-10 m: mkcmjtmspf
5-7 q: jvpqjgq
7-8 g: gggggggm
5-7 r: brrtlpr
11-13 c: wlcqccbcchvfc
9-14 j: jmjgjjjjvrjrjjjjj
3-8 n: nnmknncxnpxtbnn
9-11 d: zdpddddvpbd
3-4 t: qktttlnb
4-7 f: fffmffjfgf
2-5 l: llxlwkl
2-6 b: qblblb
2-5 d: vddcd
5-6 w: wwwwwswwwwww
3-6 s: hsjszsfsknsscv
7-9 r: rrrrcrxrrr
8-9 x: xntxnxxxwx
10-13 k: ltkszmdqkkklqwbktrbq
6-7 w: mdwxbwdg
2-7 n: pcdhphd
8-14 k: kchdwzkfrwskmksqckd
1-6 q: qqqqqzqq
7-10 z: zzzzzzdzzz
1-4 c: ccgrfccc
10-13 x: zxxqxxtxbnxxxxxx
7-10 b: dbpbdlmwqpjwmtplxzw
10-13 v: vvtmvvpvvxvksvbvvvv
7-12 w: wwzcnwlswwrww
16-17 v: lvtlzvvnhtvvhvkkl
13-15 t: tttttttttwtttttt
3-4 h: dlvx
8-10 q: fmwqrqqkjq
11-20 x: xhwxxxjxxwtxtvxxnxxx
8-9 h: thqzhpdvkr
18-20 v: crrlvhvvdmvgvrfvvxvb
1-7 b: bbbbbbbbbbb
4-5 m: tjmnmmsmp
3-6 j: jsvxbj
4-10 v: mwlvhrttwvn
2-6 k: tgwhvrkt
4-9 x: hpsnbxzxcns
1-6 b: bbbbbbbbbbbbd
3-16 z: fzxstkqfcsmzkcjzf
5-9 g: hgzrgkvgh
1-2 l: lnlb
2-4 p: rrlpp
1-6 k: ckkkkkkkkkkkkk
11-16 j: jkjjjjjjwkdbjjjjjj
2-5 k: ktqksk
2-4 n: nhpbg
5-10 g: qlqfgmrdjxdfhvvbgxrc
9-18 x: xxxxxxxxgxxxxxxxxxpx
11-18 m: hmlcpjmmtwnmjwsjsm
4-7 b: gbrbgqbtx
3-11 w: wwdwwwwwwwnw
2-3 w: qhkqw
1-3 z: zrvzzzzz
4-15 s: lssbsssssssssscsssss
1-5 f: wffff
1-4 s: sssssssssss
15-17 l: lllcllnlllllllxll
4-5 h: hhthg
2-4 f: fffff
10-12 c: cccrwcccccckcc
7-8 f: fffpffmgf
1-11 w: wwwwwwwhwwt
1-9 q: mhqhngqqvvqq
17-19 v: vlthwqfvlfgjvbqdvpkk
4-19 v: vvvzvvvvvvvvvqvvvvvz
10-11 k: kkxkkkzkkkrkkk
4-12 m: wjmmtmqmmmmmmmmmm
3-14 g: ggggggggggggggg
6-7 w: wwwwwwww
4-5 c: clqtstsbfflngcfhgc
1-11 l: zljlllllxljll
3-7 x: xrtxxft
6-8 k: lkkmpkvkkk
1-9 f: glfffffffffffffffff
3-9 r: rrrrrrrrnr
9-10 r: rrrkrtgrrrrrrr
3-9 h: rvvxhnhmht
10-12 q: qpwqqjqqvqrnqqv
5-8 d: ddxddddlddd
1-7 p: lkppppnppkppp
1-4 d: mvsdd
6-7 m: zsmmmmhmmm
4-7 w: bwlzdglxrsgt
2-7 g: lgggtzdbvggqgjcggrq
3-4 r: rrtrj
7-12 l: lllllllllmlqlll
8-9 q: kvtnbfqqzl
8-9 h: qfkhhhrvh
3-4 c: cccjcccccccccc
4-5 v: dpmvwdpk
17-20 w: wwwwwwwwwwwwwwwwwwww
3-16 v: vvvcsfkvmchbbnvxvhzd
5-14 z: bvhrtqkhnwljrzmbvz
10-11 j: jjjjjjjjjpzj
12-14 w: wqwgwwwwwwbdws
1-3 s: dbntscqz
15-18 t: tttttgttttdtttmttt
1-3 l: llldhlnxrr
5-6 c: qcccqhccczccc
10-14 b: bbbvblvbpndqbbbqnbbb
1-2 c: pccchcc
2-7 x: fxsxbmksgjpwspp
4-5 x: wbbxbndlbbls
6-8 f: tfnwfwfsmf
3-9 q: qqqqvsqqkqq
6-17 h: chwghhrhlhhnbsncwc
3-6 w: wvwwkw
14-15 q: qqqqqqzqqqqqqfd
10-16 q: qzjqrvgwdqjqklqk
2-3 h: lxghjh
15-17 f: fvdgfffffffxzflmf
13-15 f: ffffffffffffbftffffr
11-12 g: jgqgbglrgbgfgw
8-9 d: hddddddpd
13-14 p: pppppppppppptx
4-5 w: rwpwwtwwc
8-9 r: rrgrrrrrrrrr
7-8 h: hhhhhhhbqf
12-13 l: llllllllllllll
3-4 n: nnntnn
3-8 c: ccccctcccc
6-7 x: xxgxxnkxfxxxxxfxxxxn
6-7 q: qqzqtzcqs
8-12 t: tttttttttttptt
2-4 q: qxql
3-4 v: vvxxv
8-11 t: tttttttcttm`
|
package cfmysql_test
import (
"errors"
. "github.com/andreasf/cf-mysql-plugin/cfmysql"
"github.com/andreasf/cf-mysql-plugin/cfmysql/cfmysqlfakes"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"os"
)
// MysqlRunner specs: verify that RunMysql and RunMysqlDump locate their
// client binaries via PATH lookup, build the expected command line
// (including optional extra arguments and a temp-file-backed --ssl-ca flag
// when a TLS CA certificate is supplied), wire the standard streams
// through, and surface lookup/run errors to the caller.
var _ = Describe("MysqlRunner", func() {
	Context("RunMysql", func() {
		var exec *cfmysqlfakes.FakeExecWrapper
		var ioutilWrapper *cfmysqlfakes.FakeIoUtilWrapper
		var osWrapper *cfmysqlfakes.FakeOsWrapper
		var runner MysqlRunner
		BeforeEach(func() {
			// Fresh fakes per example so call counts start at zero.
			exec = new(cfmysqlfakes.FakeExecWrapper)
			ioutilWrapper = new(cfmysqlfakes.FakeIoUtilWrapper)
			osWrapper = new(cfmysqlfakes.FakeOsWrapper)
			runner = NewMysqlRunner(exec, ioutilWrapper, osWrapper)
		})
		Context("When mysql is not in PATH", func() {
			It("Returns an error", func() {
				exec.LookPathReturns("", errors.New("PC LOAD LETTER"))
				err := runner.RunMysql("hostname", 42, "dbname", "username", "password", "")
				Expect(err).To(Equal(errors.New("'mysql' client not found in PATH")))
				Expect(exec.LookPathArgsForCall(0)).To(Equal("mysql"))
			})
		})
		Context("When Run returns an error", func() {
			It("Forwards the error", func() {
				exec.LookPathReturns("/path/to/mysql", nil)
				exec.RunReturns(errors.New("PC LOAD LETTER"))
				err := runner.RunMysql("hostname", 42, "dbname", "username", "password", "")
				Expect(err).To(Equal(errors.New("error running mysql client: PC LOAD LETTER")))
			})
		})
		Context("When mysql is in PATH", func() {
			It("Calls mysql with the right arguments", func() {
				exec.LookPathReturns("/path/to/mysql", nil)
				err := runner.RunMysql("hostname", 42, "dbname", "username", "password", "")
				Expect(err).To(BeNil())
				Expect(exec.LookPathCallCount()).To(Equal(1))
				Expect(exec.RunCallCount()).To(Equal(1))
				cmd := exec.RunArgsForCall(0)
				Expect(cmd.Path).To(Equal("/path/to/mysql"))
				Expect(cmd.Args).To(Equal([]string{"/path/to/mysql", "-u", "username", "-ppassword", "-h", "hostname", "-P", "42", "dbname"}))
				Expect(cmd.Stdin).To(Equal(os.Stdin))
				Expect(cmd.Stdout).To(Equal(os.Stdout))
				Expect(cmd.Stderr).To(Equal(os.Stderr))
			})
		})
		Context("When mysql is in PATH and additional arguments are passed", func() {
			It("Calls mysql with the right arguments", func() {
				exec.LookPathReturns("/path/to/mysql", nil)
				err := runner.RunMysql("hostname", 42, "dbname", "username", "password", "", "--foo", "bar", "--baz")
				Expect(err).To(BeNil())
				Expect(exec.LookPathCallCount()).To(Equal(1))
				Expect(exec.RunCallCount()).To(Equal(1))
				cmd := exec.RunArgsForCall(0)
				Expect(cmd.Path).To(Equal("/path/to/mysql"))
				// Extra args are inserted before the database name.
				Expect(cmd.Args).To(Equal([]string{"/path/to/mysql", "-u", "username", "-ppassword", "-h", "hostname", "-P", "42", "--foo", "bar", "--baz", "dbname"}))
				Expect(cmd.Stdin).To(Equal(os.Stdin))
				Expect(cmd.Stdout).To(Equal(os.Stdout))
				Expect(cmd.Stderr).To(Equal(os.Stderr))
			})
		})
		Context("When mysql is in PATH and a TLS CA certificate is part of the service credentials", func() {
			It("Stores the cert in a temp file and calls mysql with --ssl-ca=path", func() {
				exec.LookPathReturns("/path/to/mysql", nil)
				tempFile := new(os.File)
				ioutilWrapper.TempFileReturns(tempFile, nil)
				osWrapper.NameReturns("/path/to/cert.pem")
				err := runner.RunMysql("hostname", 42, "dbname", "username", "password", "cert-content", "--foo", "bar", "--baz")
				Expect(err).To(BeNil())
				Expect(exec.LookPathCallCount()).To(Equal(1))
				Expect(ioutilWrapper.TempFileCallCount()).To(Equal(1))
				Expect(osWrapper.WriteStringCallCount()).To(Equal(1))
				Expect(osWrapper.NameCallCount()).To(Equal(1))
				Expect(exec.RunCallCount()).To(Equal(1))
				// The cert temp file must be cleaned up after the run.
				Expect(osWrapper.RemoveCallCount()).To(Equal(1))
				tempFileDir, tempFilePattern := ioutilWrapper.TempFileArgsForCall(0)
				Expect(tempFileDir).To(Equal(""))
				Expect(tempFilePattern).To(Equal("mysql-ca-cert.pem"))
				writeStringFile, writeStringString := osWrapper.WriteStringArgsForCall(0)
				Expect(writeStringFile).To(BeIdenticalTo(tempFile))
				Expect(writeStringString).To(Equal("cert-content"))
				cmd := exec.RunArgsForCall(0)
				Expect(cmd.Path).To(Equal("/path/to/mysql"))
				Expect(cmd.Args).To(Equal([]string{"/path/to/mysql", "-u", "username", "-ppassword", "-h", "hostname", "-P", "42", "--ssl-ca=/path/to/cert.pem", "--foo", "bar", "--baz", "dbname"}))
				Expect(cmd.Stdin).To(Equal(os.Stdin))
				Expect(cmd.Stdout).To(Equal(os.Stdout))
				Expect(cmd.Stderr).To(Equal(os.Stderr))
				removePath := osWrapper.RemoveArgsForCall(0)
				Expect(removePath).To(Equal("/path/to/cert.pem"))
			})
		})
	})
	Context("RunMysqlDump", func() {
		var exec *cfmysqlfakes.FakeExecWrapper
		var ioutil *cfmysqlfakes.FakeIoUtilWrapper
		var osWrapper *cfmysqlfakes.FakeOsWrapper
		var runner MysqlRunner
		BeforeEach(func() {
			exec = new(cfmysqlfakes.FakeExecWrapper)
			ioutil = new(cfmysqlfakes.FakeIoUtilWrapper)
			osWrapper = new(cfmysqlfakes.FakeOsWrapper)
			runner = NewMysqlRunner(exec, ioutil, osWrapper)
		})
		Context("When mysqldump is not in PATH", func() {
			It("Returns an error", func() {
				exec.LookPathReturns("", errors.New("PC LOAD LETTER"))
				err := runner.RunMysqlDump("hostname", 42, "dbname", "username", "password", "")
				Expect(err).To(Equal(errors.New("'mysqldump' not found in PATH")))
				Expect(exec.LookPathArgsForCall(0)).To(Equal("mysqldump"))
			})
		})
		Context("When Run returns an error", func() {
			It("Forwards the error", func() {
				exec.LookPathReturns("/path/to/mysqldump", nil)
				exec.RunReturns(errors.New("PC LOAD LETTER"))
				err := runner.RunMysqlDump("hostname", 42, "dbname", "username", "password", "")
				Expect(err).To(Equal(errors.New("error running mysqldump: PC LOAD LETTER")))
			})
		})
		Context("When mysqldump is in PATH", func() {
			It("Calls mysqldump with the right arguments", func() {
				exec.LookPathReturns("/path/to/mysqldump", nil)
				err := runner.RunMysqlDump("hostname", 42, "dbname", "username", "password", "")
				Expect(err).To(BeNil())
				Expect(exec.LookPathCallCount()).To(Equal(1))
				Expect(exec.RunCallCount()).To(Equal(1))
				cmd := exec.RunArgsForCall(0)
				Expect(cmd.Path).To(Equal("/path/to/mysqldump"))
				Expect(cmd.Args).To(Equal([]string{"/path/to/mysqldump", "-u", "username", "-ppassword", "-h", "hostname", "-P", "42", "dbname"}))
				Expect(cmd.Stdin).To(Equal(os.Stdin))
				Expect(cmd.Stdout).To(Equal(os.Stdout))
				Expect(cmd.Stderr).To(Equal(os.Stderr))
			})
		})
		Context("When mysqldump is in PATH and additional arguments are passed", func() {
			It("Calls mysqldump with the right arguments", func() {
				exec.LookPathReturns("/path/to/mysqldump", nil)
				// Non-flag extras ("table1", "table2") are treated as table
				// names and appended after the database name.
				err := runner.RunMysqlDump("hostname", 42, "dbname", "username", "password", "", "table1", "table2", "--foo", "bar", "--baz")
				Expect(err).To(BeNil())
				Expect(exec.LookPathCallCount()).To(Equal(1))
				Expect(exec.RunCallCount()).To(Equal(1))
				cmd := exec.RunArgsForCall(0)
				Expect(cmd.Path).To(Equal("/path/to/mysqldump"))
				Expect(cmd.Args).To(Equal([]string{"/path/to/mysqldump", "-u", "username", "-ppassword", "-h", "hostname", "-P", "42", "--foo", "bar", "--baz", "dbname", "table1", "table2"}))
				Expect(cmd.Stdin).To(Equal(os.Stdin))
				Expect(cmd.Stdout).To(Equal(os.Stdout))
				Expect(cmd.Stderr).To(Equal(os.Stderr))
			})
		})
		Context("When mysqldump is in PATH and a TLS CA certificate is part of the service credentials", func() {
			It("Stores the cert in a temp file and calls mysqldump with --ssl-ca=path", func() {
				exec.LookPathReturns("/path/to/mysqldump", nil)
				tempFile := new(os.File)
				ioutil.TempFileReturns(tempFile, nil)
				osWrapper.NameReturns("/path/to/cert.pem")
				err := runner.RunMysqlDump("hostname", 42, "dbname", "username", "password", "cert-content", "table1", "table2", "--foo", "bar", "--baz")
				Expect(err).To(BeNil())
				Expect(exec.LookPathCallCount()).To(Equal(1))
				Expect(ioutil.TempFileCallCount()).To(Equal(1))
				Expect(osWrapper.WriteStringCallCount()).To(Equal(1))
				Expect(osWrapper.NameCallCount()).To(Equal(1))
				Expect(exec.RunCallCount()).To(Equal(1))
				Expect(osWrapper.RemoveCallCount()).To(Equal(1))
				tempFileDir, tempFilePattern := ioutil.TempFileArgsForCall(0)
				Expect(tempFileDir).To(Equal(""))
				Expect(tempFilePattern).To(Equal("mysql-ca-cert.pem"))
				writeStringFile, writeStringString := osWrapper.WriteStringArgsForCall(0)
				Expect(writeStringFile).To(BeIdenticalTo(tempFile))
				Expect(writeStringString).To(Equal("cert-content"))
				cmd := exec.RunArgsForCall(0)
				Expect(cmd.Path).To(Equal("/path/to/mysqldump"))
				Expect(cmd.Args).To(Equal([]string{"/path/to/mysqldump", "-u", "username", "-ppassword", "-h", "hostname", "-P", "42", "--ssl-ca=/path/to/cert.pem", "--foo", "bar", "--baz", "dbname", "table1", "table2"}))
				Expect(cmd.Stdin).To(Equal(os.Stdin))
				Expect(cmd.Stdout).To(Equal(os.Stdout))
				Expect(cmd.Stderr).To(Equal(os.Stderr))
				removePath := osWrapper.RemoveArgsForCall(0)
				Expect(removePath).To(Equal("/path/to/cert.pem"))
			})
		})
	})
})
|
package main
import (
"github.com/beevik/ntp"
log "github.com/sirupsen/logrus"
"os"
"time"
)
// errorWrapper logs the given context message together with the error and
// terminates the process when e is non-nil; it is a no-op for a nil error.
//
// Bug fix: the format arguments were swapped — the error was printed in the
// %s "Message" slot and the message in the %v "Error" slot.
func errorWrapper(e error, message string) {
	if e != nil {
		log.Errorf("Message %s. Error: %v", message, e)
		os.Exit(1)
	}
}
// GetNtpTime queries the NTP server at host and returns the time it
// reports. On failure it returns the zero time and the lookup error.
func GetNtpTime(host string) (time.Time, error) {
	current, err := ntp.Time(host)
	if err != nil {
		return time.Time{}, err
	}
	return current, nil
}
// main fetches the current time from a public NTP pool server and logs it,
// exiting via errorWrapper if the query fails.
func main() {
	now, err := GetNtpTime("0.beevik-ntp.pool.ntp.org")
	errorWrapper(err, "")
	log.Info(now)
}
|
package server
import (
"testing"
"github.com/CyCoreSystems/ari-proxy/internal/integration"
)
// Each test below is a thin wrapper that delegates to the shared
// integration suite, exercising one ARI channel operation against this
// package's server implementation (&srv{}).
func TestChannelData(t *testing.T) {
	integration.TestChannelData(t, &srv{})
}
func TestChannelAnswer(t *testing.T) {
	integration.TestChannelAnswer(t, &srv{})
}
func TestChannelBusy(t *testing.T) {
	integration.TestChannelBusy(t, &srv{})
}
func TestChannelCongestion(t *testing.T) {
	integration.TestChannelCongestion(t, &srv{})
}
func TestChannelHangup(t *testing.T) {
	integration.TestChannelHangup(t, &srv{})
}
func TestChannelList(t *testing.T) {
	integration.TestChannelList(t, &srv{})
}
func TestChannelMute(t *testing.T) {
	integration.TestChannelMute(t, &srv{})
}
func TestChannelUnmute(t *testing.T) {
	integration.TestChannelUnmute(t, &srv{})
}
func TestChannelMOH(t *testing.T) {
	integration.TestChannelMOH(t, &srv{})
}
func TestChannelStopMOH(t *testing.T) {
	integration.TestChannelStopMOH(t, &srv{})
}
func TestChannelCreate(t *testing.T) {
	integration.TestChannelCreate(t, &srv{})
}
func TestChannelContinue(t *testing.T) {
	integration.TestChannelContinue(t, &srv{})
}
func TestChannelDial(t *testing.T) {
	integration.TestChannelDial(t, &srv{})
}
func TestChannelHold(t *testing.T) {
	integration.TestChannelHold(t, &srv{})
}
func TestChannelStopHold(t *testing.T) {
	integration.TestChannelStopHold(t, &srv{})
}
func TestChannelRing(t *testing.T) {
	integration.TestChannelRing(t, &srv{})
}
func TestChannelStopRing(t *testing.T) {
	integration.TestChannelStopRing(t, &srv{})
}
func TestChannelSilence(t *testing.T) {
	integration.TestChannelSilence(t, &srv{})
}
func TestChannelStopSilence(t *testing.T) {
	integration.TestChannelStopSilence(t, &srv{})
}
func TestChannelOriginate(t *testing.T) {
	integration.TestChannelOriginate(t, &srv{})
}
func TestChannelPlay(t *testing.T) {
	integration.TestChannelPlay(t, &srv{})
}
func TestChannelRecord(t *testing.T) {
	integration.TestChannelRecord(t, &srv{})
}
func TestChannelSnoop(t *testing.T) {
	integration.TestChannelSnoop(t, &srv{})
}
func TestChannelSendDTMF(t *testing.T) {
	integration.TestChannelSendDTMF(t, &srv{})
}
func TestChannelVariableGet(t *testing.T) {
	integration.TestChannelVariableGet(t, &srv{})
}
func TestChannelVariableSet(t *testing.T) {
	integration.TestChannelVariableSet(t, &srv{})
}
|
package flags
import (
"encoding/csv"
"fmt"
"github.com/saucelabs/saucectl/internal/config"
"strings"
)
// Simulator represents the simulator configuration.
type Simulator struct {
	config.Simulator
	// Changed reports whether the flag was explicitly set on the command line.
	Changed bool
}
// String returns a string representation of the simulator, or the empty
// string if the flag was never set.
func (e Simulator) String() string {
	if e.Changed {
		return fmt.Sprintf("%+v", e.Simulator)
	}
	return ""
}
// Set sets the simulator to the values present in s.
// The input has to be a comma separated string in the format of "key=value,key2=value2".
// This method is called by cobra when CLI flags are parsed.
//
// It returns an error for malformed input: a CSV parse failure, or a token
// that is not a key=value pair (the original code indexed vs[1] blindly and
// panicked on such input). Values may themselves contain "=" — only the
// first "=" separates key from value.
func (e *Simulator) Set(s string) error {
	e.Changed = true
	rec, err := csv.NewReader(strings.NewReader(s)).Read()
	if err != nil {
		return err
	}
	// TODO consider defaulting this in a common place (between config and CLI flags)
	e.PlatformName = "iOS"
	for _, v := range rec {
		kv := strings.SplitN(v, "=", 2)
		if len(kv) != 2 {
			return fmt.Errorf("expected a key=value pair, got %q", v)
		}
		val := kv[1]
		switch kv[0] {
		case "name":
			e.Name = val
		case "orientation":
			e.Orientation = val
		case "platformVersion":
			e.PlatformVersions = []string{val}
		}
	}
	return nil
}
// Type returns the value type. Cobra uses this string in help output.
func (e Simulator) Type() string {
	return "simulator"
}
|
// Package cargo's publicly available entry point.
package cargo
import (
"fmt"
"sync"
"github.com/ArmadaStore/cargo/pkg/lib"
)
// Run registers this cargo node with the cargo manager and starts its
// background workers (task listener and replica writer), blocking until
// they exit.
//
// cargoMgrIP and cargoMgrPort locate the manager, cargoPort is the port
// this node serves on, and volSize is the storage volume size (units are
// defined by lib.Init — confirm with that package).
func Run(cargoMgrIP string, cargoMgrPort string, cargoPort string, volSize string) error {
	cargoInfo := lib.Init(cargoMgrIP, cargoMgrPort, cargoPort, volSize)
	cargoInfo.Register()
	var wg sync.WaitGroup
	wg.Add(1)
	go cargoInfo.ListenTasks(&wg)
	// wg.Add(1)
	// go cargoInfo.SendToReplicas()
	wg.Add(1)
	go cargoInfo.WriteToReplicas()
	wg.Wait()
	// NOTE(review): this placeholder error is returned after the workers
	// finish — presumably should be nil or a meaningful status; confirm intent.
	return fmt.Errorf("Hello")
}
|
package db
import (
"context"
"fmt"
"log"
"time"
"github.com/letrannhatviet/my_framework/config"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
var (
	// dbName is the MongoDB database name, taken from the app configuration.
	dbName = config.Config.MongoDB.Name
	// dbCol is the collection this package operates on.
	dbCol = "Student"
)

// Client is the shared MongoDB client, established by init().
var Client *mongo.Client
// init builds the MongoDB connection string from configuration, connects,
// verifies the connection with a ping against the primary, and publishes
// the client through the package-level Client variable. Any failure is
// fatal: the process cannot run without a database connection.
//
// Fixes over the original: error handling is consistently log.Fatal
// (Connect previously panicked while the other paths used log.Fatal), and
// each context's cancel is deferred immediately after creation so it is
// registered before any code path that can exit.
func init() {
	connectionString := fmt.Sprintf(
		"mongodb://%s:%s@%s:%s",
		config.Config.MongoDB.User,
		config.Config.MongoDB.Password,
		config.Config.MongoDB.Host,
		config.Config.MongoDB.Port,
	)
	// SECURITY(review): this prints the database credentials to stdout;
	// kept for behavioral parity but it should be removed or redacted.
	fmt.Println(connectionString)
	client, err := mongo.NewClient(options.Client().ApplyURI(connectionString))
	if err != nil {
		log.Fatal(err)
	}
	connectCtx, connectCancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer connectCancel()
	if err := client.Connect(connectCtx); err != nil {
		log.Fatal(err)
	}
	pingCtx, pingCancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer pingCancel()
	if err := client.Ping(pingCtx, readpref.Primary()); err != nil {
		log.Fatal(err)
	}
	Client = client
}
|
package main
import (
"log"
"net/http"
"github.com/julienschmidt/httprouter"
linkRouter "github.com/wicoady1/gtdr-score-parser/router"
)
// main wires up the HTTP routes and static file handlers, then serves the
// application on port 8080. ListenAndServe only returns on failure, which
// is fatal.
func main() {
	r := httprouter.New()

	// Page and upload endpoints.
	r.GET("/", linkRouter.Index)
	r.POST("/uploadfile", linkRouter.UploadFile)
	r.GET("/resultimage", linkRouter.ResultImage)
	r.POST("/resultimage", linkRouter.ResultImage)

	// Static assets.
	r.ServeFiles("/sources/*filepath", http.Dir("sources"))
	r.ServeFiles("/asset/*filepath", http.Dir("asset"))

	log.Println("Serving on 8080")
	log.Fatal(http.ListenAndServe(":8080", r))
}
|
package utils
import (
"errors"
"fmt"
"math"
"strconv"
"time"
"github.com/golang191119/nc_crm/db"
"github.com/golang191119/nc_crm/model"
"github.com/golang191119/nc_crm/model/request"
)
func TimeFormat(t time.Time) string {
month := strconv.Itoa(int(t.Month()))
if len(month) == 1 {
month = "0" + month
}
day := strconv.Itoa(t.Day())
if len(day) == 1 {
day = "0" + day
}
hour := strconv.Itoa(t.Hour())
if len(hour) == 1 {
hour = "0" + hour
}
minute := strconv.Itoa(t.Minute())
if len(minute) == 1 {
minute = "0" + minute
}
second := strconv.Itoa(t.Second())
if len(second) == 1 {
second = "0" + second
}
str := fmt.Sprintf("%v-%v-%v %v:%v:%v", t.Year(), month, day, hour, minute, second)
fmt.Println("TIME STAMP: ", str)
return str
}
// CalBasePrice computes the ceiling of the base transport price for the
// given contract and truck type over the given distance (distance unit is
// whatever the rate card uses — confirm with the schema).
//
// Rate-card levels are loaded from the database ordered by ascending
// truck_level.rate; each level's price is applied to the distance segment
// bounded by the next level's rate threshold.
func CalBasePrice(contractID int, truckType int, distance int) (float64, error) {
	var total float64
	var ratePrice []model.RatePrice
	database := db.GetDB()
	// Join truck_level -> truck_rate_card_level -> truck_rate_card ->
	// customer_contract to fetch (rate, code, price) rows for this
	// contract and truck type, cheapest threshold first.
	selectItems := "truck_level.rate, truck_level.code, truck_rate_card_level.price"
	dbc := database.Table("truck_level").Joins("JOIN truck_rate_card_level ON truck_level.id = truck_rate_card_level.level_id").Joins("JOIN truck_rate_card ON truck_rate_card.id = truck_rate_card_level.rate_card_id").Joins("JOIN customer_contract ON customer_contract.id = truck_rate_card.contract_id").Where("customer_contract.id = ? AND truck_rate_card.truck_type = ?", contractID, truckType).Order("truck_level.rate asc").Select(selectItems)
	defer database.Close()
	if dbc.Error != nil {
		return 0, dbc.Error
	}
	rows, err := dbc.Rows()
	if err != nil {
		return 0, err
	}
	defer rows.Close()
	for rows.Next() {
		var rate int
		var code string
		var price float64
		err = rows.Scan(&rate, &code, &price)
		if err != nil {
			return 0, err
		}
		ratePrice = append(ratePrice, model.RatePrice{
			Rate: rate,
			Code: code,
			Price: price,
		})
	}
	if len(ratePrice) == 0 {
		return 0, errors.New("Rate card level is not found!")
	}
	// Single level: flat per-distance price.
	if len(ratePrice) == 1 {
		return float64(distance) * ratePrice[0].Price, nil
	}
	// Calculate base price by distance steps
	// NOTE(review): when the remaining distance fits within the next
	// threshold, the full threshold is still subtracted, driving distance
	// negative so the loop stops — verify this matches the intended step
	// pricing (it looks asymmetric with the else branch).
	for i := 0; i < len(ratePrice)-1; i++ {
		if distance <= 0 {
			break
		}
		if distance <= ratePrice[i+1].Rate {
			total += float64(distance) * ratePrice[i].Price
			distance -= ratePrice[i+1].Rate
		} else {
			distance -= ratePrice[i+1].Rate
			total += float64(ratePrice[i+1].Rate) * ratePrice[i].Price
		}
	}
	// Distance beyond the last threshold is billed at the top level's price.
	if distance > 0 {
		total += ratePrice[len(ratePrice)-1].Price * float64(distance)
	}
	return math.Ceil(total), nil
}
// CalFee computes the ceiling of the total surcharge for a consumption
// record under the given contract: return, COD, stop, paperwork, lifting
// and checking fees plus the declared-value fee, each applied only when
// its consumption counter is positive.
func CalFee(contractID int, r *request.Consumption) (float64, error) {
	var contract model.CustomerContract
	database := db.GetDB()
	defer database.Close()
	result := database.Model(model.CustomerContract{}).Where("id = ?", contractID).Find(&contract)
	if result.Error != nil {
		return 0, result.Error
	}
	// Pair each consumption amount with its contract rate; kept in the
	// same order as the original accumulation.
	charges := []struct {
		amount float64
		rate   float64
	}{
		{float64(r.ReturnAmount), float64(contract.ReturnRatio)}, // return fee
		{float64(r.Cod), float64(contract.CodRatio)},             // cash on delivery
		{float64(r.StopCount), float64(contract.StopFee)},        // stop count
		{float64(r.PaperCount), float64(contract.PaperFee)},      // paperwork
		{float64(r.LiftingCount), float64(contract.LiftFee)},     // lifting
		{float64(r.CheckingCount), float64(contract.CheckFee)},   // checking
		{r.Value, float64(contract.ValueRatio)},                  // declared value
	}
	var total float64
	for _, c := range charges {
		if c.amount > 0 {
			total += c.amount * c.rate
		}
	}
	return math.Ceil(total), nil
}
|
// Coin change problem.
// Given coins/notes of denominations 1, 2, 5, 10, 20, 50, 100 and 200 yuan,
// count how many distinct combinations of them sum to N yuan.
package main
import (
"fmt"
"strconv"
)
// dp counts the number of ways to make change for money using the
// denominations in A (unlimited supply of each), via bottom-up dynamic
// programming over amounts 0..money.
func dp(A []int, money int) int {
	ways := make([]int, money+1)
	ways[0] = 1 // exactly one way to make zero: take no coins
	for _, coin := range A {
		for amount := coin; amount <= money; amount++ {
			ways[amount] += ways[amount-coin]
		}
	}
	return ways[money]
}
// vsearch brute-force counts the combinations of denominations
// A[index:] that sum exactly to aim: for each count of the current
// denomination that fits, it recurses on the remainder.
func vsearch(A []int, index int, aim int) int {
	if index == len(A) {
		// All denominations consumed; success only on an exact match.
		if aim == 0 {
			return 1
		}
		return 0
	}
	total := 0
	for count := 0; count*A[index] <= aim; count++ {
		total += vsearch(A, index+1, aim-count*A[index])
	}
	return total
}
// B memoizes msearch results, keyed by "index_aim".
var B map[string]int

// msearch counts the combinations of denominations A[index:] summing to
// aim, memoizing each (index, aim) subproblem in B.
//
// Fix: B is now lazily initialized, so callers no longer have to
// pre-allocate it — the original panicked on the nil-map write B[key] =
// result when B was unset.
func msearch(A []int, index int, aim int) int {
	if B == nil {
		B = make(map[string]int)
	}
	key := strconv.Itoa(index) + "_" + strconv.Itoa(aim)
	if cached, ok := B[key]; ok {
		return cached
	}
	result := 0
	if index == len(A) {
		if aim == 0 {
			result = 1
		}
	} else {
		for i := 0; i*A[index] <= aim; i++ {
			result += msearch(A, index+1, aim-i*A[index])
		}
	}
	B[key] = result
	return result
}
// main solves the coin-change count for 2000 yuan with the standard
// denominations, once via dynamic programming and once via memoized
// recursion (the plain brute-force vsearch is far too slow for this input).
func main() {
	denominations := []int{1, 2, 5, 10, 20, 50, 100, 200}
	const money = 2000

	// Dynamic-programming solution.
	fmt.Println(dp(denominations, money))

	// Memoized recursive solution.
	B = map[string]int{}
	fmt.Println(msearch(denominations, 0, money))
}
|
package main
import (
"crypto/md5"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"hash"
"io"
"io/ioutil"
"net/http"
"os"
)
func hashFile(filePath string, hashCreator func() hash.Hash, encodeMethod func([]byte) string) (string, error) {
//Initialize variable hashString now in case an error has to be returned
var hashString string
//Open the passed argument and check for any error
file, err := os.Open(filePath)
if err != nil {
return hashString, err
}
//Tell the program to call the following function when the current function returns
defer file.Close()
//Open a new hash interface to write to
hash := hashCreator()
//Copy the file in the hash interface and check for any error
if _, err := io.Copy(hash, file); err != nil {
return hashString, err
}
//Get the hash in bytes
hashInBytes := hash.Sum(nil)
//Convert the bytes to a string
hashString = encodeMethod(hashInBytes)
return hashString, nil
}
// hashFileMd5 returns the hex-encoded MD5 digest of the file at filePath.
func hashFileMd5(filePath string) (string, error) {
	return hashFile(filePath, md5.New, hex.EncodeToString)
}

// hashFileBase64Sha256 returns the standard-base64-encoded SHA-256 digest
// of the file at filePath.
func hashFileBase64Sha256(filePath string) (string, error) {
	return hashFile(filePath, sha256.New, base64.StdEncoding.EncodeToString)
}
// downloadFile fetches url into the local file at filepath and returns
// the downloaded content's base64-encoded SHA-256 digest, hex-encoded MD5
// digest, and the response's ETag header value (empty if the server sent
// none).
//
// NOTE(review): the HTTP status code is not checked — a 404 error page
// would be saved and hashed like any other payload; confirm whether that
// is intended.
func downloadFile(filepath string, url string) (string, string, string, error) {
	// Get the data
	resp, err := http.Get(url)
	if err != nil {
		return "", "", "", err
	}
	defer resp.Body.Close()
	etag := resp.Header.Get("etag")
	// Create the file
	out, err := os.Create(filepath)
	if err != nil {
		return "", "", "", err
	}
	defer out.Close()
	// Write the body to file
	_, err = io.Copy(out, resp.Body)
	if err != nil {
		return "", "", "", err
	}
	// Hash the file on disk (re-opened by name) rather than the stream.
	base64Sha256, err := hashFileBase64Sha256(out.Name())
	if err != nil {
		return "", "", "", err
	}
	md5, err := hashFileMd5(out.Name())
	if err != nil {
		return "", "", "", err
	}
	return base64Sha256, md5, etag, nil
}
// checkIfRemoteFileChanged reports whether the remote file at url differs
// from a previously downloaded copy, identified by its base64-encoded
// SHA-256 digest and optional ETag. It returns true when the content
// changed, false when it is unchanged.
//
// Fixes over the original: the ioutil.TempFile error was ignored (nil
// pointer dereference on failure), the response body was never closed
// (connection leak), the temp file was pointlessly re-created with
// os.Create and never removed, and a 304 Not Modified response returned
// true ("changed") even though it means the cached copy is still valid.
func checkIfRemoteFileChanged(url string, oldBase64Sha256 string, etag string) (bool, error) {
	client := &http.Client{}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return false, err
	}
	if etag != "" {
		// Ask the server to skip the body when the ETag still matches.
		req.Header.Set("If-None-Match", etag)
	}
	resp, err := client.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		// 304: server confirmed the cached copy is current.
		return false, nil
	}
	// ETag unusable: download the body and compare content hashes.
	tmpfile, err := ioutil.TempFile("", "*")
	if err != nil {
		return false, err
	}
	defer os.Remove(tmpfile.Name())
	defer tmpfile.Close()
	if _, err := io.Copy(tmpfile, resp.Body); err != nil {
		return false, err
	}
	remoteBase64Sha256, err := hashFileBase64Sha256(tmpfile.Name())
	if err != nil {
		return false, err
	}
	return remoteBase64Sha256 != oldBase64Sha256, nil
}
|
package adabas
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// exportFileName is the scratch path the export test writes to.
const exportFileName = "/tmp/go-test-export.json"

// TestExportMap exports the Adabas map repository at database URL "23",
// file number 4, into exportFileName and expects the export to succeed.
// NOTE(review): requires a reachable Adabas instance; errors from NewURL
// and NewAdabas are deliberately ignored here.
func TestExportMap(t *testing.T) {
	// Start from a clean slate; ignore failure if the file does not exist.
	os.Remove(exportFileName)
	url, _ := NewURL("23")
	dbURL := DatabaseURL{URL: *url, Fnr: 4}
	repository := NewMapRepositoryWithURL(dbURL)
	ada, _ := NewAdabas(url)
	err := repository.ExportMapRepository(ada, "", exportFileName)
	assert.NoError(t, err)
}
|
// /*
// Copyright 2017 The Rook Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Some of the code below came from https://github.com/coreos/etcd-operator
// which also has the apache 2.0 license.
// */
package crd
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api"
)
// RegisterFakeAPI builds a runtime.Scheme with the core API scheme and
// this package's known types registered, for use in tests.
// NOTE(review): the error returned by AddToScheme is ignored — tolerable
// in a test fake, but a failed registration would only surface later as
// missing types.
func RegisterFakeAPI() *runtime.Scheme {
	scheme := runtime.NewScheme()
	api.SchemeBuilder.AddToScheme(scheme)
	api.SchemeBuilder.Register(addKnownTypes)
	return scheme
}
// MockVolumeAttachmentController is a test double for a volume attachment
// controller: each operation delegates to the corresponding MockXxx hook
// when one is set, and otherwise is a no-op returning zero values and a
// nil error.
type MockVolumeAttachmentController struct {
	MockCreate func(volumeAttachment VolumeAttachment) error
	MockGet func(namespace, name string) (VolumeAttachment, error)
	MockList func(namespace string) (VolumeAttachmentList, error)
	MockUpdate func(volumeAttachment VolumeAttachment) error
	MockDelete func(namespace, name string) error
}

// Create delegates to MockCreate when set; otherwise succeeds silently.
func (m *MockVolumeAttachmentController) Create(volumeAttachment VolumeAttachment) error {
	if m.MockCreate != nil {
		return m.MockCreate(volumeAttachment)
	}
	return nil
}

// Get delegates to MockGet when set; otherwise returns a zero attachment.
func (m *MockVolumeAttachmentController) Get(namespace, name string) (VolumeAttachment, error) {
	if m.MockGet != nil {
		return m.MockGet(namespace, name)
	}
	return VolumeAttachment{}, nil
}

// List delegates to MockList when set; otherwise returns an empty list.
func (m *MockVolumeAttachmentController) List(namespace string) (VolumeAttachmentList, error) {
	if m.MockList != nil {
		return m.MockList(namespace)
	}
	return VolumeAttachmentList{}, nil
}

// Update delegates to MockUpdate when set; otherwise succeeds silently.
func (m *MockVolumeAttachmentController) Update(volumeAttachment VolumeAttachment) error {
	if m.MockUpdate != nil {
		return m.MockUpdate(volumeAttachment)
	}
	return nil
}

// Delete delegates to MockDelete when set; otherwise succeeds silently.
func (m *MockVolumeAttachmentController) Delete(namespace, name string) error {
	if m.MockDelete != nil {
		return m.MockDelete(namespace, name)
	}
	return nil
}
|
package main
import (
"fmt"
"math"
)
// totalUniversalSubarrays accumulates the count of even-length "universal"
// subarrays found by processSubArrays.
var totalUniversalSubarrays int

// uniSubarrList records each universal subarray found (may contain
// duplicates — see the FIXME in processSubArrays).
var uniSubarrList [][]int

// main runs the universal-subarray enumeration over a fixed sample input
// and prints the resulting count.
func main() {
	// fmt.Println("LongestStablePriceCountArr => ", fetchLongestStablePrices([]int{3, 1,2,1,2,2,1,3,1,1,2,2,2,2}, 1))
	// fmt.Println("NoOfUniversalArray => ", countingUniversalSubarrays([]int{4,4,2,2,4,2}))
	processSubArrays([]int{4,4,2,2,4,2}, 0, 0)
	fmt.Println("totalUniversalSubarrays => ", totalUniversalSubarrays)
}
// processSubArrays recursively enumerates every contiguous subarray
// data[start:end+1]: it advances start for a fixed end, and once start
// passes end it restarts from 0 with end+1. Each even-length subarray is
// tested with countingUniversalSubarrays; matches increment the
// totalUniversalSubarrays/uniSubarrList package globals.
func processSubArrays(data []int, start, end int) {
	if end == len(data) {
		// The right edge ran off the array: enumeration is complete.
		return
	} else if start > end {
		// Exhausted all start positions for this end; widen the window.
		processSubArrays(data, 0, end+1)
	} else {
		subArray := data[start:end+1]
		// Universal subarrays need equal counts of two values, so only
		// even lengths can qualify.
		if len(subArray) % 2 == 0 {
			// fmt.Println("subArray => ", subArray)
			if countingUniversalSubarrays(subArray) {
				totalUniversalSubarrays++
				// FIXME: Remove the duplicate array from the count
				uniSubarrList = append(uniSubarrList, subArray)
				fmt.Println("Is universal subarray: TRUE")
			} else {
				fmt.Println("Is universal subarray: FALSE")
			}
		}
		processSubArrays(data, start+1, end)
	}
}
// countingUniversalSubarrays reports whether subArray is "universal": a
// run of one value followed by a run of a single different value, with
// both runs the same length (e.g. [4 4 2 2]).
//
// The previous implementation used 0 as a "value not seen yet" sentinel,
// which misclassified inputs that legitimately contain 0, and it returned
// true for an empty slice; explicit booleans fix both.
func countingUniversalSubarrays(subArray []int) bool {
	if len(subArray) == 0 {
		return false
	}
	var (
		first, second           int
		firstSeen, secondSeen   bool
		firstCount, secondCount int
	)
	for _, v := range subArray {
		switch {
		case !firstSeen:
			first, firstSeen = v, true
			firstCount++
		case v == first && !secondSeen:
			firstCount++
		case !secondSeen:
			second, secondSeen = v, true
			secondCount++
		case v == second:
			secondCount++
		default:
			// a third distinct value, or the first value reappearing
			// after the second run started, disqualifies the subarray
			return false
		}
	}
	return firstCount == secondCount
}
// fetchLongestStablePrices returns the longest contiguous run of prices in
// data in which every consecutive pair differs by at most x (the longest
// "stable" market period). Ties keep the earliest run; empty input yields
// nil. x is expected to be non-negative.
//
// The previous version used prevPrice == 0 as a "first element" sentinel,
// which broke comparisons around any price equal to 0, and computed the
// absolute difference twice; the run is now tracked by index.
func fetchLongestStablePrices(data []int, x int) []int {
	var best []int
	var current []int
	for i, price := range data {
		if i > 0 {
			diffAbs := int(math.Abs(float64(price - data[i-1])))
			if diffAbs > x {
				// the streak broke; keep it if it is the longest so far
				if len(current) > len(best) {
					best = current
				}
				current = nil
			}
		}
		current = append(current, price)
	}
	// flush the final streak
	if len(current) > len(best) {
		best = current
	}
	return best
}
package service
import (
"bytes"
"fmt"
"github.com/go-ocf/cloud/portal-webapi/uri"
"github.com/go-ocf/kit/log"
"github.com/ugorji/go/codec"
"github.com/valyala/fasthttp"
router "github.com/buaazp/fasthttprouter"
pbRA "github.com/go-ocf/cloud/resource-aggregate/pb"
pbDD "github.com/go-ocf/cloud/resource-directory/pb/device-directory"
pbRD "github.com/go-ocf/cloud/resource-directory/pb/resource-directory"
pbRS "github.com/go-ocf/cloud/resource-directory/pb/resource-shadow"
)
//RequestHandler for handling incoming request
type RequestHandler struct {
	config Config // NOTE(review): never populated by NewRequestHandler — confirm whether it should be
	server *Server
	raClient pbRA.ResourceAggregateClient // resource-aggregate gRPC client
	rsClient pbRS.ResourceShadowClient // resource-shadow gRPC client
	rdClient pbRD.ResourceDirectoryClient // resource-directory gRPC client
	ddClient pbDD.DeviceDirectoryClient // device-directory gRPC client
}
//NewRequestHandler factory for new RequestHandler wired to the given
//server and backend gRPC clients.
func NewRequestHandler(server *Server, raClient pbRA.ResourceAggregateClient, rsClient pbRS.ResourceShadowClient, rdClient pbRD.ResourceDirectoryClient, ddClient pbDD.DeviceDirectoryClient) *RequestHandler {
	h := new(RequestHandler)
	h.server = server
	h.raClient = raClient
	h.rsClient = rsClient
	h.rdClient = rdClient
	h.ddClient = ddClient
	return h
}
// logAndWriteErrorResponse logs err and sends its message as the response
// body with the given HTTP status code.
func logAndWriteErrorResponse(err error, statusCode int, ctx *fasthttp.RequestCtx) {
	log.Errorf("%v", err)
	ctx.SetStatusCode(statusCode)
	ctx.Response.SetBody([]byte(err.Error()))
}
// toJson encodes v as canonical JSON using the ugorji codec and returns
// the encoded bytes.
func toJson(v interface{}) ([]byte, error) {
	handle := new(codec.JsonHandle)
	handle.BasicHandle.Canonical = true
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	if err := codec.NewEncoder(buf, handle).Encode(v); err != nil {
		return nil, fmt.Errorf("cannot convert to json: %v", err)
	}
	return buf.Bytes(), nil
}
// writeJson serializes v to JSON and writes it as the response body with
// the given status code; on serialization failure it responds with
// HTTP 500 instead.
func writeJson(v interface{}, statusCode int, ctx *fasthttp.RequestCtx) {
	body, err := toJson(v)
	if err != nil {
		logAndWriteErrorResponse(fmt.Errorf("cannot write body: %v", err), fasthttp.StatusInternalServerError, ctx)
		return
	}
	ctx.Response.Header.SetContentType("application/json")
	ctx.Response.SetBody(body)
	ctx.SetStatusCode(statusCode)
}
// validateRequest authenticates the request and, on success, invokes cbk
// with the parsed token and subject; otherwise it responds 401.
func validateRequest(ctx *fasthttp.RequestCtx, cbk func(ctx *fasthttp.RequestCtx, token, sub string)) {
	token, sub, err := parseAuth(ctx)
	if err == nil {
		cbk(ctx, token, sub)
		return
	}
	logAndWriteErrorResponse(fmt.Errorf("invalid request: %v", err), fasthttp.StatusUnauthorized, ctx)
}
//NewHTTP return router handle HTTP request
//
// It wires each portal endpoint to its RequestHandler method; every route
// except the healthcheck is guarded by validateRequest authentication.
func NewHTTP(requestHandler *RequestHandler) *router.Router {
	// named "r" so the local variable no longer shadows the imported
	// router package
	r := router.New()
	r.GET(uri.Devices, func(ctx *fasthttp.RequestCtx) {
		validateRequest(ctx, requestHandler.listDevices)
	})
	r.DELETE(uri.Devices+"/:deviceId", func(ctx *fasthttp.RequestCtx) {
		validateRequest(ctx, requestHandler.offboardDevice)
	})
	r.GET(uri.Resources, func(ctx *fasthttp.RequestCtx) {
		validateRequest(ctx, requestHandler.listResources)
	})
	r.GET(uri.Resources+"/:resourceId", func(ctx *fasthttp.RequestCtx) {
		validateRequest(ctx, requestHandler.getResourceContent)
	})
	r.PUT(uri.Resources+"/:resourceId", func(ctx *fasthttp.RequestCtx) {
		validateRequest(ctx, requestHandler.updateResourceContent)
	})
	r.GET(uri.Healthcheck, requestHandler.healthcheck)
	return r
}
|
// Copyright 2020-2021 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bufmodule
import "time"
// externalLockFile represents the buf.lock configuration file.
type externalLockFile struct {
	// Deps lists the pinned dependencies, one entry per ModulePin.
	Deps []*externalLockFileDep `json:"deps" yaml:"deps"`
}
// newExternalLockFile builds the external buf.lock representation from the
// given pins; modulePins are expected to be sorted and unique.
func newExternalLockFile(modulePins []ModulePin) *externalLockFile {
	lockFile := &externalLockFile{
		Deps: make([]*externalLockFileDep, 0, len(modulePins)),
	}
	for _, modulePin := range modulePins {
		lockFile.Deps = append(lockFile.Deps, newExternalLockFileDep(modulePin))
	}
	return lockFile
}
// externalLockFileDep is a single dependency entry in buf.lock, mirroring
// the fields of a ModulePin.
type externalLockFileDep struct {
	Remote string `json:"remote,omitempty" yaml:"remote,omitempty"`
	Owner string `json:"owner,omitempty" yaml:"owner,omitempty"`
	Repository string `json:"repository,omitempty" yaml:"repository,omitempty"`
	Branch string `json:"branch,omitempty" yaml:"branch,omitempty"`
	Commit string `json:"commit,omitempty" yaml:"commit,omitempty"`
	Digest string `json:"digest,omitempty" yaml:"digest,omitempty"`
	CreateTime time.Time `json:"create_time,omitempty" yaml:"create_time,omitempty"`
}
// newExternalLockFileDep converts a ModulePin into its buf.lock dependency
// entry.
func newExternalLockFileDep(modulePin ModulePin) *externalLockFileDep {
	dep := new(externalLockFileDep)
	dep.Remote = modulePin.Remote()
	dep.Owner = modulePin.Owner()
	dep.Repository = modulePin.Repository()
	dep.Branch = modulePin.Branch()
	dep.Commit = modulePin.Commit()
	dep.Digest = modulePin.Digest()
	dep.CreateTime = modulePin.CreateTime()
	return dep
}
// modulePinsForExternalLockFile converts the deps of a buf.lock file back
// into ModulePins, re-sorting and re-validating uniqueness defensively.
// The parameter was renamed so it no longer shadows the externalLockFile
// type.
func modulePinsForExternalLockFile(lockFile *externalLockFile) ([]ModulePin, error) {
	modulePins := make([]ModulePin, 0, len(lockFile.Deps))
	for _, dep := range lockFile.Deps {
		modulePin, err := NewModulePin(
			dep.Remote,
			dep.Owner,
			dep.Repository,
			dep.Branch,
			dep.Commit,
			dep.Digest,
			dep.CreateTime,
		)
		if err != nil {
			return nil, err
		}
		modulePins = append(modulePins, modulePin)
	}
	// just to be safe
	SortModulePins(modulePins)
	if err := ValidateModulePinsUniqueByIdentity(modulePins); err != nil {
		return nil, err
	}
	return modulePins, nil
}
|
package core
import (
"context"
"encoding/json"
"errors"
"time"
"github.com/google/uuid"
)
// Namespace is a tenant-like grouping persisted in the datastore.
type Namespace struct {
	ID uuid.UUID
	Name string
	Config string // JSON configuration document (see DefaultNamespaceConfig)
	RootsInfo string // JSON-encoded RootsInfo (see Roots / RootsInfo.Marshal)
	CreatedAt time.Time
	UpdatedAt time.Time
}
// RootInfo pairs a root's human-readable name with its identifier.
type RootInfo struct {
	Name string
	RootID uuid.UUID
}
// RootsInfo is the JSON document stored in Namespace.RootsInfo; it
// currently tracks only the default root.
type RootsInfo struct {
	Default RootInfo
}
// Marshal serializes the RootsInfo to its JSON string form, panicking if
// encoding fails.
func (ri *RootsInfo) Marshal() string {
	encoded, err := json.Marshal(ri)
	if err != nil {
		// a struct of plain value fields should never fail to encode
		panic(err)
	}
	return string(encoded)
}
// Roots decodes the namespace's JSON-encoded RootsInfo column.
func (ns *Namespace) Roots() (*RootsInfo, error) {
	var ri RootsInfo
	if err := json.Unmarshal([]byte(ns.RootsInfo), &ri); err != nil {
		return nil, err
	}
	return &ri, nil
}
// GetAttributes returns the namespace identity as a string attribute map.
func (ns *Namespace) GetAttributes() map[string]string {
	attrs := make(map[string]string, 2)
	attrs["namespace"] = ns.Name
	attrs["namespace-id"] = ns.ID.String()
	return attrs
}
var (
	// ErrInvalidNamespaceName indicates a namespace name that is invalid
	// (e.g. too short or too long).
	ErrInvalidNamespaceName = errors.New("ErrInvalidNamespaceName")
	// ErrDuplicatedNamespaceName indicates the namespace name is already
	// in use.
	ErrDuplicatedNamespaceName = errors.New("ErrDuplicatedNamespaceName")
)
// NamespacesStore responsible for fetching and setting namespaces from datastore.
type NamespacesStore interface {
	// GetByID gets a single namespace object from store. if no record found,
	// it returns datastore.ErrNotFound error.
	GetByID(ctx context.Context, id uuid.UUID) (*Namespace, error)
	// GetByName gets a single namespace object from store. if no record found,
	// it returns datastore.ErrNotFound error.
	GetByName(ctx context.Context, name string) (*Namespace, error)
	// GetAll gets all namespaces from store.
	GetAll(ctx context.Context) ([]*Namespace, error)
	// Update changes a namespace data.
	Update(ctx context.Context, namespace *Namespace) (*Namespace, error)
	// Delete deletes a single namespace. if no record found,
	// it returns datastore.ErrNotFound error.
	Delete(ctx context.Context, id uuid.UUID) error
	// Create creates a new namespace. Returned errors could be
	// ErrDuplicatedNamespaceName when the namespace name already exists, or
	// ErrInvalidNamespaceName when the namespace name is invalid (too short
	// or too long).
	Create(ctx context.Context, namespace *Namespace) (*Namespace, error)
}
// DefaultNamespaceConfig is the JSON configuration applied to namespaces
// by default: every event broadcast flag starts disabled.
const DefaultNamespaceConfig = `
{
	"broadcast": {
	  "workflow.create": false,
	  "workflow.update": false,
	  "workflow.delete": false,
	  "directory.create": false,
	  "directory.delete": false,
	  "workflow.variable.create": false,
	  "workflow.variable.update": false,
	  "workflow.variable.delete": false,
	  "namespace.variable.create": false,
	  "namespace.variable.update": false,
	  "namespace.variable.delete": false,
	  "instance.variable.create": false,
	  "instance.variable.update": false,
	  "instance.variable.delete": false,
	  "instance.started": false,
	  "instance.success": false,
	  "instance.failed": false
	}
}
`
|
package main
import (
"context"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"golang.org/x/crypto/bcrypt"
)
// emptyCollection removes every document from c and returns how many were
// deleted.
func emptyCollection(c *mongo.Collection) (int64, error) {
	res, err := c.DeleteMany(context.Background(), bson.D{{}})
	if err != nil {
		return 0, err
	}
	return res.DeletedCount, nil
}
// insertSampleStudents resets the Students collection and seeds it with
// two sample students enrolled in a fixed batch. Unlike the previous
// version, every setup error (collection reset, password hashing, batch
// ID parsing) is now checked instead of silently discarded.
func insertSampleStudents(db *mongo.Database) (*mongo.InsertManyResult, error) {
	studentCollection := db.Collection("Students")
	if _, err := emptyCollection(studentCollection); err != nil {
		return nil, err
	}
	pass, err := bcrypt.GenerateFromPassword([]byte("pass123"), bcrypt.DefaultCost)
	if err != nil {
		return nil, err
	}
	batchID, err := primitive.ObjectIDFromHex("5ff37a95c8f63363476389f6")
	if err != nil {
		return nil, err
	}
	s1 := Student{
		Username: "Ayush.Sharma",
		Password: string(pass),
		Name:     "Ayush Sharma",
		Email:    "ayush.sharma@test.com",
		Std:      "Sem 1-2",
		Balance:  25000.0,
		Location: "Delhi",
		Batches:  []primitive.ObjectID{batchID},
	}
	s2 := Student{
		Username: "Rahul.Singh",
		Password: string(pass),
		Name:     "Rahul Singh",
		Email:    "rahul.singh@test.com",
		Std:      "Sem 1-2",
		Balance:  22000.0,
		Location: "Jaipur",
		Batches:  []primitive.ObjectID{batchID},
	}
	studentList := []interface{}{s1, s2}
	return studentCollection.InsertMany(context.Background(), studentList)
}
// insertSampleInstructors resets the Instructors collection and seeds it
// with one sample instructor assigned to two fixed batches. All setup
// errors are now checked instead of being shadowed by later assignments.
func insertSampleInstructors(db *mongo.Database) (*mongo.InsertOneResult, error) {
	instructorCollection := db.Collection("Instructors")
	if _, err := emptyCollection(instructorCollection); err != nil {
		return nil, err
	}
	pass, err := bcrypt.GenerateFromPassword([]byte("pass123"), bcrypt.DefaultCost)
	if err != nil {
		return nil, err
	}
	batchID1, err := primitive.ObjectIDFromHex("5ff37a95c8f63363476389f6")
	if err != nil {
		return nil, err
	}
	batchID2, err := primitive.ObjectIDFromHex("5ff37a95c8f63363476389f7")
	if err != nil {
		return nil, err
	}
	i1 := Instructor{
		Username: "Rajiv.Kumar",
		Password: string(pass),
		Name:     "Rajiv Kumar",
		Email:    "rajiv.kumar@test.com",
		Fees:     1210.0,
		Batches:  []primitive.ObjectID{batchID1, batchID2},
	}
	return instructorCollection.InsertOne(context.Background(), i1)
}
|
package echo
import (
"net"
"sync"
"github.com/korylprince/go-icmpv4/v2"
)
//Send sends an ICMPv4 Echo Request to raddr from laddr with the given identifier and sequence
func Send(laddr, raddr *net.IPAddr, identifier, sequence uint16) (err error) {
	req := NewEchoRequest(identifier, sequence)
	return icmpv4.Send(laddr, raddr, req.Marshal())
}
//convertAndFilter forwards only Echo Reply packets (ICMP type 0, code 0)
//from in to out, wrapping them in this package's IPPacket; it signals wg
//once in is closed and drained.
func convertAndFilter(wg *sync.WaitGroup, in <-chan *icmpv4.IPPacket, out chan<- *IPPacket) {
	defer wg.Done()
	for p := range in {
		if p.Type != 0 || p.Code != 0 {
			continue
		}
		out <- &IPPacket{
			Packet:     &Packet{Packet: p.Packet},
			LocalAddr:  p.LocalAddr,
			RemoteAddr: p.RemoteAddr,
		}
	}
}
//Listener parses incoming ICMPv4 Echo Replys from an ICMPv4 net.IPConn and sends packets and errors back on channels.
//When done is closed, it returns an error (or nil) from conn.Close().
func Listener(conn *net.IPConn, packets chan<- *IPPacket, errors chan<- error, done <-chan struct{}) error {
	raw := make(chan *icmpv4.IPPacket)
	var wg sync.WaitGroup
	wg.Add(1)
	go convertAndFilter(&wg, raw, packets)
	err := icmpv4.Listener(conn, raw, errors, done)
	// closing the internal channel lets the filter goroutine drain and exit
	close(raw)
	wg.Wait()
	return err
}
//ListenerAll creates a Listener for all IPv4 connections available. It returns a list of addresses that it's
//listening on or an error if it can't get that list.
func ListenerAll(packets chan<- *IPPacket, errors chan<- error, done <-chan struct{}) ([]*net.IPAddr, error) {
	raw := make(chan *icmpv4.IPPacket)
	var wg sync.WaitGroup
	wg.Add(1)
	go convertAndFilter(&wg, raw, packets)
	addrs, err := icmpv4.ListenerAll(raw, errors, done)
	// closing the internal channel lets the filter goroutine drain and exit
	close(raw)
	wg.Wait()
	return addrs, err
}
|
// Package protobuf implements Protocol Buffers reflectively
// using Go types to define message formats.
//
// This approach provides convenience similar to Gob encoding,
// but with a widely-used and language-neutral wire format.
// For general information on Protocol buffers see
// https://developers.google.com/protocol-buffers.
//
// In contrast with goprotobuf,
// this package does not require users to write or compile .proto files;
// you just define the message formats you want as Go struct types.
// Consider this example message format definition
// from the Protocol Buffers overview:
//
// message Person {
// required string name = 1;
// required int32 id = 2;
// optional string email = 3;
//
// enum PhoneType {
// MOBILE = 0;
// HOME = 1;
// WORK = 2;
// }
//
// message PhoneNumber {
// required string number = 1;
// optional PhoneType type = 2;
// }
//
// repeated PhoneNumber phone = 4;
// }
//
// The following Go type and const definitions express exactly the same format,
// for the purposes of encoding and decoding with this protobuf package:
//
// type Person struct {
// Name string
// Id int32
// Email *string
// Phone []PhoneNumber
// }
//
// type PhoneType uint32
// const (
// MOBILE PhoneType = iota
// HOME
// WORK
// )
//
// type PhoneNumber struct {
// Number string
// Type *PhoneType
// }
//
// To encode a message, you simply call the Encode() function
// with a pointer to the struct you wish to encode, and
// Encode() returns a []byte slice containing the protobuf-encoded struct:
//
// person := Person{...}
// buf := Encode(&person)
// output.Write(buf)
//
// To decode an encoded message, simply call Decode() on the byte-slice:
//
// err := Decode(buf,&person,nil)
// if err != nil {
// panic("Decode failed: "+err.Error())
// }
//
// If you want to interoperate with code in other languages
// using the same message formats, you may of course still end up writing
// .proto files for the code in those other languages.
// However, defining message formats with native Go types enables these types
// to be tailored to the code using them without affecting wire compatibility,
// such as by attaching useful methods to these struct types.
// The translation between a Go struct definition
// and a basic Protocol Buffers message format definition is straightforward;
// the rules are as follows.
//
// A message definition in a .proto file translates to a Go struct,
// whose fields are implicitly assigned consecutive numbers starting from 1.
// If you need to leave gaps in the field number sequence
// (e.g., to delete an obsolete field without breaking wire compatibility),
// then you can skip that field number using a blank Go field, like this:
//
// type Padded struct {
// Field1 string // = 1
// _ struct{} // = 2 (unused field number)
// Field2 int32 // = 3
// }
//
// A 'required' protobuf field translates to a plain field
// of a corresponding type in the Go struct.
// The following table summarizes the correspondence between
// .proto definition types and Go field types:
//
// Protobuf Go
// -------- --
// bool bool
// enum Enum
// int32 uint32
// int64 uint64
// uint32 uint32
// uint64 uint64
// sint32 int32
// sint64 int64
// fixed32 Ufixed32
// fixed64 Ufixed64
// sfixed32 Sfixed32
// sfixed64 Sfixed64
// float float32
// double float64
// string string
// bytes []byte
// message struct
//
// An 'optional' protobuf field is expressed as a pointer field in Go.
// Encode() will transmit the field only if the pointer is non-nil.
// Decode() will instantiate the pointed-to type and fill in the pointer
// if the field is present in the message being decoded,
// leaving the pointer unmodified (usually nil) if the field is not present.
//
// A 'repeated' protobuf field translates to a slice field in Go.
// Slices of primitive bool, integer, and float types are encoded
// and decoded in packed format, as if the [packed=true] option
// was declared for the field in the .proto file.
//
// For flexibility and convenience, struct fields may have interface types,
// which this package interprets as having dynamic types to be bound at runtime.
// Encode() follows the interface's implicit pointer and uses reflection
// to determine the referred-to object's actual type for encoding.
// Decode() takes an optional map of interface types to constructor functions,
// which it uses to instantiate concrete types for interfaces while decoding.
// Furthermore, if the instantiated types support the Encoding interface,
// Encode() and Decode() will invoke the methods of that interface,
// allowing objects to implement their own custom encoding/decoding methods.
//
// This package does not try to support all possible protobuf formats.
// It currently does not support nonzero default value declarations for enums,
// the legacy unpacked formats for repeated numeric fields,
// messages with extremely sparse field numbering,
// or other more exotic features like extensions or oneof.
// If you need to interoperate with existing protobuf code using these features,
// then you should probably use goprotobuf,
// at least for those particular message formats.
//
// Another downside of this reflective approach to protobuf implementation
// is that reflective code is generally less efficient than
// statically generated code, as gogoprotobuf produces for example.
// If we decide we want the convenience of format definitions in Go
// with the runtime performance of static code generation,
// we could in principle achieve that by adding a "Go-format"
// message format compiler frontend to goprotobuf or gogoprotobuf -
// but we leave this as an exercise for the reader.
package protobuf
|
package uuid
import (
"fmt"
"strings"
"testing"
)
// TestPrefixedUUID exercises PrefixedUUID with prefixes of various lengths.
//
// NOTE(review): every case sets wantErr: true (including "Happy Path"),
// and the failure branch below only fires when wantErr is false — so as
// written this test can never fail. Confirm whether the happy-path case
// should instead have wantErr: false.
func TestPrefixedUUID(t *testing.T) {
	type args struct {
		prefix string
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "Test with Happy Path",
			args: args{
				prefix: "ab",
			},
			wantErr: true,
		},
		{
			name: "Test with Too Long",
			args: args{
				prefix: "abcd",
			},
			wantErr: true,
		},
		{
			name: "Test with Empty String",
			args: args{
				prefix: "",
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// only reports a failure when the prefix is missing AND the
			// case did not expect an error
			if got := PrefixedUUID(tt.args.prefix); !strings.Contains(got[0:3], tt.args.prefix) && !tt.wantErr {
				t.Errorf("PrefixedUUID() = %v, want %v", got, tt.args.prefix)
			} else {
				fmt.Printf("Success: Wanted prefix of %s, got %s\n", tt.args.prefix, got)
			}
		})
	}
}
|
package main
import (
"encoding/json"
"fmt"
)
// User is a sample JSON-serializable record with an ID and a phone list.
type User struct {
	Id string
	Phone []string
}
// main demonstrates JSON round-tripping of a User value and the zero-value
// behavior of reading from a nil map.
func main() {
	user := User{
		Id:    "123",
		Phone: []string{"A", "B", "C", "D"},
	}
	bytes, err := json.Marshal(&user)
	if err != nil {
		panic(err)
	}
	user1 := User{}
	// the Unmarshal error was previously discarded silently
	if err := json.Unmarshal(bytes, &user1); err != nil {
		panic(err)
	}
	fmt.Println("user1", user1)
	// reading a missing key from a nil map yields the zero value ""
	var A map[string]string
	fmt.Println(A["a"], "aaa")
}
|
package render
import (
"io/ioutil"
"regexp"
"strings"
"github.com/devspace-cloud/devspace/cmd"
"github.com/devspace-cloud/devspace/cmd/flags"
"github.com/devspace-cloud/devspace/e2e/utils"
"github.com/devspace-cloud/devspace/pkg/devspace/deploy/deployer/helm"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/pkg/errors"
)
// chartRegEx matches a component-chart version token in rendered output.
var chartRegEx = regexp.MustCompile(`component-chart-[^\"]+`)

// replaceComponentChart normalizes any component-chart version found in
// `in` to the version pinned by the current DevSpace chart config.
func replaceComponentChart(in string) string {
	replacement := "component-chart-" + helm.DevSpaceChartConfig.Version
	return chartRegEx.ReplaceAllString(in, replacement)
}
// runHelmV2 runs the 'helm_v2' sub test of the 'render' test: it renders
// the project, compares the captured output against the expected fixture
// (normalized for the component-chart version), and verifies exactly one
// image was built.
func runHelmV2(f *customFactory, logger log.Logger) error {
	logger.Info("Run sub test 'helm_v2' of test 'render'")
	logger.StartWait("Run test...")
	defer logger.StopWait()
	err := beforeTest(f, "helm_v2")
	defer afterTest(f)
	if err != nil {
		return errors.Errorf("test 'render' failed: %s %v", f.GetLogContents(), err)
	}
	rc := &cmd.RenderCmd{
		GlobalFlags: &flags.GlobalFlags{},
		SkipPush:    true,
		Tags:        []string{"rM5xKXK"},
	}
	done := utils.Capture()
	if err = rc.Run(f, nil, nil); err != nil {
		return err
	}
	capturedOutput, err := done()
	if err != nil {
		return err
	}
	_ = utils.ChangeWorkingDir(f.Pwd+"/tests/render", f.GetLog())
	expectedOutput, err := ioutil.ReadFile("./expectedoutput/helm_v2")
	if err != nil {
		return err
	}
	expectedOutputStr := replaceComponentChart(string(expectedOutput))
	// idiomatic substring check instead of strings.Index(...) == -1
	if !strings.Contains(capturedOutput, expectedOutputStr) {
		return errors.Errorf("output '%s' does not match expected output '%s'", capturedOutput, expectedOutputStr)
	}
	imagesExpected := 1
	if imagesCount := len(f.builtImages); imagesCount != imagesExpected {
		return errors.Errorf("built images expected: %v, found: %v", imagesExpected, imagesCount)
	}
	// reset the build bookkeeping for subsequent sub tests
	f.builtImages = map[string]string{}
	return nil
}
|
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package android
import (
"errors"
"path/filepath"
"reflect"
"testing"
"github.com/google/blueprint"
)
// TestDependingOnModuleInSameNamespace verifies that a module can resolve
// a dep on another module declared in the same soong_namespace.
func TestDependingOnModuleInSameNamespace(t *testing.T) {
	ctx := setupTest(t,
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a",
		}
		test_module {
			name: "b",
			deps: ["a"],
		}
		`,
		},
	)
	a := getModule(ctx, "a")
	b := getModule(ctx, "b")
	if !dependsOn(ctx, b, a) {
		t.Errorf("module b does not depend on module a in the same namespace")
	}
}
// TestDependingOnModuleInRootNamespace verifies dependency resolution
// between modules that both live in the implicit root namespace, even
// when the dependency is declared before its target.
func TestDependingOnModuleInRootNamespace(t *testing.T) {
	ctx := setupTest(t,
		map[string]string{
			".": `
		test_module {
			name: "b",
			deps: ["a"],
		}
		test_module {
			name: "a",
		}
		`,
		},
	)
	a := getModule(ctx, "a")
	b := getModule(ctx, "b")
	if !dependsOn(ctx, b, a) {
		t.Errorf("module b in root namespace does not depend on module a in the root namespace")
	}
}
// TestImplicitlyImportRootNamespace verifies that a module in a declared
// namespace can depend on a module in the root namespace without an
// explicit import.
func TestImplicitlyImportRootNamespace(t *testing.T) {
	_ = setupTest(t,
		map[string]string{
			".": `
		test_module {
			name: "a",
		}
		`,
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "b",
			deps: ["a"],
		}
		`,
		},
	)
	// setupTest will report any errors
}
// TestDependingOnBlueprintModuleInRootNamespace verifies that namespace
// resolution also applies to plain Blueprint (non-Soong) module types.
func TestDependingOnBlueprintModuleInRootNamespace(t *testing.T) {
	_ = setupTest(t,
		map[string]string{
			".": `
		blueprint_test_module {
			name: "a",
		}
		`,
			"dir1": `
		soong_namespace {
		}
		blueprint_test_module {
			name: "b",
			deps: ["a"],
		}
		`,
		},
	)
	// setupTest will report any errors
}
// TestDependingOnModuleInImportedNamespace verifies that a module can
// depend on a module that lives in a namespace explicitly listed in its
// own namespace's imports.
func TestDependingOnModuleInImportedNamespace(t *testing.T) {
	ctx := setupTest(t,
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a",
		}
		`,
			"dir2": `
		soong_namespace {
			imports: ["dir1"],
		}
		test_module {
			name: "b",
			deps: ["a"],
		}
		`,
		},
	)
	a := getModule(ctx, "a")
	b := getModule(ctx, "b")
	if !dependsOn(ctx, b, a) {
		// message previously said "same namespace" (copy-paste from the
		// same-namespace test); a and b are in different namespaces here
		t.Errorf("module b does not depend on module a in the imported namespace")
	}
}
// TestDependingOnModuleInNonImportedNamespace verifies that resolution
// fails, with a helpful error listing candidate namespaces, when the
// target module exists only in namespaces the depender does not import.
func TestDependingOnModuleInNonImportedNamespace(t *testing.T) {
	_, errs := setupTestExpectErrs(
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a",
		}
		`,
			"dir2": `
		soong_namespace {
		}
		test_module {
			name: "a",
		}
		`,
			"dir3": `
		soong_namespace {
		}
		test_module {
			name: "b",
			deps: ["a"],
		}
		`,
		},
	)
	expectedErrors := []error{
		errors.New(
			`dir3/Android.bp:4:4: "b" depends on undefined module "a"
Module "b" is defined in namespace "dir3" which can read these 2 namespaces: ["dir3" "."]
Module "a" can be found in these namespaces: ["dir1" "dir2"]`),
	}
	if len(errs) != 1 || errs[0].Error() != expectedErrors[0].Error() {
		t.Errorf("Incorrect errors. Expected:\n%v\n, got:\n%v\n", expectedErrors, errs)
	}
}
// TestDependingOnModuleByFullyQualifiedReference verifies that a
// "//namespace:module" reference resolves without the namespace being
// imported.
func TestDependingOnModuleByFullyQualifiedReference(t *testing.T) {
	ctx := setupTest(t,
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a",
		}
		`,
			"dir2": `
		soong_namespace {
		}
		test_module {
			name: "b",
			deps: ["//dir1:a"],
		}
		`,
		},
	)
	a := getModule(ctx, "a")
	b := getModule(ctx, "b")
	if !dependsOn(ctx, b, a) {
		t.Errorf("module b does not depend on module a")
	}
}
// TestSameNameInTwoNamespaces verifies that identically named modules in
// two separate namespaces stay distinct: each "b" must resolve "a" to the
// one in its own namespace. The id property disambiguates the instances.
func TestSameNameInTwoNamespaces(t *testing.T) {
	ctx := setupTest(t,
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a",
			id: "1",
		}
		test_module {
			name: "b",
			deps: ["a"],
			id: "2",
		}
		`,
			"dir2": `
		soong_namespace {
		}
		test_module {
			name: "a",
			id:"3",
		}
		test_module {
			name: "b",
			deps: ["a"],
			id:"4",
		}
		`,
		},
	)
	one := findModuleById(ctx, "1")
	two := findModuleById(ctx, "2")
	three := findModuleById(ctx, "3")
	four := findModuleById(ctx, "4")
	if !dependsOn(ctx, two, one) {
		t.Fatalf("Module 2 does not depend on module 1 in its namespace")
	}
	if dependsOn(ctx, two, three) {
		t.Fatalf("Module 2 depends on module 3 in another namespace")
	}
	if !dependsOn(ctx, four, three) {
		t.Fatalf("Module 4 does not depend on module 3 in its namespace")
	}
	if dependsOn(ctx, four, one) {
		t.Fatalf("Module 4 depends on module 1 in another namespace")
	}
}
// TestSearchOrder verifies namespace search order for name resolution:
// imported namespaces are searched in their declared order (dir1, dir2,
// dir3), with the root namespace last — so "a" resolves in dir1, "b" in
// dir2, "c" in dir3, and "d" falls through to the root.
func TestSearchOrder(t *testing.T) {
	ctx := setupTest(t,
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a",
			id: "1",
		}
		`,
			"dir2": `
		soong_namespace {
		}
		test_module {
			name: "a",
			id:"2",
		}
		test_module {
			name: "b",
			id:"3",
		}
		`,
			"dir3": `
		soong_namespace {
		}
		test_module {
			name: "a",
			id:"4",
		}
		test_module {
			name: "b",
			id:"5",
		}
		test_module {
			name: "c",
			id:"6",
		}
		`,
			".": `
		test_module {
			name: "a",
			id: "7",
		}
		test_module {
			name: "b",
			id: "8",
		}
		test_module {
			name: "c",
			id: "9",
		}
		test_module {
			name: "d",
			id: "10",
		}
		`,
			"dir4": `
		soong_namespace {
			imports: ["dir1", "dir2", "dir3"]
		}
		test_module {
			name: "test_me",
			id:"0",
			deps: ["a", "b", "c", "d"],
		}
		`,
		},
	)
	testMe := findModuleById(ctx, "0")
	if !dependsOn(ctx, testMe, findModuleById(ctx, "1")) {
		t.Errorf("test_me doesn't depend on id 1")
	}
	if !dependsOn(ctx, testMe, findModuleById(ctx, "3")) {
		t.Errorf("test_me doesn't depend on id 3")
	}
	if !dependsOn(ctx, testMe, findModuleById(ctx, "6")) {
		t.Errorf("test_me doesn't depend on id 6")
	}
	if !dependsOn(ctx, testMe, findModuleById(ctx, "10")) {
		t.Errorf("test_me doesn't depend on id 10")
	}
	if numDeps(ctx, testMe) != 4 {
		t.Errorf("num dependencies of test_me = %v, not 4\n", numDeps(ctx, testMe))
	}
}
// TestTwoNamespacesCanImportEachOther verifies that mutually importing
// namespaces are allowed (import cycles do not cause errors).
func TestTwoNamespacesCanImportEachOther(t *testing.T) {
	_ = setupTest(t,
		map[string]string{
			"dir1": `
		soong_namespace {
			imports: ["dir2"]
		}
		test_module {
			name: "a",
		}
		test_module {
			name: "c",
			deps: ["b"],
		}
		`,
			"dir2": `
		soong_namespace {
			imports: ["dir1"],
		}
		test_module {
			name: "b",
			deps: ["a"],
		}
		`,
		},
	)
	// setupTest will report any errors
}
// TestImportingNonexistentNamespace verifies that importing a namespace
// that does not exist is reported once, without also reporting the
// dependency that consequently cannot resolve.
func TestImportingNonexistentNamespace(t *testing.T) {
	_, errs := setupTestExpectErrs(
		map[string]string{
			"dir1": `
		soong_namespace {
			imports: ["a_nonexistent_namespace"]
		}
		test_module {
			name: "a",
			deps: ["a_nonexistent_module"]
		}
		`,
		},
	)
	// should complain about the missing namespace and not complain about the unresolvable dependency
	expectedErrors := []error{
		errors.New(`dir1/Android.bp:2:4: module "soong_namespace": namespace a_nonexistent_namespace does not exist`),
	}
	if len(errs) != 1 || errs[0].Error() != expectedErrors[0].Error() {
		t.Errorf("Incorrect errors. Expected:\n%v\n, got:\n%v\n", expectedErrors, errs)
	}
}
// TestNamespacesDontInheritParentNamespaces verifies that a namespace in a
// subdirectory does not implicitly see modules from its parent directory's
// namespace.
func TestNamespacesDontInheritParentNamespaces(t *testing.T) {
	_, errs := setupTestExpectErrs(
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a",
		}
		`,
			"dir1/subdir1": `
		soong_namespace {
		}
		test_module {
			name: "b",
			deps: ["a"],
		}
		`,
		},
	)
	expectedErrors := []error{
		errors.New(`dir1/subdir1/Android.bp:4:4: "b" depends on undefined module "a"
Module "b" is defined in namespace "dir1/subdir1" which can read these 2 namespaces: ["dir1/subdir1" "."]
Module "a" can be found in these namespaces: ["dir1"]`),
	}
	if len(errs) != 1 || errs[0].Error() != expectedErrors[0].Error() {
		t.Errorf("Incorrect errors. Expected:\n%v\n, got:\n%v\n", expectedErrors, errs)
	}
}
// TestModulesDoReceiveParentNamespace verifies that a subdirectory WITHOUT
// its own soong_namespace inherits the nearest ancestor namespace, so its
// modules can see that namespace's modules.
func TestModulesDoReceiveParentNamespace(t *testing.T) {
	_ = setupTest(t,
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a",
		}
		`,
			"dir1/subdir": `
		test_module {
			name: "b",
			deps: ["a"],
		}
		`,
		},
	)
	// setupTest will report any errors
}
// TestNamespaceImportsNotTransitive verifies that imports are not
// transitive: dir3 imports dir2, and dir2 imports dir1, but dir3 cannot
// resolve modules from dir1.
func TestNamespaceImportsNotTransitive(t *testing.T) {
	_, errs := setupTestExpectErrs(
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a",
		}
		`,
			"dir2": `
		soong_namespace {
			imports: ["dir1"],
		}
		test_module {
			name: "b",
			deps: ["a"],
		}
		`,
			"dir3": `
		soong_namespace {
			imports: ["dir2"],
		}
		test_module {
			name: "c",
			deps: ["a"],
		}
		`,
		},
	)
	expectedErrors := []error{
		errors.New(`dir3/Android.bp:5:4: "c" depends on undefined module "a"
Module "c" is defined in namespace "dir3" which can read these 3 namespaces: ["dir3" "dir2" "."]
Module "a" can be found in these namespaces: ["dir1"]`),
	}
	if len(errs) != 1 || errs[0].Error() != expectedErrors[0].Error() {
		t.Errorf("Incorrect errors. Expected:\n%v\n, got:\n%v\n", expectedErrors, errs)
	}
}
// TestTwoNamespacesInSameDir verifies that declaring two soong_namespace
// modules in the same Android.bp is rejected. (Function name previously
// misspelled "Namepaces".)
func TestTwoNamespacesInSameDir(t *testing.T) {
	_, errs := setupTestExpectErrs(
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		soong_namespace {
		}
		`,
		},
	)
	expectedErrors := []error{
		errors.New(`dir1/Android.bp:4:4: namespace dir1 already exists`),
	}
	if len(errs) != 1 || errs[0].Error() != expectedErrors[0].Error() {
		t.Errorf("Incorrect errors. Expected:\n%v\n, got:\n%v\n", expectedErrors, errs)
	}
}
// TestNamespaceNotAtTopOfFile verifies that a soong_namespace declared
// after another module in the file is rejected.
func TestNamespaceNotAtTopOfFile(t *testing.T) {
	_, errs := setupTestExpectErrs(
		map[string]string{
			"dir1": `
		test_module {
			name: "a"
		}
		soong_namespace {
		}
		`,
		},
	)
	expectedErrors := []error{
		errors.New(`dir1/Android.bp:5:4: a namespace must be the first module in the file`),
	}
	if len(errs) != 1 || errs[0].Error() != expectedErrors[0].Error() {
		t.Errorf("Incorrect errors. Expected:\n%v\n, got:\n%v\n", expectedErrors, errs)
	}
}
// TestTwoModulesWithSameNameInSameNamespace verifies that duplicate module
// names within a single namespace are reported with the location of the
// previous definition.
func TestTwoModulesWithSameNameInSameNamespace(t *testing.T) {
	_, errs := setupTestExpectErrs(
		map[string]string{
			"dir1": `
		soong_namespace {
		}
		test_module {
			name: "a"
		}
		test_module {
			name: "a"
		}
		`,
		},
	)
	expectedErrors := []error{
		errors.New(`dir1/Android.bp:7:4: module "a" already defined
dir1/Android.bp:4:4 <-- previous definition here`),
	}
	if len(errs) != 1 || errs[0].Error() != expectedErrors[0].Error() {
		t.Errorf("Incorrect errors. Expected:\n%v\n, got:\n%v\n", expectedErrors, errs)
	}
}
// TestDeclaringNamespaceInNonAndroidBpFile verifies that soong_namespace is
// rejected when declared in a file included via "build" rather than in an
// Android.bp file itself.
func TestDeclaringNamespaceInNonAndroidBpFile(t *testing.T) {
	_, errs := setupTestFromFiles(
		map[string][]byte{
			"Android.bp": []byte(`
build = ["include.bp"]
`),
			"include.bp": []byte(`
soong_namespace {
}
`),
		},
	)
	expected := []error{
		errors.New(`include.bp:2:5: A namespace may only be declared in a file named Android.bp`),
	}
	// Accept exactly one error with the expected text; otherwise report.
	if len(errs) == 1 && errs[0].Error() == expected[0].Error() {
		return
	}
	t.Errorf("Incorrect errors. Expected:\n%v\n, got:\n%v\n", expected, errs)
}
// TestConsistentNamespaceNames checks that namespaces are assigned
// sequential string ids in directory order,
// so that the generated .ninja file will have consistent names.
func TestConsistentNamespaceNames(t *testing.T) {
	ctx := setupTest(t,
		map[string]string{
			"dir1": "soong_namespace{}",
			"dir2": "soong_namespace{}",
			"dir3": "soong_namespace{}",
		})

	ns1, _ := ctx.NameResolver.namespaceAt("dir1")
	ns2, _ := ctx.NameResolver.namespaceAt("dir2")
	ns3, _ := ctx.NameResolver.namespaceAt("dir3")

	got := []string{ns1.id, ns2.id, ns3.id}
	want := []string{"1", "2", "3"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Incorrect namespace ids.\nactual: %s\nexpected: %s\n", got, want)
	}
}
// TestRename verifies that a module can rename itself via the "rename"
// property and that a dependency on the new name ("c") resolves to the
// renamed module. (The previous comment here was copy-pasted from
// TestConsistentNamespaceNames and did not describe this test.)
func TestRename(t *testing.T) {
	_ = setupTest(t,
		map[string]string{
			"dir1": `
soong_namespace {
}
test_module {
name: "a",
deps: ["c"],
}
test_module {
name: "b",
rename: "c",
}
`})
	// setupTest will report any errors
}
// some utils to support the tests
func mockFiles(bps map[string]string) (files map[string][]byte) {
files = make(map[string][]byte, len(bps))
files["Android.bp"] = []byte("")
for dir, text := range bps {
files[filepath.Join(dir, "Android.bp")] = []byte(text)
}
return files
}
// setupTestFromFiles builds a test context with the namespace-related
// module types and mutators registered, parses the given files, and (if
// parsing succeeds) prepares build actions. Any errors are returned for
// the caller to inspect.
func setupTestFromFiles(bps map[string][]byte) (ctx *TestContext, errs []error) {
	config := TestConfig(buildDir, nil, "", bps)

	ctx = NewTestContext()
	ctx.RegisterModuleType("test_module", newTestModule)
	ctx.RegisterModuleType("soong_namespace", NamespaceFactory)
	ctx.Context.RegisterModuleType("blueprint_test_module", newBlueprintTestModule)
	ctx.PreArchMutators(RegisterNamespaceMutator)
	ctx.PreDepsMutators(func(mctx RegisterMutatorsContext) {
		mctx.BottomUp("rename", renameMutator)
	})
	ctx.Register(config)

	if _, errs = ctx.ParseBlueprintsFiles("Android.bp"); len(errs) > 0 {
		return ctx, errs
	}
	_, errs = ctx.PrepareBuildActions(config)
	return ctx, errs
}
// setupTestExpectErrs parses the given blueprint fragments (one Android.bp
// per directory) and returns the resulting context together with any
// errors, which the caller is expected to inspect.
func setupTestExpectErrs(bps map[string]string) (ctx *TestContext, errs []error) {
	// Delegate to mockFiles rather than duplicating its map-building
	// logic inline (the previous body was a line-for-line copy of it).
	return setupTestFromFiles(mockFiles(bps))
}
// setupTest is like setupTestExpectErrs but fails the test immediately if
// any errors were reported.
func setupTest(t *testing.T, bps map[string]string) (ctx *TestContext) {
	t.Helper()
	result, errs := setupTestExpectErrs(bps)
	FailIfErrored(t, errs)
	return result
}
// dependsOn reports whether module has possibleDependency as one of its
// direct dependencies.
func dependsOn(ctx *TestContext, module TestingModule, possibleDependency TestingModule) bool {
	found := false
	ctx.VisitDirectDeps(module.module, func(dep blueprint.Module) {
		if dep == possibleDependency.module {
			found = true
		}
	})
	return found
}
// numDeps returns the number of direct dependencies of module.
func numDeps(ctx *TestContext, module TestingModule) int {
	total := 0
	ctx.VisitDirectDeps(module.module, func(blueprint.Module) {
		total++
	})
	return total
}
// getModule returns the module with the given name (empty variant) from ctx.
func getModule(ctx *TestContext, moduleName string) TestingModule {
	return ctx.ModuleForTests(moduleName, "")
}
// findModuleById returns the testModule whose Id property equals id, or
// the zero TestingModule if no such module exists.
func findModuleById(ctx *TestContext, id string) (module TestingModule) {
	ctx.VisitAllModules(func(candidate blueprint.Module) {
		if tm, ok := candidate.(*testModule); ok && tm.properties.Id == id {
			module = TestingModule{tm}
		}
	})
	return module
}
// testModule is a minimal Android module type ("test_module") used by the
// namespace tests.
type testModule struct {
	ModuleBase
	properties struct {
		Rename string   // if set, the module renames itself to this name
		Deps   []string // names of modules this module depends on
		Id     string   // arbitrary identifier, looked up by findModuleById
	}
}
// DepsMutator applies the optional rename and then adds a dependency for
// each name listed in the Deps property.
func (m *testModule) DepsMutator(ctx BottomUpMutatorContext) {
	if newName := m.properties.Rename; newName != "" {
		ctx.Rename(newName)
	}
	for _, dep := range m.properties.Deps {
		ctx.AddDependency(ctx.Module(), nil, dep)
	}
}
// GenerateAndroidBuildActions is a no-op; these test modules emit no build
// rules.
func (m *testModule) GenerateAndroidBuildActions(ModuleContext) {
}
// renameMutator renames any testModule whose Rename property is set.
func renameMutator(ctx BottomUpMutatorContext) {
	m, ok := ctx.Module().(*testModule)
	if !ok {
		return
	}
	if m.properties.Rename != "" {
		ctx.Rename(m.properties.Rename)
	}
}
// newTestModule is the module factory for "test_module".
func newTestModule() Module {
	module := &testModule{}
	module.AddProperties(&module.properties)
	InitAndroidModule(module)
	return module
}
// blueprintTestModule is a plain blueprint (non-Android) module,
// registered as "blueprint_test_module" in setupTestFromFiles.
type blueprintTestModule struct {
	blueprint.SimpleName
	properties struct {
		Deps []string // names of modules this module depends on
	}
}
// DynamicDependencies returns the dependency names listed in the Deps
// property.
func (b *blueprintTestModule) DynamicDependencies(ctx blueprint.DynamicDependerModuleContext) []string {
	return b.properties.Deps
}
// GenerateBuildActions is a no-op; this test module emits no build rules.
func (b *blueprintTestModule) GenerateBuildActions(blueprint.ModuleContext) {
}
// newBlueprintTestModule is the module factory for "blueprint_test_module".
func newBlueprintTestModule() (blueprint.Module, []interface{}) {
	module := &blueprintTestModule{}
	props := []interface{}{&module.properties, &module.SimpleName.Properties}
	return module, props
}
|
package statics
/*
BPAsset satisfies the AssetEmbedder interface.
It is namespaced for the root of the boilerplate directory;
see the boilerplate directory in bootstrap for its content.
*/
import (
"github.com/getcouragenow/core-bs/sdk/pkg/common/embed"
_ "github.com/getcouragenow/core-bs/statiks/bp"
"github.com/rakyll/statik/fs"
"net/http"
)
// BPAsset satisfies the embed.AssetEmbedder interface by wrapping a
// statik-backed http.FileSystem that holds the boilerplate assets.
type BPAsset struct {
	fsys http.FileSystem // the rakyll fs
}
// func filterNS(namespaces []string, arg string) bool {
// for _, ns := range namespaces {
// if ns == arg {
// return true
// }
// }
// return false
// }
// NewBPAsset returns an AssetEmbedder for the given statik namespace.
// Namespace validation is currently disabled (see the commented-out
// filterNS helper above): the namespace is passed through unchecked.
// For now valid namespaces are hardcoded by callers; later down the line
// they will be generated and the validation re-enabled.
// func NewBPAsset(namespaces []string, namespaceArg string) (embed.AssetEmbedder, error) {
func NewBPAsset(namespaceArg string) (embed.AssetEmbedder, error) {
	// found := filterNS(namespaces, namespaceArg)
	// if !found {
	// 	return nil, errors.New(
	// 		fmt.Sprintf("namespace not found: %s", namespaceArg),
	// 	)
	// }
	return newBPAsset(namespaceArg)
}
// newBPAsset builds a BPAsset backed by the statik filesystem registered
// under the given namespace.
func newBPAsset(namespace string) (embed.AssetEmbedder, error) {
	fsys, err := fs.NewWithNamespace(namespace)
	if err != nil {
		return nil, err
	}
	asset := &BPAsset{fsys: fsys}
	return asset, nil
}
// GetFS exposes the underlying statik-backed http.FileSystem.
func (r *BPAsset) GetFS() http.FileSystem { return r.fsys }
// WriteAllFiles delegates to the package-level writeAllFiles helper,
// passing the embedded filesystem and the destination outputPath.
func (r *BPAsset) WriteAllFiles(outputPath string) error {
	return writeAllFiles(r.fsys, outputPath)
}
// ReadSingleFile delegates to the package-level readSingleFile helper to
// return the contents of the named file from the embedded filesystem.
func (r *BPAsset) ReadSingleFile(name string) ([]byte, error) {
	return readSingleFile(r.fsys, name)
}
|
package templates
import (
"text/template"
"github.com/lithammer/dedent"
)
var (
// HarborConfigTempl renders a harbor.cfg file (Harbor .cfg format,
// _version 1.5.0). Template data fields: .HostName, .Scheme, .SSLCert,
// .SSLCertKey, .AdminPassword, .DBPassword; every other setting is
// emitted as a fixed default. The literal is passed through
// dedent.Dedent before parsing. template.Must panics at init time if the
// template fails to parse.
HarborConfigTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
## Configuration file of Harbor
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version = 1.5.0
#The IP address or hostname to access admin UI and registry service.
#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname = {{ .HostName }}
#The protocol for accessing the UI and token/notification service, by default it is http.
#It can be set to https if ssl is enabled on nginx.
ui_url_protocol = {{ .Scheme }}
#Maximum number of job workers in job service
max_job_workers = 50
#Determine whether or not to generate certificate for the registry's token.
#If the value is on, the prepare script creates new root cert and private key
#for generating token to access the registry. If the value is off the default key/cert will be used.
#This flag also controls the creation of the notary signer's cert.
customize_crt = on
#The path of cert and key files for nginx, they are applied only the protocol is set to https
ssl_cert = {{ .SSLCert }}
ssl_cert_key = {{ .SSLCertKey }}
#The path of secretkey storage
secretkey_path = /data
#Admiral's url, comment this attribute, or set its value to NA when Harbor is standalone
admiral_url = NA
#Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
log_rotate_count = 50
#Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
#If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
#are all valid.
log_rotate_size = 200M
#Config http proxy for Clair, e.g. http://my.proxy.com:3128
#Clair doesn't need to connect to harbor ui container via http proxy.
http_proxy =
https_proxy =
no_proxy = 127.0.0.1,localhost,ui
#NOTES: The properties between BEGIN INITIAL PROPERTIES and END INITIAL PROPERTIES
#only take effect in the first boot, the subsequent changes of these properties
#should be performed on web ui
#************************BEGIN INITIAL PROPERTIES************************
#Email account settings for sending out password resetting emails.
#Email server uses the given username and password to authenticate on TLS connections to host and act as identity.
#Identity left blank to act as username.
email_identity =
email_server = smtp.mydomain.com
email_server_port = 25
email_username = sample_admin@mydomain.com
email_password = abc
email_from = admin <sample_admin@mydomain.com>
email_ssl = false
email_insecure = false
##The initial password of Harbor admin, only works for the first time when Harbor starts.
#It has no effect after the first launch of Harbor.
#Change the admin password from UI after launching Harbor.
harbor_admin_password = {{ .AdminPassword }}
##By default the auth mode is db_auth, i.e. the credentials are stored in a local database.
#Set it to ldap_auth if you want to verify a user's credentials against an LDAP server.
auth_mode = db_auth
#The url for an ldap endpoint.
ldap_url = ldaps://ldap.mydomain.com
#A user's DN who has the permission to search the LDAP/AD server.
#If your LDAP/AD server does not support anonymous search, you should configure this DN and ldap_search_pwd.
#ldap_searchdn = uid=searchuser,ou=people,dc=mydomain,dc=com
#the password of the ldap_searchdn
#ldap_search_pwd = password
#The base DN from which to look up a user in LDAP/AD
ldap_basedn = ou=people,dc=mydomain,dc=com
#Search filter for LDAP/AD, make sure the syntax of the filter is correct.
#ldap_filter = (objectClass=person)
# The attribute used in a search to match a user, it could be uid, cn, email, sAMAccountName or other attributes depending on your LDAP/AD
ldap_uid = uid
#the scope to search for users, 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
ldap_scope = 2
#Timeout (in seconds) when connecting to an LDAP Server. The default value (and most reasonable) is 5 seconds.
ldap_timeout = 5
#Verify certificate from LDAP server
ldap_verify_cert = true
#The base dn from which to lookup a group in LDAP/AD
ldap_group_basedn = ou=group,dc=mydomain,dc=com
#filter to search LDAP/AD group
ldap_group_filter = objectclass=group
#The attribute used to name a LDAP/AD group, it could be cn, name
ldap_group_gid = cn
#The scope to search for ldap groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
ldap_group_scope = 2
#Turn on or off the self-registration feature
self_registration = on
#The expiration time (in minute) of token created by token service, default is 30 minutes
token_expiration = 30
#The flag to control what users have permission to create projects
#The default value "everyone" allows everyone to creates a project.
#Set to "adminonly" so that only admin user can create project.
project_creation_restriction = everyone
#************************END INITIAL PROPERTIES************************
#######Harbor DB configuration section#######
#The address of the Harbor database. Only need to change when using external db.
db_host = mysql
#The password for the root user of Harbor DB. Change this before any production use.
db_password = {{ .DBPassword }}
#The port of Harbor database host
db_port = 3306
#The user name of Harbor database
db_user = root
##### End of Harbor DB configuration#######
#The redis server address. Only needed in HA installation.
#address:port[,weight,password,db_index]
redis_url = redis:6379
##########Clair DB configuration############
#Clair DB host address. Only change it when using an exteral DB.
clair_db_host = postgres
#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
#Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database.
clair_db_password = password
#Clair DB connect port
clair_db_port = 5432
#Clair DB username
clair_db_username = postgres
#Clair default database
clair_db = postgres
##########End of Clair DB configuration############
#The following attributes only need to be set when auth mode is uaa_auth
uaa_endpoint = uaa.mydomain.org
uaa_clientid = id
uaa_clientsecret = secret
uaa_verify_cert = true
uaa_ca_cert = /path/to/ca.pem
### Docker Registry setting ###
#registry_storage_provider can be: filesystem, s3, gcs, azure, etc.
registry_storage_provider_name = filesystem
#registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2".
#Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration.
registry_storage_provider_config =
`)))
HarborPrepareTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals # We require Python 2.6 or later
from string import Template
import random
import string
import os
import sys
import argparse
import subprocess
import shutil
from io import open
if sys.version_info[:3][0] == 2:
import ConfigParser as ConfigParser
import StringIO as StringIO
if sys.version_info[:3][0] == 3:
import configparser as ConfigParser
import io as StringIO
def validate(conf, args):
if args.ha_mode:
db_host = rcp.get("configuration", "db_host")
if db_host == "mysql":
raise Exception(
"Error: In HA mode, db_host in harbor.cfg needs to point to an external DB address."
)
registry_storage_provider_name = rcp.get(
"configuration", "registry_storage_provider_name").strip()
if registry_storage_provider_name == "filesystem" and not args.yes:
msg = 'Is the Harbor Docker Registry configured to use shared storage (e.g. NFS, Ceph etc.)? [yes/no]:'
if raw_input(msg).lower() != "yes":
raise Exception(
"Error: In HA mode, shared storage configuration for Docker Registry in harbor.cfg is required. Refer to HA installation guide for details."
)
redis_url = rcp.get("configuration", "redis_url")
if redis_url is None or len(redis_url) < 1:
raise Exception(
"Error: In HA mode, redis_url in harbor.cfg needs to point to a Redis cluster."
)
if args.notary_mode:
raise Exception("Error: HA mode doesn't support Notary currently")
if args.clair_mode:
clair_db_host = rcp.get("configuration", "clair_db_host")
if "postgres" == clair_db_host:
raise Exception(
"Error: In HA mode, clair_db_host in harbor.cfg needs to point to an external Postgres DB address."
)
cert_path = rcp.get("configuration", "ssl_cert")
cert_key_path = rcp.get("configuration", "ssl_cert_key")
shared_cert_key = os.path.join(base_dir, "ha",
os.path.basename(cert_key_path))
shared_cert_path = os.path.join(base_dir, "ha",
os.path.basename(cert_path))
if os.path.isfile(shared_cert_key):
shutil.copy2(shared_cert_key, cert_key_path)
if os.path.isfile(shared_cert_path):
shutil.copy2(shared_cert_path, cert_path)
protocol = rcp.get("configuration", "ui_url_protocol")
if protocol != "https" and args.notary_mode:
raise Exception(
"Error: the protocol must be https when Harbor is deployed with Notary"
)
if protocol == "https":
if not rcp.has_option("configuration", "ssl_cert"):
raise Exception(
"Error: The protocol is https but attribute ssl_cert is not set"
)
cert_path = rcp.get("configuration", "ssl_cert")
if not os.path.isfile(cert_path):
raise Exception(
"Error: The path for certificate: %s is invalid" % cert_path)
if not rcp.has_option("configuration", "ssl_cert_key"):
raise Exception(
"Error: The protocol is https but attribute ssl_cert_key is not set"
)
cert_key_path = rcp.get("configuration", "ssl_cert_key")
if not os.path.isfile(cert_key_path):
raise Exception(
"Error: The path for certificate key: %s is invalid" %
cert_key_path)
project_creation = rcp.get("configuration", "project_creation_restriction")
if project_creation != "everyone" and project_creation != "adminonly":
raise Exception(
"Error invalid value for project_creation_restriction: %s" %
project_creation)
def prepare_ha(conf, args):
#files under ha folder will have high prority
protocol = rcp.get("configuration", "ui_url_protocol")
if protocol == "https":
#copy nginx certificate
cert_path = rcp.get("configuration", "ssl_cert")
cert_key_path = rcp.get("configuration", "ssl_cert_key")
shared_cert_key = os.path.join(base_dir, "ha",
os.path.basename(cert_key_path))
shared_cert_path = os.path.join(base_dir, "ha",
os.path.basename(cert_path))
if os.path.isfile(shared_cert_key):
shutil.copy2(shared_cert_key, cert_key_path)
else:
if os.path.isfile(cert_key_path):
shutil.copy2(cert_key_path, shared_cert_key)
if os.path.isfile(shared_cert_path):
shutil.copy2(shared_cert_path, cert_path)
else:
if os.path.isfile(cert_path):
shutil.copy2(cert_path, shared_cert_path)
#check if ca exsit
cert_ca_path = "/data/ca_download/ca.crt"
shared_ca_path = os.path.join(base_dir, "ha",
os.path.basename(cert_ca_path))
if os.path.isfile(shared_ca_path):
shutil.copy2(shared_ca_path, cert_ca_path)
else:
if os.path.isfile(cert_ca_path):
shutil.copy2(cert_ca_path, shared_ca_path)
#check root.crt and priviate_key.pem
private_key_pem = os.path.join(config_dir, "ui", "private_key.pem")
root_crt = os.path.join(config_dir, "registry", "root.crt")
shared_private_key_pem = os.path.join(base_dir, "ha", "private_key.pem")
shared_root_crt = os.path.join(base_dir, "ha", "root.crt")
if os.path.isfile(shared_private_key_pem):
shutil.copy2(shared_private_key_pem, private_key_pem)
else:
if os.path.isfile(private_key_pem):
shutil.copy2(private_key_pem, shared_private_key_pem)
if os.path.isfile(shared_root_crt):
shutil.copy2(shared_root_crt, root_crt)
else:
if os.path.isfile(root_crt):
shutil.copy2(root_crt, shared_root_crt)
#secretkey
shared_secret_key = os.path.join(base_dir, "ha", "secretkey")
secretkey_path = rcp.get("configuration", "secretkey_path")
secret_key = os.path.join(secretkey_path, "secretkey")
if os.path.isfile(shared_secret_key):
shutil.copy2(shared_secret_key, secret_key)
else:
if os.path.isfile(secret_key):
shutil.copy2(secret_key, shared_secret_key)
def get_secret_key(path):
secret_key = _get_secret(path, "secretkey")
if len(secret_key) != 16:
raise Exception(
"secret key's length has to be 16 chars, current length: %d" %
len(secret_key))
return secret_key
def get_alias(path):
alias = _get_secret(path, "defaultalias", length=8)
return alias
def _get_secret(folder, filename, length=16):
key_file = os.path.join(folder, filename)
if os.path.isfile(key_file):
with open(key_file, 'r') as f:
key = f.read()
print("loaded secret from file: %s" % key_file)
return key
if not os.path.isdir(folder):
os.makedirs(folder, mode=0o600)
key = ''.join(
random.choice(string.ascii_letters + string.digits)
for i in range(length))
with open(key_file, 'w') as f:
f.write(key)
print("Generated and saved secret to file: %s" % key_file)
os.chmod(key_file, 0o600)
return key
def prep_conf_dir(root, name):
absolute_path = os.path.join(root, name)
if not os.path.exists(absolute_path):
os.makedirs(absolute_path)
return absolute_path
def render(src, dest, **kw):
t = Template(open(src, 'r').read())
with open(dest, 'w') as f:
f.write(t.substitute(**kw))
print("Generated configuration file: %s" % dest)
base_dir = os.path.dirname(__file__)
config_dir = os.path.join(base_dir, "common/config")
templates_dir = os.path.join(base_dir, "common/templates")
def delfile(src):
if os.path.isfile(src):
try:
os.remove(src)
print("Clearing the configuration file: %s" % src)
except:
pass
elif os.path.isdir(src):
for item in os.listdir(src):
itemsrc = os.path.join(src, item)
delfile(itemsrc)
parser = argparse.ArgumentParser()
parser.add_argument(
'--conf',
dest='cfgfile',
default=base_dir + '/harbor.cfg',
type=str,
help="the path of Harbor configuration file")
parser.add_argument(
'--with-notary',
dest='notary_mode',
default=False,
action='store_true',
help="the Harbor instance is to be deployed with notary")
parser.add_argument(
'--with-clair',
dest='clair_mode',
default=False,
action='store_true',
help="the Harbor instance is to be deployed with clair")
parser.add_argument(
'--ha',
dest='ha_mode',
default=False,
action='store_true',
help="the Harbor instance is to be deployed in HA mode")
parser.add_argument(
'--yes',
dest='yes',
default=False,
action='store_true',
help="Answer yes to all questions")
args = parser.parse_args()
delfile(config_dir)
#Read configurations
conf = StringIO.StringIO()
conf.write("[configuration]\n")
conf.write(open(args.cfgfile).read())
conf.seek(0, os.SEEK_SET)
rcp = ConfigParser.RawConfigParser()
rcp.readfp(conf)
validate(rcp, args)
reload_config = rcp.get("configuration", "reload_config") if rcp.has_option(
"configuration", "reload_config") else "false"
hostname = rcp.get("configuration", "hostname")
protocol = rcp.get("configuration", "ui_url_protocol")
public_url = protocol + "://" + hostname
email_identity = rcp.get("configuration", "email_identity")
email_host = rcp.get("configuration", "email_server")
email_port = rcp.get("configuration", "email_server_port")
email_usr = rcp.get("configuration", "email_username")
email_pwd = rcp.get("configuration", "email_password")
email_from = rcp.get("configuration", "email_from")
email_ssl = rcp.get("configuration", "email_ssl")
email_insecure = rcp.get("configuration", "email_insecure")
harbor_admin_password = rcp.get("configuration", "harbor_admin_password")
auth_mode = rcp.get("configuration", "auth_mode")
ldap_url = rcp.get("configuration", "ldap_url")
# this two options are either both set or unset
if rcp.has_option("configuration", "ldap_searchdn"):
ldap_searchdn = rcp.get("configuration", "ldap_searchdn")
ldap_search_pwd = rcp.get("configuration", "ldap_search_pwd")
else:
ldap_searchdn = ""
ldap_search_pwd = ""
ldap_basedn = rcp.get("configuration", "ldap_basedn")
# ldap_filter is null by default
if rcp.has_option("configuration", "ldap_filter"):
ldap_filter = rcp.get("configuration", "ldap_filter")
else:
ldap_filter = ""
ldap_uid = rcp.get("configuration", "ldap_uid")
ldap_scope = rcp.get("configuration", "ldap_scope")
ldap_timeout = rcp.get("configuration", "ldap_timeout")
ldap_verify_cert = rcp.get("configuration", "ldap_verify_cert")
ldap_group_basedn = rcp.get("configuration", "ldap_group_basedn")
ldap_group_filter = rcp.get("configuration", "ldap_group_filter")
ldap_group_gid = rcp.get("configuration", "ldap_group_gid")
ldap_group_scope = rcp.get("configuration", "ldap_group_scope")
db_password = rcp.get("configuration", "db_password")
db_host = rcp.get("configuration", "db_host")
db_user = rcp.get("configuration", "db_user")
db_port = rcp.get("configuration", "db_port")
self_registration = rcp.get("configuration", "self_registration")
if protocol == "https":
cert_path = rcp.get("configuration", "ssl_cert")
cert_key_path = rcp.get("configuration", "ssl_cert_key")
customize_crt = rcp.get("configuration", "customize_crt")
max_job_workers = rcp.get("configuration", "max_job_workers")
token_expiration = rcp.get("configuration", "token_expiration")
proj_cre_restriction = rcp.get("configuration", "project_creation_restriction")
secretkey_path = rcp.get("configuration", "secretkey_path")
if rcp.has_option("configuration", "admiral_url"):
admiral_url = rcp.get("configuration", "admiral_url")
else:
admiral_url = ""
clair_db_password = rcp.get("configuration", "clair_db_password")
clair_db_host = rcp.get("configuration", "clair_db_host")
clair_db_port = rcp.get("configuration", "clair_db_port")
clair_db_username = rcp.get("configuration", "clair_db_username")
clair_db = rcp.get("configuration", "clair_db")
uaa_endpoint = rcp.get("configuration", "uaa_endpoint")
uaa_clientid = rcp.get("configuration", "uaa_clientid")
uaa_clientsecret = rcp.get("configuration", "uaa_clientsecret")
uaa_verify_cert = rcp.get("configuration", "uaa_verify_cert")
uaa_ca_cert = rcp.get("configuration", "uaa_ca_cert")
secret_key = get_secret_key(secretkey_path)
log_rotate_count = rcp.get("configuration", "log_rotate_count")
log_rotate_size = rcp.get("configuration", "log_rotate_size")
if rcp.has_option("configuration", "redis_url"):
redis_url = rcp.get("configuration", "redis_url")
else:
redis_url = ""
storage_provider_name = rcp.get("configuration",
"registry_storage_provider_name").strip()
storage_provider_config = rcp.get("configuration",
"registry_storage_provider_config").strip()
# yaml requires 1 or more spaces between the key and value
storage_provider_config = storage_provider_config.replace(":", ": ", 1)
ui_secret = ''.join(
random.choice(string.ascii_letters + string.digits) for i in range(16))
jobservice_secret = ''.join(
random.choice(string.ascii_letters + string.digits) for i in range(16))
adminserver_config_dir = os.path.join(config_dir, "adminserver")
if not os.path.exists(adminserver_config_dir):
os.makedirs(os.path.join(config_dir, "adminserver"))
ui_config_dir = prep_conf_dir(config_dir, "ui")
ui_certificates_dir = prep_conf_dir(ui_config_dir, "certificates")
db_config_dir = prep_conf_dir(config_dir, "db")
job_config_dir = prep_conf_dir(config_dir, "jobservice")
registry_config_dir = prep_conf_dir(config_dir, "registry")
nginx_config_dir = prep_conf_dir(config_dir, "nginx")
nginx_conf_d = prep_conf_dir(nginx_config_dir, "conf.d")
log_config_dir = prep_conf_dir(config_dir, "log")
adminserver_conf_env = os.path.join(config_dir, "adminserver", "env")
ui_conf_env = os.path.join(config_dir, "ui", "env")
ui_conf = os.path.join(config_dir, "ui", "app.conf")
ui_cert_dir = os.path.join(config_dir, "ui", "certificates")
jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")
registry_conf = os.path.join(config_dir, "registry", "config.yml")
db_conf_env = os.path.join(config_dir, "db", "env")
job_conf_env = os.path.join(config_dir, "jobservice", "env")
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
cert_dir = os.path.join(config_dir, "nginx", "cert")
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
adminserver_url = "http://adminserver:8080"
registry_url = "http://registry:5000"
ui_url = "http://ui:8080"
token_service_url = "http://ui:8080/service/token"
jobservice_url = "http://jobservice:8080"
clair_url = "http://clair:6060"
notary_url = "http://notary-server:4443"
if protocol == "https":
target_cert_path = os.path.join(cert_dir, os.path.basename(cert_path))
if not os.path.exists(cert_dir):
os.makedirs(cert_dir)
shutil.copy2(cert_path, target_cert_path)
target_cert_key_path = os.path.join(cert_dir,
os.path.basename(cert_key_path))
shutil.copy2(cert_key_path, target_cert_key_path)
render(
os.path.join(templates_dir, "nginx", "nginx.https.conf"),
nginx_conf,
ssl_cert=os.path.join("/etc/nginx/cert",
os.path.basename(target_cert_path)),
ssl_cert_key=os.path.join("/etc/nginx/cert",
os.path.basename(target_cert_key_path)))
else:
render(os.path.join(templates_dir, "nginx", "nginx.http.conf"), nginx_conf)
render(
os.path.join(templates_dir, "adminserver", "env"),
adminserver_conf_env,
reload_config=reload_config,
public_url=public_url,
ui_url=ui_url,
auth_mode=auth_mode,
self_registration=self_registration,
ldap_url=ldap_url,
ldap_searchdn=ldap_searchdn,
ldap_search_pwd=ldap_search_pwd,
ldap_basedn=ldap_basedn,
ldap_filter=ldap_filter,
ldap_uid=ldap_uid,
ldap_scope=ldap_scope,
ldap_verify_cert=ldap_verify_cert,
ldap_timeout=ldap_timeout,
ldap_group_basedn=ldap_group_basedn,
ldap_group_filter=ldap_group_filter,
ldap_group_gid=ldap_group_gid,
ldap_group_scope=ldap_group_scope,
db_password=db_password,
db_host=db_host,
db_user=db_user,
db_port=db_port,
email_host=email_host,
email_port=email_port,
email_usr=email_usr,
email_pwd=email_pwd,
email_ssl=email_ssl,
email_insecure=email_insecure,
email_from=email_from,
email_identity=email_identity,
harbor_admin_password=harbor_admin_password,
project_creation_restriction=proj_cre_restriction,
max_job_workers=max_job_workers,
ui_secret=ui_secret,
jobservice_secret=jobservice_secret,
token_expiration=token_expiration,
admiral_url=admiral_url,
with_notary=args.notary_mode,
with_clair=args.clair_mode,
clair_db_password=clair_db_password,
clair_db_host=clair_db_host,
clair_db_port=clair_db_port,
clair_db_username=clair_db_username,
clair_db=clair_db,
uaa_endpoint=uaa_endpoint,
uaa_clientid=uaa_clientid,
uaa_clientsecret=uaa_clientsecret,
uaa_verify_cert=uaa_verify_cert,
storage_provider_name=storage_provider_name,
registry_url=registry_url,
token_service_url=token_service_url,
jobservice_url=jobservice_url,
clair_url=clair_url,
notary_url=notary_url)
render(
os.path.join(templates_dir, "ui", "env"),
ui_conf_env,
ui_secret=ui_secret,
jobservice_secret=jobservice_secret,
redis_url=redis_url,
adminserver_url=adminserver_url)
registry_config_file = "config_ha.yml" if args.ha_mode else "config.yml"
if storage_provider_name == "filesystem":
if not storage_provider_config:
storage_provider_config = "rootdirectory: /storage"
elif "rootdirectory:" not in storage_provider_config:
storage_provider_config = "rootdirectory: /storage" + "," + storage_provider_config
# generate storage configuration section in yaml format
storage_provider_info = (
'\n' + ' ' * 4).join([storage_provider_name + ':'] +
map(string.strip, storage_provider_config.split(",")))
render(
os.path.join(templates_dir, "registry", registry_config_file),
registry_conf,
storage_provider_info=storage_provider_info,
public_url=public_url,
ui_url=ui_url,
redis_url=redis_url)
render(
os.path.join(templates_dir, "db", "env"),
db_conf_env,
db_password=db_password)
render(
os.path.join(templates_dir, "jobservice", "env"),
job_conf_env,
ui_secret=ui_secret,
jobservice_secret=jobservice_secret,
adminserver_url=adminserver_url)
render(
os.path.join(templates_dir, "jobservice", "config.yml"),
jobservice_conf,
max_job_workers=max_job_workers,
redis_url=redis_url)
render(
os.path.join(templates_dir, "log", "logrotate.conf"),
log_rotate_config,
log_rotate_count=log_rotate_count,
log_rotate_size=log_rotate_size)
print("Generated configuration file: %s" % jobservice_conf)
print("Generated configuration file: %s" % ui_conf)
shutil.copyfile(os.path.join(templates_dir, "ui", "app.conf"), ui_conf)
if auth_mode == "uaa_auth":
if os.path.isfile(uaa_ca_cert):
if not os.path.isdir(ui_cert_dir):
os.makedirs(ui_cert_dir, mode=0o600)
ui_uaa_ca = os.path.join(ui_cert_dir, "uaa_ca.pem")
print("Copying UAA CA cert to %s" % ui_uaa_ca)
shutil.copyfile(uaa_ca_cert, ui_uaa_ca)
else:
print("Can not find UAA CA cert: %s, skip" % uaa_ca_cert)
def validate_crt_subj(dirty_subj):
subj_list = [item for item in dirty_subj.strip().split("/") \
if len(item.split("=")) == 2 and len(item.split("=")[1]) > 0]
return "/" + "/".join(subj_list)
FNULL = open(os.devnull, 'w')
from functools import wraps
def stat_decorator(func):
@wraps(func)
def check_wrapper(*args, **kw):
stat = func(*args, **kw)
message = "Generated certificate, key file: %s, cert file: %s" % (kw['key_path'], kw['cert_path']) \
if stat == 0 else "Fail to generate key file: %s, cert file: %s" % (kw['key_path'], kw['cert_path'])
print(message)
if stat != 0:
sys.exit(1)
return check_wrapper
@stat_decorator
def create_root_cert(subj, key_path="./k.key", cert_path="./cert.crt"):
rc = subprocess.call(
["openssl", "genrsa", "-out", key_path, "4096"],
stdout=FNULL,
stderr=subprocess.STDOUT)
if rc != 0:
return rc
return subprocess.call(["openssl", "req", "-new", "-x509", "-key", key_path,\
"-out", cert_path, "-days", "3650", "-subj", subj], stdout=FNULL, stderr=subprocess.STDOUT)
@stat_decorator
def create_cert(subj,
ca_key,
ca_cert,
key_path="./k.key",
cert_path="./cert.crt"):
cert_dir = os.path.dirname(cert_path)
csr_path = os.path.join(cert_dir, "tmp.csr")
rc = subprocess.call(["openssl", "req", "-newkey", "rsa:4096", "-nodes","-sha256","-keyout", key_path,\
"-out", csr_path, "-subj", subj], stdout=FNULL, stderr=subprocess.STDOUT)
if rc != 0:
return rc
return subprocess.call(["openssl", "x509", "-req", "-days", "3650", "-in", csr_path, "-CA", \
ca_cert, "-CAkey", ca_key, "-CAcreateserial", "-out", cert_path], stdout=FNULL, stderr=subprocess.STDOUT)
def openssl_installed():
shell_stat = subprocess.check_call(
["which", "openssl"], stdout=FNULL, stderr=subprocess.STDOUT)
if shell_stat != 0:
print(
"Cannot find openssl installed in this computer\nUse default SSL certificate file"
)
return False
return True
if customize_crt == 'on' and openssl_installed():
shell_stat = subprocess.check_call(
["which", "openssl"], stdout=FNULL, stderr=subprocess.STDOUT)
empty_subj = "/C=/ST=/L=/O=/CN=/"
private_key_pem = os.path.join(config_dir, "ui", "private_key.pem")
root_crt = os.path.join(config_dir, "registry", "root.crt")
create_root_cert(empty_subj, key_path=private_key_pem, cert_path=root_crt)
os.chmod(private_key_pem, 0o600)
os.chmod(root_crt, 0o600)
else:
print("Copied configuration file: %s" % ui_config_dir + "private_key.pem")
shutil.copyfile(
os.path.join(templates_dir, "ui", "private_key.pem"),
os.path.join(ui_config_dir, "private_key.pem"))
print("Copied configuration file: %s" % registry_config_dir + "root.crt")
shutil.copyfile(
os.path.join(templates_dir, "registry", "root.crt"),
os.path.join(registry_config_dir, "root.crt"))
if args.notary_mode:
notary_config_dir = prep_conf_dir(config_dir, "notary")
notary_temp_dir = os.path.join(templates_dir, "notary")
print("Copying sql file for notary DB")
if os.path.exists(os.path.join(notary_config_dir, "mysql-initdb.d")):
shutil.rmtree(os.path.join(notary_config_dir, "mysql-initdb.d"))
shutil.copytree(
os.path.join(notary_temp_dir, "mysql-initdb.d"),
os.path.join(notary_config_dir, "mysql-initdb.d"))
if customize_crt == 'on' and openssl_installed():
try:
temp_cert_dir = os.path.join(base_dir, "cert_tmp")
if not os.path.exists(temp_cert_dir):
os.makedirs(temp_cert_dir)
ca_subj = "/C=US/ST=California/L=Palo Alto/O=VMware, Inc./OU=Harbor/CN=Self-signed by VMware, Inc."
cert_subj = "/C=US/ST=California/L=Palo Alto/O=VMware, Inc./OU=Harbor/CN=notarysigner"
signer_ca_cert = os.path.join(temp_cert_dir,
"notary-signer-ca.crt")
signer_ca_key = os.path.join(temp_cert_dir, "notary-signer-ca.key")
signer_cert_path = os.path.join(temp_cert_dir, "notary-signer.crt")
signer_key_path = os.path.join(temp_cert_dir, "notary-signer.key")
create_root_cert(
ca_subj, key_path=signer_ca_key, cert_path=signer_ca_cert)
create_cert(
cert_subj,
signer_ca_key,
signer_ca_cert,
key_path=signer_key_path,
cert_path=signer_cert_path)
print("Copying certs for notary signer")
os.chmod(signer_cert_path, 0o600)
os.chmod(signer_key_path, 0o600)
os.chmod(signer_ca_cert, 0o600)
shutil.copy2(signer_cert_path, notary_config_dir)
shutil.copy2(signer_key_path, notary_config_dir)
shutil.copy2(signer_ca_cert, notary_config_dir)
finally:
srl_tmp = os.path.join(os.getcwd(), ".srl")
if os.path.isfile(srl_tmp):
os.remove(srl_tmp)
if os.path.isdir(temp_cert_dir):
shutil.rmtree(temp_cert_dir, True)
else:
print("Copying certs for notary signer")
shutil.copy2(
os.path.join(notary_temp_dir, "notary-signer.crt"),
notary_config_dir)
shutil.copy2(
os.path.join(notary_temp_dir, "notary-signer.key"),
notary_config_dir)
shutil.copy2(
os.path.join(notary_temp_dir, "notary-signer-ca.crt"),
notary_config_dir)
shutil.copy2(
os.path.join(registry_config_dir, "root.crt"), notary_config_dir)
print("Copying notary signer configuration file")
shutil.copy2(
os.path.join(notary_temp_dir, "signer-config.json"), notary_config_dir)
render(
os.path.join(notary_temp_dir, "server-config.json"),
os.path.join(notary_config_dir, "server-config.json"),
token_endpoint=public_url)
print("Copying nginx configuration file for notary")
shutil.copy2(
os.path.join(templates_dir, "nginx", "notary.upstream.conf"),
nginx_conf_d)
render(
os.path.join(templates_dir, "nginx", "notary.server.conf"),
os.path.join(nginx_conf_d, "notary.server.conf"),
ssl_cert=os.path.join("/etc/nginx/cert",
os.path.basename(target_cert_path)),
ssl_cert_key=os.path.join("/etc/nginx/cert",
os.path.basename(target_cert_key_path)))
default_alias = get_alias(secretkey_path)
render(
os.path.join(notary_temp_dir, "signer_env"),
os.path.join(notary_config_dir, "signer_env"),
alias=default_alias)
if args.clair_mode:
clair_temp_dir = os.path.join(templates_dir, "clair")
clair_config_dir = prep_conf_dir(config_dir, "clair")
if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
print("Copying offline data file for clair DB")
shutil.rmtree(os.path.join(clair_config_dir, "postgresql-init.d"))
shutil.copytree(
os.path.join(clair_temp_dir, "postgresql-init.d"),
os.path.join(clair_config_dir, "postgresql-init.d"))
postgres_env = os.path.join(clair_config_dir, "postgres_env")
render(
os.path.join(clair_temp_dir, "postgres_env"),
postgres_env,
password=clair_db_password)
clair_conf = os.path.join(clair_config_dir, "config.yaml")
render(
os.path.join(clair_temp_dir, "config.yaml"),
clair_conf,
password=clair_db_password,
username=clair_db_username,
host=clair_db_host,
port=clair_db_port,
dbname=clair_db)
# config http proxy for Clair
http_proxy = rcp.get("configuration", "http_proxy").strip()
https_proxy = rcp.get("configuration", "https_proxy").strip()
no_proxy = rcp.get("configuration", "no_proxy").strip()
clair_env = os.path.join(clair_config_dir, "clair_env")
render(
os.path.join(clair_temp_dir, "clair_env"),
clair_env,
http_proxy=http_proxy,
https_proxy=https_proxy,
no_proxy=no_proxy)
if args.ha_mode:
prepare_ha(rcp, args)
FNULL.close()
print(
"The configuration files are ready, please use docker-compose to start the service."
)
`)))
HarborDockerComposeTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
version: '2'
services:
log:
image: vmware/harbor-log:v1.5.1
container_name: harbor-log
restart: always
volumes:
- /var/log/harbor/:/var/log/docker/:z
- ./common/config/log/:/etc/logrotate.d/:z
ports:
- 127.0.0.1:1514:10514
networks:
- harbor
registry:
image: vmware/registry-photon:v2.6.2-v1.5.1
container_name: registry
restart: always
volumes:
- /data/registry:/storage:z
- ./common/config/registry/:/etc/registry/:z
networks:
- harbor
environment:
- GODEBUG=netdns=cgo
command:
["serve", "/etc/registry/config.yml"]
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "registry"
mysql:
image: vmware/harbor-db:v1.5.1
container_name: harbor-db
restart: always
volumes:
- /data/database:/var/lib/mysql:z
networks:
- harbor
env_file:
- ./common/config/db/env
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "mysql"
adminserver:
image: vmware/harbor-adminserver:v1.5.1
container_name: harbor-adminserver
env_file:
- ./common/config/adminserver/env
restart: always
volumes:
- /data/config/:/etc/adminserver/config/:z
- /data/secretkey:/etc/adminserver/key:z
- /data/:/data/:z
networks:
- harbor
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "adminserver"
ui:
image: vmware/harbor-ui:v1.5.1
container_name: harbor-ui
env_file:
- ./common/config/ui/env
restart: always
volumes:
- ./common/config/ui/app.conf:/etc/ui/app.conf:z
- ./common/config/ui/private_key.pem:/etc/ui/private_key.pem:z
- ./common/config/ui/certificates/:/etc/ui/certificates/:z
- /data/secretkey:/etc/ui/key:z
- /data/ca_download/:/etc/ui/ca/:z
- /data/psc/:/etc/ui/token/:z
networks:
- harbor
depends_on:
- log
- adminserver
- registry
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "ui"
jobservice:
image: vmware/harbor-jobservice:v1.5.1
container_name: harbor-jobservice
env_file:
- ./common/config/jobservice/env
restart: always
volumes:
- /data/job_logs:/var/log/jobs:z
- ./common/config/jobservice/config.yml:/etc/jobservice/config.yml:z
networks:
- harbor
depends_on:
- redis
- ui
- adminserver
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "jobservice"
redis:
image: vmware/redis-photon:v1.5.1
container_name: redis
restart: always
volumes:
- /data/redis:/data
networks:
- harbor
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "redis"
proxy:
image: vmware/nginx-photon:v1.5.1
container_name: nginx
restart: always
volumes:
- ./common/config/nginx:/etc/nginx:z
networks:
- harbor
ports:
- 80:80
- 443:443
- 4443:4443
depends_on:
- mysql
- registry
- ui
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "proxy"
networks:
harbor:
external: false
`)))
HarborAdminServerEnvTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
PORT=8080
LOG_LEVEL=info
EXT_ENDPOINT=$public_url
AUTH_MODE=$auth_mode
SELF_REGISTRATION=$self_registration
LDAP_URL=$ldap_url
LDAP_SEARCH_DN=$ldap_searchdn
LDAP_SEARCH_PWD=$ldap_search_pwd
LDAP_BASE_DN=$ldap_basedn
LDAP_FILTER=$ldap_filter
LDAP_UID=$ldap_uid
LDAP_SCOPE=$ldap_scope
LDAP_TIMEOUT=$ldap_timeout
LDAP_VERIFY_CERT=$ldap_verify_cert
LDAP_GROUP_BASEDN=$ldap_group_basedn
LDAP_GROUP_FILTER=$ldap_group_filter
LDAP_GROUP_GID=$ldap_group_gid
LDAP_GROUP_SCOPE=$ldap_group_scope
DATABASE_TYPE=mysql
MYSQL_HOST=$db_host
MYSQL_PORT=$db_port
MYSQL_USR=$db_user
MYSQL_PWD=$db_password
MYSQL_DATABASE=registry
REGISTRY_URL=$registry_url
TOKEN_SERVICE_URL=$token_service_url
EMAIL_HOST=$email_host
EMAIL_PORT=$email_port
EMAIL_USR=$email_usr
EMAIL_PWD=$email_pwd
EMAIL_SSL=$email_ssl
EMAIL_FROM=$email_from
EMAIL_IDENTITY=$email_identity
EMAIL_INSECURE=$email_insecure
HARBOR_ADMIN_PASSWORD=$harbor_admin_password
PROJECT_CREATION_RESTRICTION=$project_creation_restriction
MAX_JOB_WORKERS=$max_job_workers
UI_SECRET=$ui_secret
JOBSERVICE_SECRET=$jobservice_secret
TOKEN_EXPIRATION=$token_expiration
CFG_EXPIRATION=5
GODEBUG=netdns=cgo
ADMIRAL_URL=$admiral_url
WITH_NOTARY=$with_notary
WITH_CLAIR=$with_clair
CLAIR_DB_PASSWORD=$clair_db_password
CLAIR_DB_HOST=$clair_db_host
CLAIR_DB_PORT=$clair_db_port
CLAIR_DB_USERNAME=$clair_db_username
CLAIR_DB=$clair_db
RESET=$reload_config
UAA_ENDPOINT=$uaa_endpoint
UAA_CLIENTID=$uaa_clientid
UAA_CLIENTSECRET=$uaa_clientsecret
UAA_VERIFY_CERT=$uaa_verify_cert
UI_URL=$ui_url
JOBSERVICE_URL=$jobservice_url
CLAIR_URL=$clair_url
NOTARY_URL=$notary_url
REGISTRY_STORAGE_PROVIDER_NAME=$storage_provider_name
READ_ONLY=false
`)))
HarborDBEnvTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
MYSQL_ROOT_PASSWORD=$db_password
`)))
HarborJobServerConfigTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
---
#Protocol used to serve
protocol: "http"
#Config certification if use 'https' protocol
#https_config:
# cert: "server.crt"
# key: "server.key"
#Server listening port
port: 8080
#Worker pool
worker_pool:
#Worker concurrency
workers: $max_job_workers
backend: "redis"
#Additional config if use 'redis' backend
redis_pool:
#redis://[arbitrary_username:password@]ipaddress:port/database_index
#or ipaddress:port[,weight,password,database_index]
redis_url: $redis_url
namespace: "harbor_job_service_namespace"
#Logger for job
logger:
path: "/var/log/jobs"
level: "INFO"
archive_period: 14 #days
#Admin server endpoint
admin_server: "http://adminserver:8080/"
`)))
HarborJobServerEnvTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
UI_SECRET=$ui_secret
JOBSERVICE_SECRET=$jobservice_secret
ADMINSERVER_URL=$adminserver_url
GODEBUG=netdns=cgo
`)))
HarborLogrotateConfigTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
/var/log/docker/*.log {
rotate $log_rotate_count
size $log_rotate_size
copytruncate
compress
missingok
nodateext
}
`)))
HarborNginxHTTPSConfigTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
worker_processes auto;
events {
worker_connections 1024;
use epoll;
multi_accept on;
}
http {
tcp_nodelay on;
include /etc/nginx/conf.d/*.upstream.conf;
# this is necessary for us to be able to disable request buffering in all cases
proxy_http_version 1.1;
upstream registry {
server registry:5000;
}
upstream ui {
server ui:8080;
}
log_format timed_combined '$$remote_addr - '
'"$$request" $$status $$body_bytes_sent '
'"$$http_referer" "$$http_user_agent" '
'$$request_time $$upstream_response_time $$pipe';
access_log /dev/stdout timed_combined;
include /etc/nginx/conf.d/*.server.conf;
server {
listen 443 ssl;
# server_name harbordomain.com;
server_tokens off;
# SSL
ssl_certificate $ssl_cert;
ssl_certificate_key $ssl_cert_key;
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
ssl_protocols TLSv1.1 TLSv1.2;
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://ui/;
proxy_set_header Host $$http_host;
proxy_set_header X-Real-IP $$remote_addr;
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
proxy_set_header X-Forwarded-Proto $$scheme;
# Add Secure flag when serving HTTPS
proxy_cookie_path / "/; secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /v1/ {
return 404;
}
location /v2/ {
proxy_pass http://ui/registryproxy/v2/;
proxy_set_header Host $$http_host;
proxy_set_header X-Real-IP $$remote_addr;
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
proxy_set_header X-Forwarded-Proto $$scheme;
proxy_buffering off;
proxy_request_buffering off;
}
location /service/ {
proxy_pass http://ui/service/;
proxy_set_header Host $$http_host;
proxy_set_header X-Real-IP $$remote_addr;
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
proxy_set_header X-Forwarded-Proto $$scheme;
proxy_buffering off;
proxy_request_buffering off;
}
location /service/notifications {
return 404;
}
}
server {
listen 80;
#server_name harbordomain.com;
return 301 https://$$host$$request_uri;
}
}
`)))
HarborNginxHTTPConfigTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
worker_processes auto;
events {
worker_connections 1024;
use epoll;
multi_accept on;
}
http {
tcp_nodelay on;
# this is necessary for us to be able to disable request buffering in all cases
proxy_http_version 1.1;
upstream registry {
server registry:5000;
}
upstream ui {
server ui:8080;
}
log_format timed_combined '$$remote_addr - '
'"$$request" $$status $$body_bytes_sent '
'"$$http_referer" "$$http_user_agent" '
'$$request_time $$upstream_response_time $$pipe';
access_log /dev/stdout timed_combined;
server {
listen 80;
server_tokens off;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
location / {
proxy_pass http://ui/;
proxy_set_header Host $$host;
proxy_set_header X-Real-IP $$remote_addr;
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
proxy_set_header X-Forwarded-Proto $$scheme;
proxy_buffering off;
proxy_request_buffering off;
}
location /v1/ {
return 404;
}
location /v2/ {
proxy_pass http://ui/registryproxy/v2/;
proxy_set_header Host $$http_host;
proxy_set_header X-Real-IP $$remote_addr;
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
proxy_set_header X-Forwarded-Proto $$scheme;
proxy_buffering off;
proxy_request_buffering off;
}
location /service/ {
proxy_pass http://ui/service/;
proxy_set_header Host $$host;
proxy_set_header X-Real-IP $$remote_addr;
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
proxy_set_header X-Forwarded-Proto $$scheme;
proxy_buffering off;
proxy_request_buffering off;
}
location /service/notifications {
return 404;
}
}
}
`)))
HarborRegistryConfigTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
version: 0.1
log:
level: info
fields:
service: registry
storage:
cache:
layerinfo: inmemory
$storage_provider_info
maintenance:
uploadpurging:
enabled: false
delete:
enabled: true
http:
addr: :5000
secret: placeholder
debug:
addr: localhost:5001
auth:
token:
issuer: harbor-token-issuer
realm: $public_url/service/token
rootcertbundle: /etc/registry/root.crt
service: harbor-registry
notifications:
endpoints:
- name: harbor
disabled: false
url: $ui_url/service/notifications
timeout: 3000ms
threshold: 5
backoff: 1s
`)))
HarborUIAppTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
appname = Harbor
runmode = dev
enablegzip = true
[dev]
httpport = 8080
`)))
HarborUIEnvTempl = template.Must(template.New("harbor").Parse(dedent.Dedent(`
LOG_LEVEL=info
CONFIG_PATH=/etc/ui/app.conf
UI_SECRET=$ui_secret
JOBSERVICE_SECRET=$jobservice_secret
GODEBUG=netdns=cgo
ADMINSERVER_URL=$adminserver_url
UAA_CA_ROOT=/etc/ui/certificates/uaa_ca.pem
_REDIS_URL=$redis_url
`)))
)
|
package contruntime
import (
"encoding/json"
. "github.com/onsi/gomega"
"github.com/werf/werf/integration/pkg/utils"
"github.com/werf/werf/test/pkg/thirdparty/contruntime/manifest"
)
// NewDockerRuntime constructs a ContainerRuntime backed by the local docker CLI.
func NewDockerRuntime() ContainerRuntime {
	runtime := new(DockerRuntime)
	return runtime
}
// DockerRuntime drives containers through the docker CLI. It embeds
// BaseContainerRuntime for behavior shared across runtime implementations.
type DockerRuntime struct {
	BaseContainerRuntime
}
// ExpectCmdsToSucceed asserts that each command exits successfully when run
// in a container based on image; it delegates to the shared helper.
func (r *DockerRuntime) ExpectCmdsToSucceed(image string, cmds ...string) {
	expectCmdsToSucceed(r, image, cmds...)
}
// RunSleepingContainer starts a detached, auto-removed container named
// containerName from image, kept alive by `tail -f /dev/null` with the
// image's entrypoint cleared.
func (r *DockerRuntime) RunSleepingContainer(containerName, image string) {
	args := []string{
		"run", "--rm", "-d", "--entrypoint=", "--name", containerName,
		image, "tail", "-f", "/dev/null",
	}
	utils.RunSucceedCommand("/", "docker", args...)
}
// Exec runs each shell snippet in order inside the named container,
// failing the calling test on the first non-zero exit.
func (r *DockerRuntime) Exec(containerName string, cmds ...string) {
	for i := range cmds {
		utils.RunSucceedCommand("/", "docker", "exec", containerName, "sh", "-ec", cmds[i])
	}
}
// Rm force-removes the named container along with its anonymous volumes (-fv).
func (r *DockerRuntime) Rm(containerName string) {
	utils.RunSucceedCommand("/", "docker", "rm", "-fv", containerName)
}
// Pull fetches the given image from its registry, failing the test on error.
func (r *DockerRuntime) Pull(image string) {
	utils.RunSucceedCommand("/", "docker", "pull", image)
}
// GetImageInspectConfig returns the image's container config as reported by
// `docker image inspect`, JSON-decoded into a Schema2Config. Command or
// decoding failures fail the calling Gomega test.
func (r *DockerRuntime) GetImageInspectConfig(image string) (config manifest.Schema2Config) {
	configRaw, err := utils.RunCommand("/", "docker", "image", "inspect", "-f", "{{ json .Config }}", image)
	Expect(err).NotTo(HaveOccurred())
	Expect(json.Unmarshal(configRaw, &config)).To(Succeed())
	return config
}
|
package main
import "errors"
import "fmt"
// f1 returns arg+3 on success, or (-1, error) when arg is the sentinel 42.
func f1(arg int) (int, error) {
	if arg != 42 {
		return arg + 3, nil
	}
	return -1, errors.New("the number 42 is unlucky man")
}
// argError is a custom error type carrying the offending argument and a
// human-readable description of the problem.
type argError struct {
	arg  int
	prob string
}

// Error implements the error interface, rendering "<arg> - <prob>".
func (e argError) Error() string {
	return fmt.Sprintf("%d - %s", e.arg, e.prob)
}

// f2 behaves like f1 but reports the failure through the custom *argError
// type rather than a plain errors.New value.
func f2(arg int) (int, error) {
	if arg != 42 {
		return arg + 3, nil
	}
	return -1, &argError{arg: arg, prob: "can't work with this"}
}
|
package requests
import (
"encoding/json"
"testing"
"github.com/mitchellh/mapstructure"
"github.com/stretchr/testify/assert"
)
// TestDecodeWalletAddRequest verifies JSON decoding of wallet_add requests,
// including that an explicitly empty "key" field decodes to "".
func TestDecodeWalletAddRequest(t *testing.T) {
	encoded := `{"action":"wallet_add","key":"1234","wallet":"1234"}`
	var decoded WalletAddRequest
	// Decode errors were previously discarded; surface them so a malformed
	// fixture fails loudly instead of producing zero-valued assertions.
	assert.NoError(t, json.Unmarshal([]byte(encoded), &decoded))
	assert.Equal(t, "wallet_add", decoded.Action)
	assert.Equal(t, "1234", decoded.Key)
	assert.Equal(t, "1234", decoded.Wallet)

	encoded = `{"action":"wallet_add","key":"","wallet":"12345"}`
	assert.NoError(t, json.Unmarshal([]byte(encoded), &decoded))
	assert.Equal(t, "wallet_add", decoded.Action)
	assert.Equal(t, "", decoded.Key)
	assert.Equal(t, "12345", decoded.Wallet)
}
// TestMapStructureDecodeWalletAddRequest verifies decoding the same request
// shape from a generic map via mapstructure.
func TestMapStructureDecodeWalletAddRequest(t *testing.T) {
	request := map[string]interface{}{
		"action": "wallet_add",
		"key":    "1234",
		"wallet": "1234",
	}
	var decoded WalletAddRequest
	// The Decode error was previously ignored; a schema mismatch should
	// fail the test explicitly rather than via downstream zero values.
	assert.NoError(t, mapstructure.Decode(request, &decoded))
	assert.Equal(t, "wallet_add", decoded.Action)
	assert.Equal(t, "1234", decoded.Key)
	assert.Equal(t, "1234", decoded.Wallet)
}
|
package main
import "fmt"
// main demonstrates Go map semantics: looking up a missing key is not an
// error (it yields the value type's zero value), and deleting a missing key
// is a harmless no-op.
func main() {
	salaries := map[string]float64{
		"José de Arimateia": 7564.15,
		"Maria Madalena":    5461.3,
		"João Batista":      10000.0,
	}
	fmt.Println(salaries["João Batista"])

	// A lookup of an absent key prints 0 (the float64 zero value), and the
	// subsequent delete of that same absent key does nothing — neither fails.
	fmt.Println(salaries["Tales"])
	delete(salaries, "Tales")
}
|
package orm
import (
"database/sql"
"testing"
)
// TestDelete checks the argument validation performed by Delete: a nil db,
// an empty table name, and a nil id must each produce a specific error.
func TestDelete(t *testing.T) {
	// The original guards read `got != nil && ...`, which silently passed
	// whenever Delete returned no error at all. Each case now also fails
	// when the expected error is missing, not only when its text is wrong.
	_, got, _, _ := Delete(nil, "", nil)
	want := "db can't be nil"
	if got == nil || got.Error() != want {
		t.Errorf("got %q; want %q", got, want)
	}
	db := &sql.DB{}
	_, got2, _, _ := Delete(db, "", nil)
	want2 := "table can't be empty"
	if got2 == nil || got2.Error() != want2 {
		t.Errorf("got %q; want %q", got2, want2)
	}
	_, got3, _, _ := Delete(db, "user", nil)
	want3 := "id can't be nil"
	if got3 == nil || got3.Error() != want3 {
		t.Errorf("got %q; want %q", got3, want3)
	}
}
|
package controllers
import (
"github.com/go-pg/pg"
"github.com/go-pg/pg/orm"
"github.com/goadesign/goa"
"github.com/odiak/MoneyForest/app"
"github.com/odiak/MoneyForest/constants"
"github.com/odiak/MoneyForest/store"
uuid "github.com/satori/go.uuid"
)
// AccountController implements the account resource endpoints.
type AccountController struct {
	*CommonController
	// db is the go-pg connection (or transaction) used for all queries.
	db orm.DB
}
// NewAccountController creates an account controller bound to the given DB.
func NewAccountController(service *goa.Service, db orm.DB) *AccountController {
	return &AccountController{
		CommonController: NewCommonController(service, "AccountController"),
		db:               db,
	}
}
// ToAccountMedia converts a stored account into its API media representation.
//
// NOTE(review): the uuid parse error is discarded, so a malformed account.ID
// silently becomes the zero UUID — verify IDs are always valid UUIDs upstream.
func ToAccountMedia(account *store.Account) *app.AccountMedia {
	id, _ := uuid.FromString(account.ID)
	return &app.AccountMedia{
		ID:          id,
		Name:        account.Name,
		Description: account.Description,
		AccountType: account.AccountType,
		Balance:     int(account.Balance),
		HasBalance:  account.HasBalance,
	}
}
// ToAccountMediaList converts a slice of stored accounts into API media,
// preserving order.
func ToAccountMediaList(accounts []store.Account) []*app.AccountMedia {
	media := make([]*app.AccountMedia, len(accounts))
	// Index directly into the slice so each element is read in place rather
	// than copied into a loop variable first.
	for i := range accounts {
		media[i] = ToAccountMedia(&accounts[i])
	}
	return media
}
// FromAccountPayload builds a new store.Account from an API payload.
// ID and OwnerID are intentionally left unset; callers fill them in
// (see Create, which assigns OwnerID).
func FromAccountPayload(payload *app.AccountPayload) *store.Account {
	return &store.Account{
		Name:        payload.Name,
		Description: payload.Description,
		AccountType: payload.AccountType,
		HasBalance:  payload.HasBalance,
		Balance:     int32(payload.Balance),
	}
}
// UpdateFromAccountPayload copies every mutable field from the payload onto
// an existing account, unconditionally overwriting previous values.
func UpdateFromAccountPayload(account *store.Account, payload *app.AccountPayload) {
	account.Name = payload.Name
	account.Description = payload.Description
	account.AccountType = payload.AccountType
	account.HasBalance = payload.HasBalance
	account.Balance = int32(payload.Balance)
}
// List returns the current user's accounts ordered by name, paginated by
// ctx.Count/ctx.Page. It fetches count+1 rows so it can report whether a
// further page exists without issuing a separate COUNT query.
func (c *AccountController) List(ctx *app.ListAccountContext) error {
	currentUser := GetCurrentUser(ctx)
	count := ctx.Count
	page := ctx.Page
	var accounts []store.Account
	err := c.db.Model(&accounts).
		Where("owner_id = ?", currentUser.ID).
		Order("name").
		Limit(count + 1). // one extra row purely to detect a next page
		Offset((page - 1) * count).
		Select()
	if err != nil && err != pg.ErrNoRows { // no rows is just an empty page
		return c.UnexpectedError(err)
	}
	hasNext := len(accounts) > count
	if hasNext {
		// Drop the sentinel (count+1)-th row before returning the page.
		accounts = accounts[:len(accounts)-1]
	}
	return ctx.OK(&app.AccountListMedia{
		Accounts: ToAccountMediaList(accounts),
		HasNext:  hasNext,
	})
}
// Show fetches a single account by ID, scoped to the current user so one
// user cannot read another user's account. Missing accounts yield 404.
//
// NOTE(review): "?id" is a go-pg model placeholder bound to account.ID (set
// in the struct literal above); the positional "?" receives currentUser.ID.
// Confirm this matches the query style used in Update, which binds both
// values positionally.
func (c *AccountController) Show(ctx *app.ShowAccountContext) error {
	currentUser := ctx.Value(constants.CurrentUserKey).(*store.User)
	account := &store.Account{ID: ctx.AccountID.String()}
	err := c.db.Model(account).
		Where("id = ?id AND owner_id = ?", currentUser.ID).
		Select()
	if err != nil {
		if err == pg.ErrNoRows {
			return ctx.NotFound()
		}
		return c.UnexpectedError(err)
	}
	return ctx.OK(ToAccountMedia(account))
}
// Create inserts a new account owned by the current user and returns its
// API representation (including the ID assigned on insert).
func (c *AccountController) Create(ctx *app.CreateAccountContext) error {
	currentUser := ctx.Value(constants.CurrentUserKey).(*store.User)
	account := FromAccountPayload(ctx.Payload)
	account.OwnerID = currentUser.ID
	err := c.db.Insert(account)
	if err != nil {
		return c.UnexpectedError(err)
	}
	return ctx.OK(ToAccountMedia(account))
}
// Update loads the current user's account by ID, applies the payload fields,
// and persists the result. Missing accounts yield 404.
func (c *AccountController) Update(ctx *app.UpdateAccountContext) error {
	currentUser := ctx.Value(constants.CurrentUserKey).(*store.User)
	accountID := ctx.AccountID.String()
	account := store.Account{}
	err := c.db.Model(&account).
		Where("id = ?", accountID).
		Where("owner_id = ?", currentUser.ID). // ownership check doubles as authz
		Limit(1).
		Select()
	if err != nil {
		if err == pg.ErrNoRows {
			return ctx.NotFound()
		}
		return c.UnexpectedError(err)
	}
	UpdateFromAccountPayload(&account, ctx.Payload)
	err = c.db.Update(&account)
	if err != nil {
		return c.UnexpectedError(err)
	}
	return ctx.OK(ToAccountMedia(&account))
}
// Delete removes the account with the given ID, scoped to the current user
// so one user cannot delete another user's account.
//
// NOTE(review): the orm.Result is discarded, so deleting a nonexistent
// account still answers 204 NoContent rather than 404 — inconsistent with
// Show/Update. Consider checking RowsAffected() if the API design declares
// a NotFound response for this action.
func (c *AccountController) Delete(ctx *app.DeleteAccountContext) error {
	currentUser := ctx.Value(constants.CurrentUserKey).(*store.User)
	account := &store.Account{ID: ctx.AccountID.String()} // ?id binds to this ID
	_, err := c.db.Model(account).
		Where("id = ?id").
		Where("owner_id = ?", currentUser.ID).
		Delete()
	if err != nil {
		return c.UnexpectedError(err)
	}
	return ctx.NoContent()
}
|
package main
import (
"fmt"
"log"
T "gorgonia.org/gorgonia"
"gorgonia.org/tensor"
)
// main builds a small expression graph — tanh(x+y) over 100x100 float32
// matrices — and executes it 1000 times on a tape machine with the tanh op
// offloaded to CUDA.
func main() {
	g := T.NewGraph()
	x := T.NewMatrix(g, T.Float32, T.WithName("x"), T.WithShape(100, 100))
	y := T.NewMatrix(g, T.Float32, T.WithName("y"), T.WithShape(100, 100))
	xpy := T.Must(T.Add(x, y))
	xpy2 := T.Must(T.Tanh(xpy))

	m := T.NewTapeMachine(g, T.UseCudaFor("tanh"))
	// Close releases the VM's resources (including any CUDA contexts and
	// device buffers); the original leaked them.
	defer m.Close()

	T.Let(x, tensor.New(tensor.WithShape(100, 100), tensor.WithBacking(tensor.Random(tensor.Float32, 100*100))))
	T.Let(y, tensor.New(tensor.WithShape(100, 100), tensor.WithBacking(tensor.Random(tensor.Float32, 100*100))))
	for i := 0; i < 1000; i++ {
		if err := m.RunAll(); err != nil {
			log.Fatalf("iteration: %d. Err: %v", i, err)
		}
		// A tape machine must be reset before it can be run again; without
		// this, iterations after the first do not re-execute the program.
		m.Reset()
	}
	fmt.Printf("%1.1f", xpy2.Value())
}
|
import (
"math/rand"
)
// Solution supports picking a uniformly random index holding a target value
// without precomputing a value→indices table, using reservoir sampling.
type Solution struct {
	nums []int
}

// Constructor wraps the input slice in a Solution.
func Constructor(nums []int) Solution {
	s := Solution{nums}
	return s
}

// Pick returns an index i such that s.nums[i] == target, chosen uniformly at
// random among all matching indices in O(n) time and O(1) extra space.
// If target does not occur, it returns 0.
//
// (Receiver renamed from the non-idiomatic `this` to `s`, per Go convention.)
func (s *Solution) Pick(target int) int {
	selected, seen := 0, 1
	for idx, v := range s.nums {
		if v != target {
			continue
		}
		// Replace the current choice with probability 1/seen; after the
		// scan, every matching index is equally likely (reservoir sampling).
		if rand.Intn(seen) == 0 {
			selected = idx
		}
		seen++
	}
	return selected
}
|
package main
import (
"fmt"
"math"
)
// Vertex is a 2-D point. Go has no classes, but any named type — structs
// included — may carry methods.
type Vertex struct {
	X, Y float64
}

// Scale multiplies both coordinates by factor in place. The pointer receiver
// is what makes the mutation visible to the caller; with a value receiver
// (v Vertex) the method would operate on a copy and the caller's Vertex
// would remain unchanged.
func (v *Vertex) Scale(factor float64) {
	v.X *= factor
	v.Y *= factor
}
// MyFloat is a float64 with an Abs method, showing that methods can hang off
// non-struct named types too.
type MyFloat float64

// Abs returns the absolute value of f. A value receiver suffices: Abs never
// needs to mutate the caller's copy.
func (f MyFloat) Abs() float64 {
	if f < 0 {
		f = -f
	}
	return float64(f)
}
// main exercises both receiver flavors defined above: the mutating pointer
// receiver (Vertex.Scale) and the read-only value receiver (MyFloat.Abs).
func main() {
	v := Vertex{10, 20}
	fmt.Println("Vertex unscalled", v)
	v.Scale(5) // mutates v in place via the pointer receiver
	fmt.Println("Vertex.Scale(5) = ", v)
	f := MyFloat(-math.Sqrt2)
	fmt.Println("Myfloat.Abs() ", f.Abs())
}
|
package integration
import (
"fmt"
"os"
. "gopkg.in/check.v1"
)
// TestFields verifies that cpuset and mem_limit from the compose file are
// propagated into the created container's HostConfig.
func (s *RunSuite) TestFields(c *C) {
p := s.CreateProjectFromText(c, `
hello:
image: tianon/true
cpuset: 1,2
mem_limit: 4194304
`)
name := fmt.Sprintf("%s_%s_1", p, "hello")
cn := s.GetContainerByName(c, name)
c.Assert(cn, NotNil)
c.Assert(cn.Config.Image, Equals, "tianon/true")
c.Assert(cn.HostConfig.CPUSetCPUs, Equals, "1,2")
c.Assert(cn.HostConfig.Memory, Equals, int64(4194304))
}
// TestEmptyEntrypoint checks that an explicit empty entrypoint ([]) in the
// compose file yields a container with a nil entrypoint.
func (s *RunSuite) TestEmptyEntrypoint(c *C) {
p := s.CreateProjectFromText(c, `
nil-cmd:
image: busybox
entrypoint: []
`)
name := fmt.Sprintf("%s_%s_1", p, "nil-cmd")
cn := s.GetContainerByName(c, name)
c.Assert(cn, NotNil)
c.Assert(cn.Config.Entrypoint, IsNil)
}
// TestHelloWorld is the smoke test: a single-service project creates a
// container following the <project>_<service>_1 naming scheme.
func (s *RunSuite) TestHelloWorld(c *C) {
p := s.CreateProjectFromText(c, `
hello:
image: tianon/true
`)
name := fmt.Sprintf("%s_%s_1", p, "hello")
cn := s.GetContainerByName(c, name)
c.Assert(cn, NotNil)
c.Assert(cn.Name, Equals, "/"+name)
}
// TestContainerName verifies that container_name overrides the generated
// <project>_<service>_1 naming scheme.
func (s *RunSuite) TestContainerName(c *C) {
containerName := "containerName"
template := fmt.Sprintf(`hello:
image: busybox
command: top
container_name: %s`, containerName)
s.CreateProjectFromText(c, template)
cn := s.GetContainerByName(c, containerName)
c.Assert(cn, NotNil)
c.Assert(cn.Name, Equals, "/"+containerName)
}
// TestContainerNameWithScale documents that a service pinned to an explicit
// container_name cannot be scaled: after `scale hello=2` the project still
// holds exactly one container, since a fixed name cannot be duplicated.
func (s *RunSuite) TestContainerNameWithScale(c *C) {
containerName := "containerName"
template := fmt.Sprintf(`hello:
image: busybox
command: top
container_name: %s`, containerName)
p := s.CreateProjectFromText(c, template)
s.FromText(c, p, "scale", "hello=2", template)
containers := s.GetContainersByProject(c, p)
c.Assert(len(containers), Equals, 1)
}
// TestInterpolation verifies that $VAR references in a compose file are
// substituted from the environment, by comparing against a reference
// project with the same image hard-coded.
func (s *RunSuite) TestInterpolation(c *C) {
	os.Setenv("IMAGE", "tianon/true")
	// Deferred so the variable is removed even when a c.Assert failure
	// aborts the test midway; the original only unset it on the success
	// path, leaking IMAGE into subsequent tests in the suite.
	defer os.Unsetenv("IMAGE")
	p := s.CreateProjectFromText(c, `
test:
image: $IMAGE
`)
	name := fmt.Sprintf("%s_%s_1", p, "test")
	testContainer := s.GetContainerByName(c, name)
	p = s.CreateProjectFromText(c, `
reference:
image: tianon/true
`)
	name = fmt.Sprintf("%s_%s_1", p, "reference")
	referenceContainer := s.GetContainerByName(c, name)
	c.Assert(testContainer, NotNil)
	c.Assert(referenceContainer.Image, Equals, testContainer.Image)
}
// TestInterpolationWithExtends verifies that ${VAR} interpolation also works
// inside a service that extends another compose file.
func (s *RunSuite) TestInterpolationWithExtends(c *C) {
	os.Setenv("IMAGE", "tianon/true")
	os.Setenv("TEST_PORT", "8000")
	// Deferred so both variables are cleaned up even when an assertion
	// aborts the test; the original skipped cleanup on failure.
	defer os.Unsetenv("IMAGE")
	defer os.Unsetenv("TEST_PORT")
	p := s.CreateProjectFromText(c, `
test:
extends:
file: ./assets/interpolation/docker-compose.yml
service: base
ports:
- ${TEST_PORT}
`)
	name := fmt.Sprintf("%s_%s_1", p, "test")
	testContainer := s.GetContainerByName(c, name)
	p = s.CreateProjectFromText(c, `
reference:
image: tianon/true
ports:
- 8000
`)
	name = fmt.Sprintf("%s_%s_1", p, "reference")
	referenceContainer := s.GetContainerByName(c, name)
	c.Assert(testContainer, NotNil)
	c.Assert(referenceContainer.Image, Equals, testContainer.Image)
}
// TestFieldTypeConversions checks that string-typed YAML values (an
// env-substituted mem_limit, a quoted memswap_limit, a numeric hostname)
// are coerced to their expected field types, by comparing with a reference
// project that uses native literals.
func (s *RunSuite) TestFieldTypeConversions(c *C) {
	os.Setenv("LIMIT", "40000000")
	// Deferred so LIMIT is cleaned up even if an assertion aborts the test;
	// the original only unset it on the success path.
	defer os.Unsetenv("LIMIT")
	p := s.CreateProjectFromText(c, `
test:
image: tianon/true
mem_limit: $LIMIT
memswap_limit: "40000000"
hostname: 100
`)
	name := fmt.Sprintf("%s_%s_1", p, "test")
	testContainer := s.GetContainerByName(c, name)
	p = s.CreateProjectFromText(c, `
reference:
image: tianon/true
mem_limit: 40000000
memswap_limit: 40000000
hostname: "100"
`)
	name = fmt.Sprintf("%s_%s_1", p, "reference")
	referenceContainer := s.GetContainerByName(c, name)
	c.Assert(testContainer, NotNil)
	c.Assert(referenceContainer.Image, Equals, testContainer.Image)
}
|
// Copyright 2014 Gyepi Sam. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package redux
import (
"encoding/json"
"path/filepath"
)
// decodePrerequisite unmarshals a JSON-encoded Prerequisite record.
func decodePrerequisite(b []byte) (Prerequisite, error) {
	var p Prerequisite
	err := json.Unmarshal(b, &p)
	return p, err
}
// decodeDependent unmarshals a JSON-encoded Dependent record.
func decodeDependent(b []byte) (Dependent, error) {
	var d Dependent
	err := json.Unmarshal(b, &d)
	return d, err
}
// AsDependent returns a Dependent record referencing f by its path relative
// to dir. It panics if filepath.Rel cannot compute the relative path, which
// is treated as a programmer error rather than a recoverable condition.
func (f *File) AsDependent(dir string) Dependent {
	relpath, err := filepath.Rel(dir, f.Fullpath())
	if err != nil {
		panic(err)
	}
	return Dependent{Path: relpath}
}
// AsPrerequisite returns a Prerequisite record referencing f by its path
// relative to dir, carrying the supplied metadata. Like AsDependent, a
// filepath.Rel failure is a programmer error and panics.
func (f *File) AsPrerequisite(dir string, m *Metadata) Prerequisite {
	relpath, err := filepath.Rel(dir, f.Fullpath())
	if err != nil {
		panic(err)
	}
	return Prerequisite{Path: relpath, Metadata: m}
}
// Get looks up key in the database and, when found, JSON-decodes the stored
// bytes into obj (which should be a pointer). It returns whether the key was
// found, along with any lookup or decode error.
func (f *File) Get(key string, obj interface{}) (bool, error) {
	data, found, err := f.db.Get(key)
	// The defer sits after the lookup, so `found` already holds its final
	// value when the Debug arguments are captured (defer evaluates its
	// arguments immediately).
	defer f.Debug("@Get %s -> %t ...\n", key, found)
	if err == nil && found {
		err = json.Unmarshal(data, &obj)
	}
	return found, err
}
// Put stores obj in the database under key, JSON-encoded.
//
// The debug line is emitted from a closure so that it reports the *final*
// value of the named return err: a deferred call's arguments are evaluated
// at the point of the defer statement, so the original
// `defer f.Debug(..., err)` captured err while it was still nil and always
// logged a nil error regardless of the outcome.
func (f *File) Put(key string, obj interface{}) (err error) {
	defer func() { f.Debug("@Put %s -> %s\n", key, err) }()
	b, err := json.Marshal(obj)
	if err != nil {
		return err
	}
	return f.db.Put(key, b)
}
|
package app
import (
"github.com/swipely/iam-docker/src/docker"
"github.com/swipely/iam-docker/src/iam"
"net/url"
"time"
)
// App holds the state of the application.
type App struct {
	Config       *Config          // runtime configuration (see Config)
	DockerClient docker.RawClient // client used to talk to the Docker daemon
	STSClient    iam.STSClient    // client used to obtain IAM credentials via STS
}
// Config holds application configuration.
type Config struct {
	ListenAddr              string        // address the HTTP server listens on
	MetaDataUpstream        *url.URL      // upstream metadata service URL
	EventHandlers           int           // number of event handler workers
	ReadTimeout             time.Duration // HTTP server read timeout
	WriteTimeout            time.Duration // HTTP server write timeout
	DockerSyncPeriod        time.Duration // interval between Docker state syncs
	CredentialRefreshPeriod time.Duration // interval between credential refreshes
	DisableUpstream         bool          // when true, the metadata upstream is not used
}
|
package metre
import (
"fmt"
"errors"
"github.com/satori/go.uuid"
)
// Scheduler coordinates task scheduling across a queue (delivery) and a
// cache (task-state records keyed by task key).
type Scheduler struct {
	Queue Queue // destination for serialized, scheduled tasks
	Cache Cache // record store used to detect already-scheduled tasks
}
// NewScheduler builds a Scheduler backed by the given queue and cache.
func NewScheduler(q Queue, c Cache) Scheduler {
	return Scheduler{Queue: q, Cache: c}
}
// Schedule schedules a task in the cache and queue if no task is actively
// waiting to be processed under the same key. When a live (non-reschedulable)
// record already exists, the key is returned together with an error.
func (s Scheduler) Schedule(t TaskRecord) (string, error) {
	key := buildTaskKey(t)
	existing, _ := s.Cache.Get(key)
	// No record at all: schedule immediately.
	if existing == "" {
		return schedule(key, t, s.Queue, s.Cache)
	}
	prev, _ := ParseTask(existing)
	// A stale/finished record may be replaced.
	if prev.CanReschedule() {
		return schedule(key, t, s.Queue, s.Cache)
	}
	return key, errors.New("A Task with the submitted ID and UID [" + prev.ID + ", " + prev.UID + "] is being processed")
}
// ForceSchedule schedules a task in the cache and queue regardless of tasks
// actively waiting to be processed. When the existing record carries the
// same UID, a fresh UUID suffix is appended to avoid a collision (which also
// changes the key the task is finally stored under).
func (s Scheduler) ForceSchedule(t TaskRecord) (string, error) {
	existing, _ := s.Cache.Get(buildTaskKey(t))
	if existing != "" {
		// affix an additional UID if there was a collision
		if prev, _ := ParseTask(existing); prev.UID == t.UID {
			t.UID = t.UID + "-" + uuid.NewV4().String()
		}
	}
	// Recompute the key: t.UID may have changed above.
	return schedule(buildTaskKey(t), t, s.Queue, s.Cache)
}
// SetExpire sets the expiration for a task, delegating to the cache's
// Expire for the task's key. The unit of time is whatever the Cache
// implementation expects — presumably seconds; confirm against it.
func (s Scheduler) SetExpire(t TaskRecord, time int) {
	s.Cache.Expire(buildTaskKey(t), time)
}
// schedule performs the cache-then-queue transaction for a task: marks it
// scheduled, writes its serialized form to the cache under k, then pushes
// it onto the queue. The key is always returned so callers can reference
// the cache entry even on failure.
func schedule(k string, t TaskRecord, q Queue, c Cache) (string, error) {
	t.SetScheduled()
	// The serialization error was previously discarded, which could push an
	// empty/partial record into the cache and queue.
	str, sErr := t.ToString()
	if sErr != nil {
		return k, fmt.Errorf("serialize task returned error: %v", sErr)
	}
	_, cErr := c.Set(k, str)
	if cErr != nil {
		return k, fmt.Errorf("set cache returned error: %v", cErr)
	}
	_, qErr := q.Push(str)
	if qErr != nil {
		return k, fmt.Errorf("push queue returned error: %v", qErr)
	}
	return k, nil
}
|
package main
// canConstruct reports whether ransomNote can be built from the letters of
// magazine, using each magazine letter at most once.
//
// A single rune-count map over magazine is decremented while scanning the
// note; a negative count means some rune is needed more times than the
// magazine supplies, allowing an early exit. This replaces the original
// two-map version: one allocation fewer and O(len(note)+len(magazine)) time.
func canConstruct(ransomNote string, magazine string) bool {
	counts := make(map[rune]int)
	for _, c := range magazine {
		counts[c]++
	}
	for _, c := range ransomNote {
		counts[c]--
		if counts[c] < 0 {
			return false
		}
	}
	return true
}
|
package app
// API response status messages. Names keep their existing ALL_CAPS form for
// compatibility with callers (Go convention would be MixedCaps). The
// non-SUCCESS messages are user-facing Chinese strings.
const (
	SUCCESS        = "ok"
	ERROR          = "请求失败"   // generic request failure
	INVALID_PARAMS = "请求参数错误" // request parameter validation failed
	SERVER_ERROR   = "服务错误"   // internal server error
)
|
package main
import (
"fmt"
"time"
"github.com/dymm/orchestrators/messageQ/pkg/config"
"github.com/dymm/orchestrators/messageQ/pkg/workflow"
)
// main pumps work items off the message queue forever. Each item is matched
// to a workflow and session, forwarded to the next processor, and its
// session is deleted once the workflow completes or fails.
func main() {
	myMessageQueue := config.CreateMQMessageQueueOrDie()
	allWorflows := getTheWorkflowsOrDie()
	workflow.StartSessionTimeoutChecking(myMessageQueue, myMessageQueue.GetName())
	for {
		workItem, err := myMessageQueue.Receive()
		var selectedWorkflow workflow.Workflow
		var workflowSession *workflow.Session
		if err == nil {
			selectedWorkflow, workflowSession, err = workflow.GetTheWorkflowAndSession(allWorflows, workItem)
		}
		var finished bool
		if err == nil {
			finished, err = workflow.SendToTheNextProcessor(myMessageQueue, selectedWorkflow, workflowSession, workItem)
		}
		if err != nil {
			// workflowSession is nil when Receive or the workflow lookup
			// failed; the original dereferenced it unconditionally here and
			// would panic on a receive error.
			if workflowSession != nil {
				fmt.Printf("Error while executing the workflow %d. %s\n", workflowSession.Key, err)
				finished = true
			} else {
				fmt.Printf("Error while processing a work item. %s\n", err)
			}
		}
		// Guard the session here too: finished can only be meaningful with
		// a live session.
		if finished && workflowSession != nil {
			fmt.Printf("Workflow '%d' finished in %d ms\n", workflowSession.Key, time.Since(workflowSession.CurrentStep.Started).Milliseconds())
			workflow.DeleteSession(workflowSession)
		}
	}
}
// getTheWorkflowsOrDie returns the two statically defined workflows: one for
// low values (subtract then print) and one for high values (add twice then
// print). Failures in any step route to a "Dump" error-handling step.
func getTheWorkflowsOrDie() []workflow.Workflow {
	lowSteps := map[string]workflow.Step{
		"Step 1": {Process: "processor-sub", OnSuccess: "Step 2", OnError: "Dump", Timeout: 2},
		"Step 2": {Process: "processor-print"},
		"Dump":   {Process: "processor-error"},
	}
	highSteps := map[string]workflow.Step{
		"Step 1": {Process: "processor-add", OnSuccess: "Step 2", OnError: "Dump", Timeout: 2},
		"Step 2": {Process: "processor-add", OnSuccess: "Step 3", OnError: "Dump", Timeout: 2},
		"Step 3": {Process: "processor-print"},
		"Dump":   {Process: "processor-error"},
	}
	return []workflow.Workflow{
		workflow.New("Value lower than 50",
			workflow.Validator{Value: "data.Value", Regex: `^(\d|[0-5]\d?)$`}, //50 or less
			"Step 1",
			lowSteps,
		),
		// NOTE(review): this pattern matches 60-99 and 3+ digit values but
		// not 50-59 — confirm whether that gap is intended.
		workflow.New("Value greater or equal than 50",
			workflow.Validator{Value: "data.Value", Regex: `^([6-9]\d|\d{3,})$`}, //Greater than 50,
			"Step 1",
			highSteps,
		),
	}
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package addindextest
import (
"context"
"strconv"
"sync"
"testing"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/util/logutil"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
// tableNum is the total number of test tables (t0..t2); nonPartTabNum of
// them are non-partitioned, and the remainder are the partitioned variants
// built by genPartTableStr.
const (
	tableNum      = 3
	nonPartTabNum = 1
)
// suiteContext carries the shared state for one add-index test run: storage
// and TestKit handles, the table geometry, and the optional workload,
// failpoint, and compatibility-test machinery.
type suiteContext struct {
	ctx              context.Context
	cancel           func()
	store            kv.Storage
	t                *testing.T
	tk               *testkit.TestKit // default session used when not in concurrent-DDL mode
	isUnique         bool             // build unique indexes in this run
	isPK             bool             // build primary-key indexes in this run
	tableNum         int              // number of tables under test
	colNum           int              // number of (non-generated) columns per table
	rowNum           int              // rows inserted per table
	workload         *workload        // optional background DML workload
	tkPool           *sync.Pool       // pool of extra TestKits (see getTestKit/putTestKit)
	isFailpointsTest bool             // inject failpoints while creating indexes
	failSync         sync.WaitGroup   // waits for failpoint goroutines to finish
	CompCtx          *CompatibilityContext // concurrent-DDL / multi-schema-change settings
}
// getTestKit borrows a TestKit from the pool; return it with putTestKit.
// Panics if the pool yields anything other than a *testkit.TestKit.
func (s *suiteContext) getTestKit() *testkit.TestKit {
	item := s.tkPool.Get()
	return item.(*testkit.TestKit)
}
// putTestKit returns a TestKit previously borrowed via getTestKit to the pool.
func (s *suiteContext) putTestKit(tk *testkit.TestKit) {
	s.tkPool.Put(tk)
}
// done reports, without blocking, whether the suite context has been
// cancelled.
func (s *suiteContext) done() bool {
	select {
	case <-s.ctx.Done():
		return true
	default:
	}
	return false
}
// newSuiteContext builds a suiteContext with the default geometry used by
// the add-index tests: tableNum tables of 28 insertable columns and 64 rows
// each. Failpoint injection is off by default.
func newSuiteContext(t *testing.T, tk *testkit.TestKit, store kv.Storage) *suiteContext {
	return &suiteContext{
		store: store,
		t:     t,
		tk:    tk,
		// Use the package-level const instead of a duplicated literal 3 so
		// the suite geometry stays in sync with createTable/insertRows.
		tableNum:         tableNum,
		colNum:           28,
		rowNum:           64,
		isFailpointsTest: false,
	}
}
// genTableStr returns the CREATE TABLE statement for a non-partitioned test
// table named tableName in the addindex schema, covering every column type
// the suite exercises (c0..c28 plus the generated column c29).
func genTableStr(tableName string) string {
	const colDefs = "c0 int, c1 bit(8), c2 boolean, c3 tinyint default 3, c4 smallint not null, c5 mediumint," +
		"c6 int, c7 bigint, c8 float, c9 double, c10 decimal(13,7), c11 date, c12 time, c13 datetime," +
		"c14 timestamp, c15 year, c16 char(10), c17 varchar(10), c18 text, c19 tinytext, c20 mediumtext," +
		"c21 longtext, c22 binary(20), c23 varbinary(30), c24 blob, c25 tinyblob, c26 MEDIUMBLOB, c27 LONGBLOB," +
		"c28 json, c29 INT AS (JSON_EXTRACT(c28, '$.population'))"
	return "create table addindex." + tableName + " (" + colDefs + ")"
}
// genPartTableStr returns CREATE TABLE statements for the partitioned test
// tables, numbered immediately after the non-partitioned ones
// (t<nonPartTabNum>, t<nonPartTabNum+1>): one RANGE-partitioned on c0 in
// steps of 10 and one HASH-partitioned on c0 with 4 partitions. The column
// list mirrors genTableStr.
func genPartTableStr() (tableDefs []string) {
	num := nonPartTabNum
	// Range table def
	tableDefs = append(tableDefs, "CREATE TABLE addindex.t"+strconv.Itoa(num)+" ("+
		"c0 int, c1 bit(8), c2 boolean, c3 tinyint default 3, c4 smallint not null, c5 mediumint,"+
		"c6 int, c7 bigint, c8 float, c9 double, c10 decimal(13,7), c11 date, c12 time, c13 datetime,"+
		"c14 timestamp, c15 year, c16 char(10), c17 varchar(10), c18 text, c19 tinytext, c20 mediumtext,"+
		"c21 longtext, c22 binary(20), c23 varbinary(30), c24 blob, c25 tinyblob, c26 MEDIUMBLOB, c27 LONGBLOB,"+
		"c28 json, c29 INT AS (JSON_EXTRACT(c28, '$.population')))"+
		" PARTITION BY RANGE (`c0`)"+
		" (PARTITION `p0` VALUES LESS THAN (10),"+
		" PARTITION `p1` VALUES LESS THAN (20),"+
		" PARTITION `p2` VALUES LESS THAN (30),"+
		" PARTITION `p3` VALUES LESS THAN (40),"+
		" PARTITION `p4` VALUES LESS THAN (50),"+
		" PARTITION `p5` VALUES LESS THAN (60),"+
		" PARTITION `p6` VALUES LESS THAN (70),"+
		" PARTITION `p7` VALUES LESS THAN (80),"+
		" PARTITION `p8` VALUES LESS THAN MAXVALUE)")
	num++
	// Hash part table
	tableDefs = append(tableDefs, "CREATE TABLE addindex.t"+strconv.Itoa(num)+" ("+
		"c0 int, c1 bit(8), c2 boolean, c3 tinyint default 3, c4 smallint not null, c5 mediumint,"+
		"c6 int, c7 bigint, c8 float, c9 double, c10 decimal(13,7), c11 date, c12 time, c13 datetime,"+
		"c14 timestamp, c15 year, c16 char(10), c17 varchar(10), c18 text, c19 tinytext, c20 mediumtext,"+
		"c21 longtext, c22 binary(20), c23 varbinary(30), c24 blob, c25 tinyblob, c26 MEDIUMBLOB, c27 LONGBLOB,"+
		"c28 json, c29 INT AS (JSON_EXTRACT(c28, '$.population')))"+
		" PARTITION BY HASH (c0) PARTITIONS 4")
	return
}
// createTable creates every test table in the addindex schema: the
// non-partitioned tables t0..t(nonPartTabNum-1), followed by the
// partitioned variants from genPartTableStr.
func createTable(tk *testkit.TestKit) {
	for i := 0; i < nonPartTabNum; i++ {
		tk.MustExec(genTableStr("t" + strconv.Itoa(i)))
	}
	for _, def := range genPartTableStr() {
		tk.MustExec(def)
	}
}
// insertRows populates every test table t0..t(tableNum-1) with the same 64
// fixed rows, executing one INSERT statement per row. c29 is a generated
// column, so only c0..c28 appear in the column list; c28 carries the JSON
// document whose "population" field feeds c29.
func insertRows(tk *testkit.TestKit) {
	var (
		insStr string
		// 64 fixed rows. Most columns repeat on a cycle of four; c0 and c6
		// are distinct per row (1..64), and dates/19th text column vary row
		// by row.
		values = []string{
			" (1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 1111.1111, '2001-01-01', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 100}')",
			" (2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 1112.1111, '2001-01-02', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 101}')",
			" (3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 1113.1111, '2001-01-03', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 102}')",
			" (4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 1114.1111, '2001-01-04', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 103}')",
			" (5, 5, 1, 1, 1, 1, 5, 1, 1.0, 1.0, 1111.1111, '2001-01-05', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'eeee', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 104}')",
			" (6, 2, 2, 2, 2, 2, 6, 2, 2.0, 2.0, 1112.1111, '2001-01-06', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'ffff', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 105}')",
			" (7, 3, 3, 3, 3, 3, 7, 3, 3.0, 3.0, 1113.1111, '2001-01-07', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'gggg', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 106}')",
			" (8, 4, 4, 4, 4, 4, 8, 4, 4.0, 4.0, 1114.1111, '2001-01-08', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'hhhh', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 107}')",
			" (9, 1, 1, 1, 1, 1, 9, 1, 1.0, 1.0, 1111.1111, '2001-01-09', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'iiii', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 100}')",
			" (10, 2, 2, 2, 2, 2, 10, 2, 2.0, 2.0, 1112.1111, '2001-01-10', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'jjjj', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 101}')",
			" (11, 3, 3, 3, 3, 3, 11, 3, 3.0, 3.0, 1113.1111, '2001-01-11', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'kkkk', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 102}')",
			" (12, 4, 4, 4, 4, 4, 12, 4, 4.0, 4.0, 1114.1111, '2001-01-12', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'llll', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 103}')",
			" (13, 5, 1, 1, 1, 1, 13, 1, 1.0, 1.0, 1111.1111, '2001-01-13', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'mmmm', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 104}')",
			" (14, 2, 2, 2, 2, 2, 14, 2, 2.0, 2.0, 1112.1111, '2001-01-14', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'nnnn', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 105}')",
			" (15, 3, 3, 3, 3, 3, 15, 3, 3.0, 3.0, 1113.1111, '2001-01-15', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'oooo', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 106}')",
			" (16, 4, 4, 4, 4, 4, 16, 4, 4.0, 4.0, 1114.1111, '2001-01-16', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'pppp', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 107}')",
			" (17, 1, 1, 1, 1, 1, 17, 1, 1.0, 1.0, 1111.1111, '2001-01-17', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'qqqq', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 100}')",
			" (18, 2, 2, 2, 2, 2, 18, 2, 2.0, 2.0, 1112.1111, '2001-01-18', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'rrrr', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 101}')",
			" (19, 3, 3, 3, 3, 3, 19, 3, 3.0, 3.0, 1113.1111, '2001-01-19', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'ssss', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 102}')",
			" (20, 4, 4, 4, 4, 4, 20, 4, 4.0, 4.0, 1114.1111, '2001-01-20', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'tttt', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 103}')",
			" (21, 5, 1, 1, 1, 1, 21, 1, 1.0, 1.0, 1111.1111, '2001-01-21', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'uuuu', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 104}')",
			" (22, 2, 2, 2, 2, 2, 22, 2, 2.0, 2.0, 1112.1111, '2001-01-22', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'vvvv', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 105}')",
			" (23, 3, 3, 3, 3, 3, 23, 3, 3.0, 3.0, 1113.1111, '2001-01-23', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'wwww', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 106}')",
			" (24, 4, 4, 4, 4, 4, 24, 4, 4.0, 4.0, 1114.1111, '2001-01-24', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'xxxx', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 107}')",
			" (25, 1, 1, 1, 1, 1, 25, 1, 1.0, 1.0, 1111.1111, '2001-01-25', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'yyyy', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 100}')",
			" (26, 2, 2, 2, 2, 2, 26, 2, 2.0, 2.0, 1112.1111, '2001-01-26', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'zzzz', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 101}')",
			" (27, 3, 3, 3, 3, 3, 27, 3, 3.0, 3.0, 1113.1111, '2001-01-27', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aaab', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 102}')",
			" (28, 4, 4, 4, 4, 4, 28, 4, 4.0, 4.0, 1114.1111, '2001-01-28', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aaac', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 103}')",
			" (29, 5, 1, 1, 1, 1, 29, 1, 1.0, 1.0, 1111.1111, '2001-01-29', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaad', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 104}')",
			" (30, 2, 2, 2, 2, 2, 30, 2, 2.0, 2.0, 1112.1111, '2001-01-30', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'aaae', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 105}')",
			" (31, 3, 3, 3, 3, 3, 31, 3, 3.0, 3.0, 1113.1111, '2001-01-31', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aaaf', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 106}')",
			" (32, 4, 4, 4, 4, 4, 32, 4, 4.0, 4.0, 1114.1111, '2001-02-01', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aaag', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 107}')",
			" (33, 1, 1, 1, 1, 1, 33, 1, 1.0, 1.0, 1111.1111, '2001-02-02', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaah', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 100}')",
			" (34, 2, 2, 2, 2, 2, 34, 2, 2.0, 2.0, 1112.1111, '2001-02-03', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'aaai', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 101}')",
			" (35, 3, 3, 3, 3, 3, 35, 3, 3.0, 3.0, 1113.1111, '2001-02-05', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aaaj', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 102}')",
			" (36, 4, 4, 4, 4, 4, 36, 4, 4.0, 4.0, 1114.1111, '2001-02-04', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aaak', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 103}')",
			" (37, 5, 1, 1, 1, 1, 37, 1, 1.0, 1.0, 1111.1111, '2001-02-06', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaal', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 104}')",
			" (38, 2, 2, 2, 2, 2, 38, 2, 2.0, 2.0, 1112.1111, '2001-02-07', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'aaam', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 105}')",
			" (39, 3, 3, 3, 3, 3, 39, 3, 3.0, 3.0, 1113.1111, '2001-02-08', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aaan', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 106}')",
			" (40, 4, 4, 4, 4, 4, 40, 4, 4.0, 4.0, 1114.1111, '2001-02-09', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aaao', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 107}')",
			" (41, 1, 1, 1, 1, 1, 41, 1, 1.0, 1.0, 1111.1111, '2001-02-10', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaap', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 100}')",
			" (42, 2, 2, 2, 2, 2, 42, 2, 2.0, 2.0, 1112.1111, '2001-02-11', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'aaaq', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 101}')",
			" (43, 3, 3, 3, 3, 3, 43, 3, 3.0, 3.0, 1113.1111, '2001-02-12', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aaar', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 102}')",
			" (44, 4, 4, 4, 4, 4, 44, 4, 4.0, 4.0, 1114.1111, '2001-02-13', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aaas', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 103}')",
			" (45, 5, 1, 1, 1, 1, 45, 1, 1.0, 1.0, 1111.1111, '2001-02-14', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaat', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 104}')",
			" (46, 2, 2, 2, 2, 2, 46, 2, 2.0, 2.0, 1112.1111, '2001-02-15', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'aaau', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 105}')",
			" (47, 3, 3, 3, 3, 3, 47, 3, 3.0, 3.0, 1113.1111, '2001-02-16', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aaav', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 106}')",
			" (48, 4, 4, 4, 4, 4, 48, 4, 4.0, 4.0, 1114.1111, '2001-02-17', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aaaw', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 107}')",
			" (49, 1, 1, 1, 1, 1, 49, 1, 1.0, 1.0, 1111.1111, '2001-02-18', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaax', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 100}')",
			" (50, 2, 2, 2, 2, 2, 50, 2, 2.0, 2.0, 1112.1111, '2001-02-19', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'aaay', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 101}')",
			" (51, 3, 3, 3, 3, 3, 51, 3, 3.0, 3.0, 1113.1111, '2001-02-20', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aaaz', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 102}')",
			" (52, 4, 4, 4, 4, 4, 52, 4, 4.0, 4.0, 1114.1111, '2001-02-21', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aaba', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 103}')",
			" (53, 5, 1, 1, 1, 1, 53, 1, 1.0, 1.0, 1111.1111, '2001-02-22', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaca', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 104}')",
			" (54, 2, 2, 2, 2, 2, 54, 2, 2.0, 2.0, 1112.1111, '2001-02-23', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'aada', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 105}')",
			" (55, 3, 3, 3, 3, 3, 55, 3, 3.0, 3.0, 1113.1111, '2001-02-24', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aaea', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 106}')",
			" (56, 4, 4, 4, 4, 4, 56, 4, 4.0, 4.0, 1114.1111, '2001-02-25', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aafa', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 107}')",
			" (57, 1, 1, 1, 1, 1, 57, 1, 1.0, 1.0, 1111.1111, '2001-02-26', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaga', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 100}')",
			" (58, 2, 2, 2, 2, 2, 58, 2, 2.0, 2.0, 1112.1111, '2001-02-27', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'aaha', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 101}')",
			" (59, 3, 3, 3, 3, 3, 59, 3, 3.0, 3.0, 1113.1111, '2001-02-28', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aaia', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 102}')",
			" (60, 4, 4, 4, 4, 4, 60, 4, 4.0, 4.0, 1114.1111, '2001-03-01', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aaja', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 103}')",
			" (61, 5, 1, 1, 1, 1, 61, 1, 1.0, 1.0, 1111.1111, '2001-03-02', '11:11:11', '2001-01-01 11:11:11', '2001-01-01 11:11:11.123456', 1999, 'aaaa', 'aaaa', 'aaaa', 'aaka', 'aaaa','aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', 'aaaa', '{\"name\": \"Beijing\", \"population\": 104}')",
			" (62, 2, 2, 2, 2, 2, 62, 2, 2.0, 2.0, 1112.1111, '2001-03-03', '11:11:12', '2001-01-02 11:11:12', '2001-01-02 11:11:12.123456', 2000, 'bbbb', 'bbbb', 'bbbb', 'aala', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', 'bbbb', '{\"name\": \"Beijing\", \"population\": 105}')",
			" (63, 3, 3, 3, 3, 3, 63, 3, 3.0, 3.0, 1113.1111, '2001-03-04', '11:11:13', '2001-01-03 11:11:13', '2001-01-03 11:11:11.123456', 2001, 'cccc', 'cccc', 'cccc', 'aama', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', 'cccc', '{\"name\": \"Beijing\", \"population\": 106}')",
			" (64, 4, 4, 4, 4, 4, 64, 4, 4.0, 4.0, 1114.1111, '2001-03-05', '11:11:14', '2001-01-04 11:11:14', '2001-01-04 11:11:12.123456', 2002, 'dddd', 'dddd', 'dddd', 'aana', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', 'dddd', '{\"name\": \"Beijing\", \"population\": 107}')",
		}
	)
	for i := 0; i < tableNum; i++ {
		insStr = "insert into addindex.t" + strconv.Itoa(i) + " (c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28) values"
		for _, value := range values {
			// Deliberate shadow: each iteration builds a one-row statement
			// from the shared prefix and executes it independently.
			insStr := insStr + value
			tk.MustExec(insStr)
		}
	}
}
// createIndexOneCol issues an ALTER TABLE adding a single-column index named
// idx<colID> on column c<colID> of addindex.t<tableID>. The index kind
// (plain / unique / primary key) follows ctx.isPK and ctx.isUnique.
//
// Text/blob columns (colID in [18,29)) get a prefix length of 4; the unique
// case on c19 uses 16, presumably so row prefixes stay distinct — confirm
// against the test data in insertRows. For unique indexes on the
// partitioned tables (tableID > 0) the partitioning column c0 is prepended.
// In multi-schema-change mode an ADD COLUMN is appended to the same
// statement; in concurrent-DDL mode the per-table executor session is used.
//
// A "Duplicate entry" error is tolerated for unique/PK runs; any other
// error fails the test. The raw error is returned either way.
func createIndexOneCol(ctx *suiteContext, tableID int, colID int) (err error) {
	addIndexStr := " add index idx"
	var ddlStr string
	if ctx.isPK {
		addIndexStr = " add primary key idx"
	} else if ctx.isUnique {
		addIndexStr = " add unique index idx"
	}
	length := 4
	if ctx.isUnique && colID == 19 {
		length = 16
	}
	// Plain indexes, anything on the first table, and PKs on partitioned
	// tables use just c<colID>; only unique indexes on partitioned tables
	// fall through to the composite (c0, c<colID>) form below.
	if !(ctx.isPK || ctx.isUnique) || tableID == 0 || (ctx.isPK && tableID > 0) {
		if colID >= 18 && colID < 29 {
			ddlStr = "alter table addindex.t" + strconv.Itoa(tableID) + addIndexStr + strconv.Itoa(colID) + "(c" + strconv.Itoa(colID) + "(" + strconv.Itoa(length) + "))"
		} else {
			ddlStr = "alter table addindex.t" + strconv.Itoa(tableID) + addIndexStr + strconv.Itoa(colID) + "(c" + strconv.Itoa(colID) + ")"
		}
	} else if (ctx.isUnique) && tableID > 0 {
		if colID >= 18 && colID < 29 {
			ddlStr = "alter table addindex.t" + strconv.Itoa(tableID) + addIndexStr + strconv.Itoa(colID) + "(c0, c" + strconv.Itoa(colID) + "(" + strconv.Itoa(length) + "))"
		} else {
			ddlStr = "alter table addindex.t" + strconv.Itoa(tableID) + addIndexStr + strconv.Itoa(colID) + "(c0, c" + strconv.Itoa(colID) + ")"
		}
	}
	if ctx.CompCtx != nil && ctx.CompCtx.isMultiSchemaChange {
		// Shift by 60 so the added column number cannot collide with
		// existing columns.
		colID += 60
		ddlStr += " , add column c" + strconv.Itoa(colID) + " int;"
	}
	logutil.BgLogger().Info("createIndexOneCol", zap.String("category", "add index test"), zap.String("sql", ddlStr))
	if ctx.CompCtx != nil && ctx.CompCtx.isConcurrentDDL {
		_, err = ctx.CompCtx.executor[tableID].tk.Exec(ddlStr)
	} else {
		_, err = ctx.tk.Exec(ddlStr)
	}
	if err != nil {
		if ctx.isUnique || ctx.isPK {
			// Duplicate entries are expected when building unique/PK
			// indexes over the repeating test data.
			require.Contains(ctx.t, err.Error(), "Duplicate entry")
		} else {
			require.NoError(ctx.t, err)
		}
	}
	return err
}
// createIndexTwoCols issues an ALTER TABLE adding a composite index named
// idx<indexID> on (c<colID1>, c<colID2>) of addindex.t<tableID>. The index
// kind follows ctx.isPK / ctx.isUnique, and text/blob columns (IDs in
// [18,29)) get a fixed prefix length of 4.
//
// In multi-schema-change mode an ADD COLUMN is appended to the same
// statement; in concurrent-DDL mode the per-table executor session runs it.
// Unlike createIndexOneCol, any error (including duplicates) fails the test.
func createIndexTwoCols(ctx *suiteContext, tableID int, indexID int, colID1 int, colID2 int) (err error) {
	var colID1Str, colID2Str string
	addIndexStr := " add index idx"
	if ctx.isPK {
		addIndexStr = " add primary key idx"
	} else if ctx.isUnique {
		addIndexStr = " add unique index idx"
	}
	// Prefix-length suffix for text/blob columns.
	if colID1 >= 18 && colID1 < 29 {
		colID1Str = strconv.Itoa(colID1) + "(4)"
	} else {
		colID1Str = strconv.Itoa(colID1)
	}
	if colID2 >= 18 && colID2 < 29 {
		colID2Str = strconv.Itoa(colID2) + "(4)"
	} else {
		colID2Str = strconv.Itoa(colID2)
	}
	ddlStr := "alter table addindex.t" + strconv.Itoa(tableID) + addIndexStr + strconv.Itoa(indexID) + "(c" + colID1Str + ", c" + colID2Str + ")"
	if ctx.CompCtx != nil && ctx.CompCtx.isMultiSchemaChange {
		// Shift by 60 so the added column number cannot collide with
		// existing columns.
		colID1 += 60
		ddlStr += " , add column c" + strconv.Itoa(colID1) + " varchar(10);"
	}
	logutil.BgLogger().Info("createIndexTwoCols", zap.String("category", "add index test"), zap.String("sql", ddlStr))
	if ctx.CompCtx != nil && ctx.CompCtx.isConcurrentDDL {
		_, err = ctx.CompCtx.executor[tableID].tk.Exec(ddlStr)
	} else {
		_, err = ctx.tk.Exec(ddlStr)
	}
	if err != nil {
		logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"),
			zap.String("sql", ddlStr), zap.Error(err))
	}
	require.NoError(ctx.t, err)
	return err
}
// checkResult runs "admin check index" against the freshly built index
// idx<indexID> on tableName and then drops the index, failing the test on
// any error or nonzero affected-rows count. In concurrent-DDL mode the
// per-table executor session is used instead of the suite's default one.
func checkResult(ctx *suiteContext, tableName string, indexID int, tkID int) {
	tk := ctx.tk
	if ctx.CompCtx != nil && ctx.CompCtx.isConcurrentDDL {
		tk = ctx.CompCtx.executor[tkID].tk
	}
	adminCheckSQL := "admin check index " + tableName + " idx" + strconv.Itoa(indexID)
	_, err := tk.Exec(adminCheckSQL)
	if err != nil {
		logutil.BgLogger().Error("checkResult", zap.String("category", "add index test"),
			zap.String("sql", adminCheckSQL), zap.Error(err))
	}
	require.NoError(ctx.t, err)
	require.Equal(ctx.t, uint64(0), tk.Session().AffectedRows())
	// Drop the index so the next case starts from a clean table.
	_, err = tk.Exec("alter table " + tableName + " drop index idx" + strconv.Itoa(indexID))
	if err != nil {
		logutil.BgLogger().Error("drop index failed", zap.String("category", "add index test"),
			zap.String("sql", adminCheckSQL), zap.Error(err))
	}
	require.NoError(ctx.t, err)
}
// checkTableResult runs "admin check table" on tableName and asserts it
// passes with zero affected rows, using the per-table executor session in
// concurrent-DDL mode and the suite's default session otherwise.
func checkTableResult(ctx *suiteContext, tableName string, tkID int) {
	tk := ctx.tk
	if ctx.CompCtx != nil && ctx.CompCtx.isConcurrentDDL {
		tk = ctx.CompCtx.executor[tkID].tk
	}
	adminCheckSQL := "admin check table " + tableName
	_, err := tk.Exec(adminCheckSQL)
	if err != nil {
		logutil.BgLogger().Error("checkTableResult", zap.String("category", "add index test"),
			zap.String("sql", adminCheckSQL), zap.Error(err))
	}
	require.NoError(ctx.t, err)
	require.Equal(ctx.t, uint64(0), tk.Session().AffectedRows())
}
// testOneColFrame drives a single-column add-index scenario f over every
// table and every column ID in colIDs, optionally running a background
// workload and/or enabling failpoints while the DDL executes, and verifies
// (then drops) the index after each successful build.
func testOneColFrame(ctx *suiteContext, colIDs [][]int, f func(*suiteContext, int, string, int) error) {
	for tableID := 0; tableID < ctx.tableNum; tableID++ {
		tableName := "addindex.t" + strconv.Itoa(tableID)
		for _, i := range colIDs[tableID] {
			if ctx.workload != nil {
				// Run concurrent DML against the target column while the
				// index is being built.
				ctx.workload.start(ctx, tableID, i)
			}
			if ctx.isFailpointsTest {
				// The goroutine calls failSync.Done when it has disabled
				// its failpoint again.
				ctx.failSync.Add(1)
				go useFailpoints(ctx, i)
			}
			err := f(ctx, tableID, tableName, i)
			if err != nil {
				if ctx.isUnique || ctx.isPK {
					// Unique/PK builds may legitimately fail on duplicates.
					require.Contains(ctx.t, err.Error(), "Duplicate entry")
				} else {
					logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"), zap.Error(err))
					require.NoError(ctx.t, err)
				}
			}
			if ctx.workload != nil {
				_ = ctx.workload.stop(ctx, -1)
			}
			if ctx.isFailpointsTest {
				// Wait for the failpoint goroutine to finish before the
				// next iteration re-arms it.
				ctx.failSync.Wait()
			}
			if err == nil {
				checkResult(ctx, tableName, i, tableID)
			}
		}
	}
}
// testTwoColsFrame drives a two-column add-index scenario f over every table
// and every (i, j) pair from iIDs x jIDs, assigning a sequential indexID per
// pair. Workload and failpoints run alongside when configured; successful
// builds are verified (and dropped) via checkResult.
func testTwoColsFrame(ctx *suiteContext, iIDs [][]int, jIDs [][]int, f func(*suiteContext, int, string, int, int, int) error) {
	for tableID := 0; tableID < ctx.tableNum; tableID++ {
		tableName := "addindex.t" + strconv.Itoa(tableID)
		indexID := 0
		for _, i := range iIDs[tableID] {
			for _, j := range jIDs[tableID] {
				if ctx.workload != nil {
					// Concurrent DML against both target columns.
					ctx.workload.start(ctx, tableID, i, j)
				}
				if ctx.isFailpointsTest {
					ctx.failSync.Add(1)
					go useFailpoints(ctx, i)
				}
				err := f(ctx, tableID, tableName, indexID, i, j)
				if err != nil {
					logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"), zap.Error(err))
				}
				require.NoError(ctx.t, err)
				if ctx.workload != nil {
					// Stop workload
					_ = ctx.workload.stop(ctx, -1)
				}
				if ctx.isFailpointsTest {
					ctx.failSync.Wait()
				}
				// When i == j no index is created (see addIndexMultiCols),
				// so there is nothing to verify.
				if err == nil && i != j {
					checkResult(ctx, tableName, indexID, tableID)
				}
				indexID++
			}
		}
	}
}
// testOneIndexFrame runs scenario f once per table on the fixed column
// colID, with optional workload and failpoints, then verifies the result:
// the whole table for primary-key builds, the single index otherwise.
func testOneIndexFrame(ctx *suiteContext, colID int, f func(*suiteContext, int, string, int) error) {
	for tableID := 0; tableID < ctx.tableNum; tableID++ {
		tableName := "addindex.t" + strconv.Itoa(tableID)
		if ctx.workload != nil {
			ctx.workload.start(ctx, tableID, colID)
		}
		if ctx.isFailpointsTest {
			ctx.failSync.Add(1)
			go useFailpoints(ctx, tableID)
		}
		err := f(ctx, tableID, tableName, colID)
		if err != nil {
			logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"), zap.Error(err))
		}
		require.NoError(ctx.t, err)
		if ctx.workload != nil {
			_ = ctx.workload.stop(ctx, -1)
		}
		if ctx.isFailpointsTest {
			ctx.failSync.Wait()
		}
		if err == nil {
			if ctx.isPK {
				// A PK build affects row layout, so check the whole table.
				checkTableResult(ctx, tableName, tableID)
			} else {
				checkResult(ctx, tableName, colID, tableID)
			}
		}
	}
}
// addIndexNonUnique flags the context for a plain (non-unique, non-PK)
// index and builds it on the single column identified by indexID.
// tableName is unused but kept for the shared frame-callback signature.
func addIndexNonUnique(ctx *suiteContext, tableID int, tableName string, indexID int) (err error) {
	ctx.isPK = false
	ctx.isUnique = false
	return createIndexOneCol(ctx, tableID, indexID)
}
// addIndexUnique builds a unique single-column index. For index IDs
// 0, 6, 11, 19 — columns whose generated data is presumably duplicate-free
// (TODO confirm against the table-population code) — or for any table other
// than t0, creation must succeed; for the remaining columns on t0 a
// duplicate-key failure (MySQL error 1062) is tolerated.
func addIndexUnique(ctx *suiteContext, tableID int, tableName string, indexID int) (err error) {
	ctx.isPK = false
	ctx.isUnique = true
	if indexID == 0 || indexID == 6 || indexID == 11 || indexID == 19 || tableID > 0 {
		err = createIndexOneCol(ctx, tableID, indexID)
		if err != nil {
			logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"), zap.Error(err))
		} else {
			logutil.BgLogger().Info("add index success", zap.String("category", "add index test"),
				zap.String("table name", tableName), zap.Int("index ID", indexID))
		}
		require.NoError(ctx.t, err)
	} else {
		// Duplicates are expected here; only the error code is asserted.
		err = createIndexOneCol(ctx, tableID, indexID)
		if err != nil {
			require.Contains(ctx.t, err.Error(), "1062")
			logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"),
				zap.Error(err), zap.String("table name", tableName), zap.Int("index ID", indexID))
		}
	}
	return err
}
// addIndexPK flags the context for a primary-key build and always indexes
// column 0; the colID argument is ignored (kept for the shared callback
// signature).
func addIndexPK(ctx *suiteContext, tableID int, tableName string, colID int) (err error) {
	ctx.isPK = true
	ctx.isUnique = false
	return createIndexOneCol(ctx, tableID, 0)
}
// addIndexGenCol builds a plain index on the fixed column 29 — presumably
// the generated column of the test table (confirm against the schema);
// colID is ignored (kept for the shared callback signature).
func addIndexGenCol(ctx *suiteContext, tableID int, tableName string, colID int) (err error) {
	ctx.isPK = false
	ctx.isUnique = false
	return createIndexOneCol(ctx, tableID, 29)
}
// addIndexMultiCols builds a plain two-column index over (colID1, colID2).
// Identical column IDs are skipped and reported as success.
func addIndexMultiCols(ctx *suiteContext, tableID int, tableName string, indexID int, colID1 int, colID2 int) (err error) {
	ctx.isPK = false
	ctx.isUnique = false
	if colID1 == colID2 {
		return nil
	}
	err = createIndexTwoCols(ctx, tableID, indexID, colID1, colID2)
	if err != nil {
		logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"), zap.Error(err))
	}
	require.NoError(ctx.t, err)
	return err
}
// failpointsPath describes one failpoint: its registration path and the
// term expression passed to failpoint.Enable (e.g. "return", "1*return").
type failpointsPath struct {
	failpath string
	inTerm   string
}
// failpoints lists the DDL failpoints exercised by useFailpoints, covering
// both the backfill and merge stages of online add-index.
var failpoints = []failpointsPath{
	{"github.com/pingcap/tidb/ddl/mockHighLoadForAddIndex", "return"},
	{"github.com/pingcap/tidb/ddl/mockBackfillRunErr", "1*return"},
	{"github.com/pingcap/tidb/ddl/mockBackfillSlow", "return"},
	{"github.com/pingcap/tidb/ddl/MockCaseWhenParseFailure", "return(true)"},
	{"github.com/pingcap/tidb/ddl/mockHighLoadForMergeIndex", "return"},
	{"github.com/pingcap/tidb/ddl/mockMergeRunErr", "1*return"},
	{"github.com/pingcap/tidb/ddl/mockMergeSlow", "return"},
}
// useFailpoints enables one of the registered DDL failpoints (selected by
// failpos modulo the list length) for a fixed window while an add-index
// runs concurrently, then disables it and signals completion via
// ctx.failSync.
func useFailpoints(ctx *suiteContext, failpos int) {
	defer ctx.failSync.Done()
	logutil.BgLogger().Info("stack", zap.Stack("cur stack"), zap.Int("id:", failpos))
	// Wrap by the actual list length instead of the former hard-coded 7,
	// so adding/removing a failpoint cannot index out of range.
	failpos %= len(failpoints)
	require.NoError(ctx.t, failpoint.Enable(failpoints[failpos].failpath, failpoints[failpos].inTerm))
	logutil.BgLogger().Info("stack", zap.Stack("cur stack"), zap.Int("id:", failpos), zap.Bool("enable failpoints:", true))
	// Keep the failpoint active long enough for the concurrent DDL to hit it.
	time.Sleep(10 * time.Second)
	require.NoError(ctx.t, failpoint.Disable(failpoints[failpos].failpath))
	logutil.BgLogger().Info("stack", zap.Stack("cur stack"), zap.Int("id:", failpos), zap.Bool("disable failpoints:", true))
}
|
package things
import "fmt"
// Reader is a minimal example type exposing a no-op initializer and a
// fixed-value read.
type Reader struct{}

// Init announces that a new Reader is in use.
func (r Reader) Init() {
	fmt.Println("New Reader")
}

// Read returns the fixed placeholder value.
func (r Reader) Read() string {
	const value = "read value"
	return value
}
package preoblem
/*
给定一个链表,返回链表开始入环的第一个节点。 如果链表无环,则返回 null。
为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始)。 如果 pos 是 -1,则在该链表中没有环。
说明:不允许修改给定的链表
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/linked-list-cycle-ii
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
解题思路: 快慢指针, 快指针的速度为2, 慢指针的速度为1。
* 设链表到环的入口距离为 L, 其快慢指针的相遇点距离链表的入口地址S。
* 在相遇时慢指针走的路程: L + S(为什么慢指针走的路程为L+S 而不是其他的呢)
* 快指针的走的路程为 2(L+S) = L + S + nR
* 那么我们可以的到 L + S = nR, L = nR-S
* 入口在相遇点再走: nR-S 步, 那么一定会回到环的入口点。
*/
// detectCycle returns the node where a cycle begins in the list, or nil if
// there is no cycle (Floyd's tortoise-and-hare). After the two pointers
// meet inside the cycle, restarting one from the head and stepping both by
// one makes them meet exactly at the cycle entrance (L = nR - S, see the
// derivation in the comment above).
func detectCycle(head *ListNode) *ListNode {
	if head == nil || head.Next == nil {
		return nil
	}
	slow, fast := head, head
	for {
		// fast advances two steps per iteration; nil means no cycle.
		fast = fast.Next
		if fast == nil {
			return nil
		}
		fast = fast.Next
		if fast == nil {
			return nil
		}
		slow = slow.Next
		if slow == fast {
			break
		}
	}
	slow = head
	// BUG FIX: the original looped `for slow == fast`, which returned head
	// unconditionally when the pointers differed after the reset and spun
	// forever when they were equal. Advance until they meet.
	for slow != fast {
		slow = slow.Next
		fast = fast.Next
	}
	return slow
}
|
package scheduler
import (
"types"
"github.com/golang/glog"
)
var (
	// usersShare caches each user's dominant resource share.
	usersShare map[string]float64
	// usersAllocatedRes tracks the resources currently accounted per user.
	usersAllocatedRes map[string]types.Resource
	// usersWeight holds per-user weights used to normalize shares.
	usersWeight map[string]float64
	// Cluster-wide capacity, accumulated from node resources in initShare.
	totalCpu, totalMemory int64
)
// init allocates the package-level bookkeeping maps so they are writable
// before initShare populates them.
func init() {
	usersShare = map[string]float64{}
	usersAllocatedRes = map[string]types.Resource{}
	usersWeight = map[string]float64{}
}
// initShare builds the initial share table: it zeroes per-namespace state,
// sums cluster capacity from all nodes, then accounts every running pod
// against its user and computes the user's dominant share — the larger of
// the CPU fraction and the memory fraction (DRF-style).
// NOTE(review): totalCpu/totalMemory are package-level accumulators, so a
// second call would double-count capacity — confirm this runs exactly once.
func initShare() {
	namespaces := getNamespaces()
	for _, ns := range namespaces {
		var res types.Resource
		usersAllocatedRes[ns] = res
		usersShare[ns] = 0
		usersWeight[ns] = 1
	}
	nodes := getNodes()
	for _, node := range nodes {
		totalCpu += node.MilliCpu
		totalMemory += node.Memory
	}
	pods := getRunningPods()
	for _, pod := range pods {
		// Accumulate each pod's requests into its owner's allocation.
		res := usersAllocatedRes[pod.Uid]
		res.MilliCpu += pod.RequestMilliCpu
		res.Memory += pod.RequestMemory
		usersAllocatedRes[pod.Uid] = res
		usersShare[pod.Uid] = max(float64(res.MilliCpu)/float64(totalCpu), float64(res.Memory)/float64(totalMemory))
	}
	glog.Info("share is completed.")
}
// printShare logs the current dominant share of every known user.
func printShare() {
	for user, share := range usersShare {
		glog.Infof("%s's dominant share:%.2f", user, share)
	}
}
// fixUserShare accounts the given pod's requests against its user, then
// recomputes and caches the user's weighted dominant share (dominant
// resource fraction divided by the bumped weight) and returns it.
// NOTE(review): the weight increase (w += weight) is NOT written back to
// usersWeight, so it only affects this call — confirm whether the bump is
// meant to persist.
func fixUserShare(pod types.Pod, weight float64) float64 {
	res := usersAllocatedRes[pod.Uid]
	res.MilliCpu += pod.RequestMilliCpu
	res.Memory += pod.RequestMemory
	usersAllocatedRes[pod.Uid] = res
	w := usersWeight[pod.Uid]
	w += weight
	dominantShare := max(float64(res.MilliCpu)/float64(totalCpu), float64(res.Memory)/float64(totalMemory)) / w
	usersShare[pod.Uid] = dominantShare
	return dominantShare
}
// getUserShare reports the cached dominant share for uid; an unknown user
// yields 0 (the map's zero value).
func getUserShare(uid string) float64 {
	share, ok := usersShare[uid]
	if !ok {
		return 0
	}
	return share
}
// max returns the larger of x and y. The comparison is kept as x >= y so
// NaN handling matches the original exactly (a NaN x yields y).
func max(x, y float64) float64 {
	larger := y
	if x >= y {
		larger = x
	}
	return larger
}
|
package ibeplus
import (
"encoding/xml"
"fmt"
"time"
"github.com/otwdev/ibepluslib/models"
"github.com/otwdev/galaxylib"
"github.com/asaskevich/govalidator"
)
const bookingURL = "http://ibeplus.travelsky.com/ota/xml/AirBook"
// PNRBooking builds and submits an OTA AirBook request for one order and
// keeps the PNR code returned by the IBE service.
type PNRBooking struct {
	Order *models.OrderInfo // order to book
	PNR   string            // record locator; filled in by Booking()
}
// NewPNRBooking returns a PNRBooking for the given order with an empty PNR.
func NewPNRBooking(order *models.OrderInfo) *PNRBooking {
	return &PNRBooking{
		Order: order,
		PNR:   "",
	}
}
// makeBookingPerson converts a traveler into the booking name element:
// holders of a Chinese ID card are sent as a single full name with language
// "ZH"; everyone else is rendered as "Given/Surname" with language "EN".
func (p *PNRBooking) makeBookingPerson(traveler *models.Traveler) *BookingPersonName {
	if traveler.IDCardType == string(models.IDCard) {
		return &BookingPersonName{
			AttrLanguageType: "ZH",
			BookingSurname:   &BookingSurname{Text: traveler.PersonName},
		}
	}
	latinName := fmt.Sprintf("%s/%s", traveler.GivenName, traveler.SurName)
	return &BookingPersonName{
		AttrLanguageType: "EN",
		BookingSurname:   &BookingSurname{Text: latinName},
	}
}
// Booking assembles the full OTA_AirBookRQ payload from p.Order — POS
// (office number), flight segments, travelers with documents, contact OSI
// lines and the ticketing time limit — posts it to the IBE endpoint and
// stores the returned PNR on p. When booking is disabled in configuration a
// fake PNR is set instead. Returns a GalaxyError on transport or XML
// decoding failure.
func (p *PNRBooking) Booking() *galaxylib.GalaxyError {
	bk := &BookingRoot{}
	// Only the first PNR info block is used — presumably orders carry
	// exactly one; confirm with the order model.
	pnr := p.Order.PnrInofs[0]
	bk.BookingOTA_AirBookRQ = &BookingOTA_AirBookRQ{}
	// Office number
	bk.BookingOTA_AirBookRQ.BookingPOS = &BookingPOS{
		BookingSource: &BookingSource{
			AttrPseudoCityCode: pnr.OfficeNumber,
		},
	}
	// Flight segments
	bk.BookingOTA_AirBookRQ.BookingAirItinerary = &BookingAirItinerary{}
	bk.BookingOTA_AirBookRQ.BookingAirItinerary.BookingOriginDestinationOptions = &BookingOriginDestinationOptions{}
	bk.BookingOTA_AirBookRQ.BookingAirItinerary.BookingOriginDestinationOptions.BookingOriginDestinationOption = &BookingOriginDestinationOption{}
	var segments []*BookingFlightSegment
	defaultAirline := ""
	for _, v := range pnr.FlightSegments {
		segments = append(segments, &BookingFlightSegment{
			AttrArrivalDateTime:   v.ArriveDateTime(),
			AttrDepartureDateTime: v.DepartrueDateTime(),
			AttrCodeshareInd:      "false",
			AttrFlighttNumber:     v.FlyNo,
			AttrStatus:            "NN",
			AttrSegmentType:       "NORMAL",
			AttrRPH:               govalidator.ToString(v.TripSeq),
			BookingDepartureAirport: &BookingDepartureAirport{
				AttrLocationCode: v.DepartCityCode,
			},
			BookingArrivalAirport: &BookingArrivalAirport{
				AttrLocationCode: v.ArriveCityCode,
			},
			BookingMarketingAirline: &BookingMarketingAirline{
				AttrCode: v.MarketingAirLine,
			},
			BookingBookingClassAvail: &BookingBookingClassAvail{
				AttrResBookDesigCode: v.Cabin,
			},
		})
		// Remember the first segment's carrier for the OSI elements below.
		if defaultAirline == "" {
			defaultAirline = v.MarketingAirLine
		}
	}
	bk.BookingOTA_AirBookRQ.BookingAirItinerary.BookingOriginDestinationOptions.BookingOriginDestinationOption.BookingFlightSegment = segments
	// Passengers
	bk.BookingOTA_AirBookRQ.BookingTravelerInfo = &BookingTravelerInfo{}
	var bookingTraveler []*BookingAirTraveler
	ctcm := ""
	for i, t := range pnr.TravelerInfos {
		trl := &BookingAirTraveler{}
		trl.AttrPassengerTypeCode = t.Type
		trl.AttrGender = t.Gender
		trl.BookingPersonName = p.makeBookingPerson(t)
		doc := &BookingDocument{}
		// doc.AttrDocHolderInd = "true"
		doc.AttrBirthDate = t.Birthday
		doc.AttrDocType = t.IDCardType
		doc.AttrDocID = t.IDCardNo
		doc.AttrDocHolderNationality = t.Nationality
		doc.AttrDocIssueCountry = t.IDIssueCountry
		// NOTE(review): AttrBirthDate is assigned twice (also above) —
		// harmless but redundant.
		doc.AttrBirthDate = t.Birthday
		doc.AttrGender = t.Gender
		doc.AttrExpireDate = t.IDExpireDate
		doc.AttrDocTypeDetail = "P"
		doc.AttrRPH = govalidator.ToString(i + 1)
		doc.BookingDocHolderFormattedName = &BookingDocHolderFormattedName{
			BookingGivenName: &BookingGivenName{
				Text: t.GivenName,
			},
			BookingSurname: &BookingSurname{
				Text: t.SurName,
			},
		}
		trl.BookingDocument = doc
		//rph,_ := govalidator.ToString()
		trl.BookingTravelerRefNumber = append(trl.BookingTravelerRefNumber, &BookingTravelerRefNumber{
			AttrRPH: govalidator.ToString(i + 1),
		})
		trl.BookingComment = &BookingComment{
			Text: "HK",
		}
		// NOTE(review): every traveler is bound to segment "1" only —
		// confirm this is intended for multi-segment itineraries.
		trl.BookingFlightSegmentRPHs = &BookingFlightSegmentRPHs{
			&BookingFlightSegmentRPH{"1"},
		}
		// trl.BookingDocumentFlightBinding = &BookingDocumentFlightBinding{
		// 	BookingDocumentRPH: &BookingDocumentRPH{"1"},
		// 	BookingFlightSegmentRPH: &BookingFlightSegmentRPH{"1"},
		// }
		bookingTraveler = append(bookingTraveler, trl)
		// First traveler's mobile becomes the CTCM contact line.
		if ctcm == "" {
			ctcm = t.Mobile
		}
	}
	bk.BookingOTA_AirBookRQ.BookingTravelerInfo.BookingAirTraveler = bookingTraveler
	bk.BookingOTA_AirBookRQ.BookingTravelerInfo.BookingSpecialReqDetails = &BookingSpecialReqDetails{}
	bk.BookingOTA_AirBookRQ.BookingTravelerInfo.BookingSpecialReqDetails.BookingOtherServiceInformations = &BookingOtherServiceInformations{}
	var oths []*BookingOtherServiceInformation
	// OSI: order-level contact phone (CTCT) ...
	oths = append(oths, &BookingOtherServiceInformation{
		AttrCode: "OTHS",
		BookingText: &BookingText{
			Text: fmt.Sprintf("CTCT%s", p.Order.ContactInfo.MobilePhone),
		},
		BookingAirline: &BookingAirline{
			AttrCode: defaultAirline,
		},
	})
	// ... and traveler mobile (CTCM), bound to traveler 1.
	oths = append(oths, &BookingOtherServiceInformation{
		AttrCode: "OTHS",
		BookingText: &BookingText{
			Text: fmt.Sprintf("CTCM%s", ctcm),
		},
		BookingAirline: &BookingAirline{
			AttrCode: defaultAirline,
		},
		BookingTravelerRefNumber: []*BookingTravelerRefNumber{
			&BookingTravelerRefNumber{"1"},
		},
	})
	bk.BookingOTA_AirBookRQ.BookingTravelerInfo.BookingSpecialReqDetails.BookingOtherServiceInformations.BookingOtherServiceInformation = oths
	// Ticketing info: time limit is two hours before first departure.
	depTime := pnr.FlightSegments[0].DepartrueDateTime()
	layout := "2006-01-02T15:04:05"
	// NOTE(review): the parse error is ignored; a bad depTime silently
	// yields the zero time minus two hours.
	limitTime, _ := time.Parse(layout, depTime)
	limitTime = limitTime.Add(-2 * time.Hour)
	//contract := p.Order.ContactInfo[0]
	bk.BookingOTA_AirBookRQ.BookingTicketing = &BookingTicketing{}
	bk.BookingOTA_AirBookRQ.BookingTicketing.AttrTicketTimeLimit = limitTime.Format("2006-01-02T15:04:05")
	bk.BookingOTA_AirBookRQ.BookingTPA_Extensions = &BookingTPA_Extensions{
		BookingContactInfo: &BookingContactInfo{
			Text: p.Order.ContactInfo.MobilePhone, //"13910556253",
		},
		BookingEnvelopType: &BookingEnvelopType{
			Text: "KI",
		},
	}
	// Config switch: when booking is disabled, fake a PNR and skip the call.
	if booking := galaxylib.GalaxyCfgFile.MustBool("booking", "enableBooking"); booking == false {
		p.PNR = "TEST1231"
		return nil
	}
	ibe := NewIBE(bookingURL, bk.BookingOTA_AirBookRQ)
	rev, err := ibe.Reqeust() //ReqeustIBE(bookingURL, bk.BookingOTA_AirBookRQ)
	if err != nil {
		return err
		//return err
	}
	fmt.Println(string(rev))
	var rs *RSBookingOTA_AirBookRS
	if errXML := xml.Unmarshal(rev, &rs); errXML != nil {
		return galaxylib.DefaultGalaxyError.FromError(1, errXML)
	}
	p.PNR = rs.RSBookingAirReservation.RSBookingBookingReferenceID.AttrID
	galaxylib.GalaxyLogger.Warningln(p.PNR)
	return nil
}
// --- Request DTOs for OTA_AirBookRQ ---
// These structs look machine-generated (chidley-style). Note the leading
// space inside every xml tag name (e.g. `xml:" POS,..."`) — it is preserved
// here byte-for-byte; confirm encoding/xml handles it as intended before
// normalizing the tags.

// BookingAirItinerary wraps the itinerary's origin/destination options.
type BookingAirItinerary struct {
	BookingOriginDestinationOptions *BookingOriginDestinationOptions `xml:" OriginDestinationOptions,omitempty" json:"OriginDestinationOptions,omitempty"`
}

// BookingAirTraveler carries one passenger: name, document, comment and
// segment/traveler reference numbers.
type BookingAirTraveler struct {
	AttrGender            string           `xml:" Gender,attr" json:",omitempty"`
	AttrPassengerTypeCode string           `xml:" PassengerTypeCode,attr" json:",omitempty"`
	BookingComment        *BookingComment  `xml:" Comment,omitempty" json:"Comment,omitempty"`
	BookingDocument       *BookingDocument `xml:" Document,omitempty" json:"Document,omitempty"`
	// BookingDocumentFlightBinding *BookingDocumentFlightBinding `xml:" DocumentFlightBinding,omitempty" json:"DocumentFlightBinding,omitempty"`
	BookingFlightSegmentRPHs *BookingFlightSegmentRPHs   `xml:" FlightSegmentRPHs,omitempty" json:"FlightSegmentRPHs,omitempty"`
	BookingPersonName        *BookingPersonName          `xml:" PersonName,omitempty" json:"PersonName,omitempty"`
	BookingTravelerRefNumber []*BookingTravelerRefNumber `xml:" TravelerRefNumber,omitempty" json:"TravelerRefNumber,omitempty"`
}

// BookingAirline names a carrier by its two-letter code.
type BookingAirline struct {
	AttrCode string `xml:" Code,attr" json:",omitempty"`
}

// BookingArrivalAirport is the arrival airport/city code of a segment.
type BookingArrivalAirport struct {
	AttrLocationCode string `xml:" LocationCode,attr" json:",omitempty"`
}

// BookingBookingClassAvail carries the booking class (cabin) designator.
type BookingBookingClassAvail struct {
	AttrResBookDesigCode string `xml:" ResBookDesigCode,attr" json:",omitempty"`
}

// BookingRoot is the top-level request document.
type BookingRoot struct {
	BookingOTA_AirBookRQ *BookingOTA_AirBookRQ `xml:" OTA_AirBookRQ,omitempty" json:"OTA_AirBookRQ,omitempty"`
}

// BookingComment is a free-text comment element.
type BookingComment struct {
	Text string `xml:",chardata" json:",omitempty"`
}

// BookingContactInfo is the TPA extension contact phone number.
type BookingContactInfo struct {
	Text string `xml:",chardata" json:",omitempty"`
}

// BookingDepartureAirport is the departure airport/city code of a segment.
type BookingDepartureAirport struct {
	AttrLocationCode string `xml:" LocationCode,attr" json:",omitempty"`
}

// BookingDocHolderFormattedName is the document holder's latinized name.
type BookingDocHolderFormattedName struct {
	BookingGivenName *BookingGivenName `xml:" GivenName,omitempty" json:"GivenName,omitempty"`
	BookingSurname   *BookingSurname   `xml:" Surname,omitempty" json:"Surname,omitempty"`
}

// BookingDocument describes one traveler's identity document.
type BookingDocument struct {
	AttrBirthDate string `xml:" BirthDate,attr" json:",omitempty"`
	// AttrDocHolderInd string `xml:" DocHolderInd,attr" json:",omitempty"` // whether this is the document holder
	AttrDocHolderNationality      string                         `xml:" DocHolderNationality,attr" json:",omitempty"`
	AttrDocID                     string                         `xml:" DocID,attr" json:",omitempty"`
	AttrDocIssueCountry           string                         `xml:" DocIssueCountry,attr" json:",omitempty"`
	AttrDocType                   string                         `xml:" DocType,attr" json:",omitempty"`
	AttrDocTypeDetail             string                         `xml:" DocTypeDetail,attr" json:",omitempty"`
	AttrExpireDate                string                         `xml:" ExpireDate,attr" json:",omitempty"`
	AttrGender                    string                         `xml:" Gender,attr" json:",omitempty"`
	AttrRPH                       string                         `xml:" RPH,attr" json:",omitempty"`
	BookingDocHolderFormattedName *BookingDocHolderFormattedName `xml:" DocHolderFormattedName,omitempty" json:"DocHolderFormattedName,omitempty"`
}

// BookingDocumentFlightBinding links a document RPH to a segment RPH
// (currently unused — see the commented-out field above).
type BookingDocumentFlightBinding struct {
	BookingDocumentRPH      *BookingDocumentRPH      `xml:" DocumentRPH,omitempty" json:"DocumentRPH,omitempty"`
	BookingFlightSegmentRPH *BookingFlightSegmentRPH `xml:" FlightSegmentRPH,omitempty" json:"FlightSegmentRPH,omitempty"`
}

// BookingDocumentRPH is a document reference number.
type BookingDocumentRPH struct {
	Text string `xml:",chardata" json:",omitempty"`
}

// BookingEnvelopType is the TPA extension envelope type (e.g. "KI").
type BookingEnvelopType struct {
	Text string `xml:",chardata" json:",omitempty"`
}

// BookingEquipment names the aircraft equipment type (currently unused).
type BookingEquipment struct {
	AttrAirEquipType string `xml:" AirEquipType,attr" json:",omitempty"`
}

// BookingFlightSegment describes one flight segment of the itinerary.
type BookingFlightSegment struct {
	AttrArrivalDateTime   string `xml:" ArrivalDateTime,attr" json:",omitempty"`
	AttrCodeshareInd      string `xml:" CodeshareInd,attr" json:",omitempty"` // whether the flight is code-shared; normally "false"
	AttrDepartureDateTime string `xml:" DepartureDateTime,attr" json:",omitempty"`
	AttrFlighttNumber     string `xml:" FlightNumber,attr" json:",omitempty"`
	AttrRPH               string `xml:" RPH,attr" json:",omitempty"`         // segment number; referenced by traveler/SSR data — must be unique across segments
	AttrSegmentType       string `xml:" SegmentType,attr" json:",omitempty"` // NORMAL - regular, OPEN - open-dated, ARRIVAL_UNKOWN - ARNK; normally NORMAL
	AttrStatus            string `xml:" Status,attr" json:",omitempty"`      // segment status
	BookingArrivalAirport    *BookingArrivalAirport    `xml:" ArrivalAirport,omitempty" json:"ArrivalAirport,omitempty"`
	BookingBookingClassAvail *BookingBookingClassAvail `xml:" BookingClassAvail,omitempty" json:"BookingClassAvail,omitempty"`
	BookingDepartureAirport  *BookingDepartureAirport  `xml:" DepartureAirport,omitempty" json:"DepartureAirport,omitempty"`
	// BookingEquipment *BookingEquipment `xml:" Equipment,omitempty" json:"Equipment,omitempty"`
	BookingMarketingAirline *BookingMarketingAirline `xml:" MarketingAirline,omitempty" json:"MarketingAirline,omitempty"`
}

// BookingFlightSegmentRPH is a segment reference number.
type BookingFlightSegmentRPH struct {
	Text string `xml:",chardata" json:",omitempty"`
}

// BookingFlightSegmentRPHs wraps a single segment reference.
type BookingFlightSegmentRPHs struct {
	BookingFlightSegmentRPH *BookingFlightSegmentRPH `xml:" FlightSegmentRPH,omitempty" json:"FlightSegmentRPH,omitempty"`
}

// BookingGivenName is the given-name text element.
type BookingGivenName struct {
	Text string `xml:",chardata" json:",omitempty"`
}

// BookingMarketingAirline is the marketing carrier of a segment.
type BookingMarketingAirline struct {
	AttrCode string `xml:" Code,attr" json:",omitempty"`
}

// BookingOTA_AirBookRQ is the OTA AirBook request body.
type BookingOTA_AirBookRQ struct {
	BookingAirItinerary   *BookingAirItinerary   `xml:" AirItinerary,omitempty" json:"AirItinerary,omitempty"`
	BookingPOS            *BookingPOS            `xml:" POS,omitempty" json:"POS,omitempty"`
	BookingTPA_Extensions *BookingTPA_Extensions `xml:" TPA_Extensions,omitempty" json:"TPA_Extensions,omitempty"`
	BookingTicketing      *BookingTicketing      `xml:" Ticketing,omitempty" json:"Ticketing,omitempty"`
	BookingTravelerInfo   *BookingTravelerInfo   `xml:" TravelerInfo,omitempty" json:"TravelerInfo,omitempty"`
	XMLName               xml.Name               `xml:"OTA_AirBookRQ"`
}

// BookingOriginDestinationOption holds the itinerary's segment list.
type BookingOriginDestinationOption struct {
	BookingFlightSegment []*BookingFlightSegment `xml:" FlightSegment,omitempty" json:"FlightSegment,omitempty"`
}

// BookingOriginDestinationOptions wraps the single O&D option.
type BookingOriginDestinationOptions struct {
	BookingOriginDestinationOption *BookingOriginDestinationOption `xml:" OriginDestinationOption,omitempty" json:"OriginDestinationOption,omitempty"`
}

// BookingOtherServiceInformation is one OSI line (e.g. CTCT/CTCM contacts).
type BookingOtherServiceInformation struct {
	AttrCode                 string                      `xml:" Code,attr" json:",omitempty"`
	BookingAirline           *BookingAirline             `xml:" Airline,omitempty" json:"Airline,omitempty"`
	BookingText              *BookingText                `xml:" Text,omitempty" json:"Text,omitempty"`
	BookingTravelerRefNumber []*BookingTravelerRefNumber `xml:" TravelerRefNumber,omitempty" json:"TravelerRefNumber,omitempty"`
}

// BookingOtherServiceInformations is the OSI line container.
type BookingOtherServiceInformations struct {
	BookingOtherServiceInformation []*BookingOtherServiceInformation `xml:" OtherServiceInformation,omitempty" json:"OtherServiceInformation,omitempty"`
}

// BookingPOS is the point-of-sale wrapper.
type BookingPOS struct {
	BookingSource *BookingSource `xml:" Source,omitempty" json:"Source,omitempty"`
}

// BookingPersonName is a traveler's booked name with a language marker.
type BookingPersonName struct {
	AttrLanguageType string          `xml:" LanguageType,attr" json:",omitempty"`
	BookingSurname   *BookingSurname `xml:" Surname,omitempty" json:"Surname,omitempty"`
}

// BookingSource carries the office (pseudo city) code.
type BookingSource struct {
	AttrPseudoCityCode string `xml:" PseudoCityCode,attr" json:",omitempty"`
}

// BookingSpecialReqDetails wraps the OSI container.
type BookingSpecialReqDetails struct {
	BookingOtherServiceInformations *BookingOtherServiceInformations `xml:" OtherServiceInformations,omitempty" json:"OtherServiceInformations,omitempty"`
}

// BookingSurname is the surname text element.
type BookingSurname struct {
	Text string `xml:",chardata" json:",omitempty"`
}

// BookingTPA_Extensions holds TravelSky-specific extension fields.
type BookingTPA_Extensions struct {
	BookingContactInfo *BookingContactInfo `xml:" ContactInfo,omitempty" json:"ContactInfo,omitempty"`
	BookingEnvelopType *BookingEnvelopType `xml:" EnvelopType,omitempty" json:"EnvelopType,omitempty"`
}

// BookingText is a generic text element.
type BookingText struct {
	Text string `xml:",chardata" json:",omitempty"`
}

// BookingTicketing carries the ticketing time limit.
type BookingTicketing struct {
	AttrTicketTimeLimit string `xml:" TicketTimeLimit,attr" json:",omitempty"`
}

// BookingTravelerInfo groups travelers and special request details.
type BookingTravelerInfo struct {
	BookingAirTraveler       []*BookingAirTraveler     `xml:" AirTraveler,omitempty" json:"AirTraveler,omitempty"`
	BookingSpecialReqDetails *BookingSpecialReqDetails `xml:" SpecialReqDetails,omitempty" json:"SpecialReqDetails,omitempty"`
}

// BookingTravelerRefNumber is a traveler reference.
type BookingTravelerRefNumber struct {
	AttrRPH string `xml:" RPH,attr" json:",omitempty"` // traveler number; must be unique; required when booking split PNRs
}
/*************************************
	Response type
********************************************/
// --- Response DTOs for OTA_AirBookRS (same generated style and tag
// peculiarities as the request types above). ---

// RSBookingAirItinerary wraps the echoed flight segments.
type RSBookingAirItinerary struct {
	RSBookingFlightSegments *RSBookingFlightSegments `xml:" FlightSegments,omitempty" json:"FlightSegments,omitempty"`
}

// RSBookingAirReservation is the reservation result: itinerary, the PNR
// reference ID and any comments.
type RSBookingAirReservation struct {
	RSBookingAirItinerary       *RSBookingAirItinerary       `xml:" AirItinerary,omitempty" json:"AirItinerary,omitempty"`
	RSBookingBookingReferenceID *RSBookingBookingReferenceID `xml:" BookingReferenceID,omitempty" json:"BookingReferenceID,omitempty"`
	RSBookingComment            []*RSBookingComment          `xml:" Comment,omitempty" json:"Comment,omitempty"`
}

// RSBookingArrivalAirport is a segment's arrival location code.
type RSBookingArrivalAirport struct {
	AttrLocationCode string `xml:" LocationCode,attr" json:",omitempty"`
}

// RSBookingBookingClassAvail echoes the booked class designator.
type RSBookingBookingClassAvail struct {
	AttrResBookDesigCode string `xml:" ResBookDesigCode,attr" json:",omitempty"`
}

// RSBookingBookingReferenceID carries the PNR code (AttrID) read by Booking().
type RSBookingBookingReferenceID struct {
	AttrID         string `xml:" ID,attr" json:",omitempty"`
	AttrID_Context string `xml:" ID_Context,attr" json:",omitempty"`
}

// RSBookingRoot is the top-level response document.
type RSBookingRoot struct {
	RSBookingOTA_AirBookRS *RSBookingOTA_AirBookRS `xml:" OTA_AirBookRS,omitempty" json:"OTA_AirBookRS,omitempty"`
}

// RSBookingComment is a free-text response comment.
type RSBookingComment struct {
	Text string `xml:",chardata" json:",omitempty"`
}

// RSBookingDepartureAirport is a segment's departure location code.
type RSBookingDepartureAirport struct {
	AttrLocationCode string `xml:" LocationCode,attr" json:",omitempty"`
}

// RSBookingFlightSegment echoes one booked flight segment.
type RSBookingFlightSegment struct {
	AttrArrivalDateTime        string                      `xml:" ArrivalDateTime,attr" json:",omitempty"`
	AttrCodeshareInd           string                      `xml:" CodeshareInd,attr" json:",omitempty"`
	AttrDepartureDateTime      string                      `xml:" DepartureDateTime,attr" json:",omitempty"`
	AttrFlightNumber           string                      `xml:" FlightNumber,attr" json:",omitempty"`
	AttrNumberInParty          string                      `xml:" NumberInParty,attr" json:",omitempty"`
	AttrSegmentType            string                      `xml:" SegmentType,attr" json:",omitempty"`
	AttrStatus                 string                      `xml:" Status,attr" json:",omitempty"`
	RSBookingArrivalAirport    *RSBookingArrivalAirport    `xml:" ArrivalAirport,omitempty" json:"ArrivalAirport,omitempty"`
	RSBookingBookingClassAvail *RSBookingBookingClassAvail `xml:" BookingClassAvail,omitempty" json:"BookingClassAvail,omitempty"`
	RSBookingDepartureAirport  *RSBookingDepartureAirport  `xml:" DepartureAirport,omitempty" json:"DepartureAirport,omitempty"`
	RSBookingMarketingAirline  *RSBookingMarketingAirline  `xml:" MarketingAirline,omitempty" json:"MarketingAirline,omitempty"`
	RSBookingOperatingAirline  *RSBookingOperatingAirline  `xml:" OperatingAirline,omitempty" json:"OperatingAirline,omitempty"`
}

// RSBookingFlightSegments wraps the response segment list.
type RSBookingFlightSegments struct {
	RSBookingFlightSegment *RSBookingFlightSegment `xml:" FlightSegment,omitempty" json:"FlightSegment,omitempty"`
}

// RSBookingMarketingAirline is the marketing carrier code.
type RSBookingMarketingAirline struct {
	AttrCode string `xml:" Code,attr" json:",omitempty"`
}

// RSBookingOTA_AirBookRS is the OTA AirBook response body.
type RSBookingOTA_AirBookRS struct {
	RSBookingAirReservation *RSBookingAirReservation `xml:" AirReservation,omitempty" json:"AirReservation,omitempty"`
}

// RSBookingOperatingAirline is present in the schema but carries no fields.
type RSBookingOperatingAirline struct {
}
|
package phase
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/pmezard/go-difflib/difflib"
"io"
"log"
"net/http"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
)
// Phase replays one benchmark phase: it reads raw HTTP requests from an
// ammo file and compares live responses against recorded answers.
type Phase struct {
	answers Answers       // expected answers keyed by query id
	f       *os.File      // underlying ammo file
	r       *bufio.Reader // buffered reader over f
}
// CheckOpts configures Phase.Check.
type CheckOpts struct {
	MaxErrors int // abort the run after this many mismatches; 0 = unlimited
}
// New opens the ammo file and loads the answer set for the given phase,
// returning a Phase ready for Check. It terminates the process if either
// file cannot be loaded.
func New(dataPath string, phaseId uint8) *Phase {
	base := getPhaseFn(phaseId)
	ammo, err := os.Open(filepath.Join(dataPath, "ammo", base+".ammo"))
	if err != nil {
		log.Fatalf("cannot open ammo file: %s", err.Error())
	}
	answers, err := LoadAnswers(filepath.Join(dataPath, "answers", base+".answ"))
	if err != nil {
		log.Fatalf("cannot load answers: %s", err.Error())
	}
	return &Phase{answers, ammo, bufio.NewReader(ammo)}
}
// Check replays every request in the ammo file against server and compares
// each response to the recorded answer: status code first, then (for
// 200/201/202) the JSON body via deep equality. Mismatches are logged with
// a unified diff; after opts.MaxErrors mismatches the process exits.
//
// Fixes: response bodies are now closed on every path (they used to leak,
// preventing connection reuse), the manual partial-read loop is replaced by
// io.ReadFull, a stray semicolon is removed, and the "Exptected" typo in
// the mismatch log is corrected.
func (p *Phase) Check(server string, opts CheckOpts) {
	i := 0
	errors := 0
	addError := func() {
		errors++
		if opts.MaxErrors != 0 && errors >= opts.MaxErrors {
			log.Fatalf("Max errors reached (%d), exited", opts.MaxErrors)
		}
	}
	server = strings.TrimSuffix(server, "/")
	for {
		i++
		// Each ammo record is "<size> ...\n" followed by <size> bytes of a
		// raw HTTP request.
		szStr, err := p.r.ReadString(' ')
		if err != nil {
			if err == io.EOF {
				break
			}
			log.Fatalf("cannot read ammo file: %s", err.Error())
		}
		sz, err := strconv.ParseInt(strings.TrimSpace(szStr), 10, 64)
		if err != nil {
			log.Fatalf("cannot read ammo file: %s", err.Error())
		}
		_, err = p.r.ReadString('\n')
		if err != nil {
			log.Fatalf("cannot read ammo file: %s", err.Error())
		}
		buf := make([]byte, sz)
		// io.ReadFull replaces the hand-rolled partial-read loop.
		if _, err := io.ReadFull(p.r, buf); err != nil {
			log.Fatalf("cannot read ammo file: %s", err.Error())
		}
		req, err := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(buf)))
		if err != nil {
			log.Fatalf("Cannot parse request: %s", err.Error())
		}
		queryId := getQueryId(req.RequestURI)
		log.Printf("[%d] %s", i, req.RequestURI)
		expectedAnswer := p.answers[queryId]
		// Redirect the recorded request at the target server.
		req.URL, err = req.URL.Parse(server + req.RequestURI)
		if err != nil {
			log.Fatalf("Cannot parse URL: %s", err.Error())
		}
		req.RequestURI = ""
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			log.Printf("\tHTTP error: %s", err.Error())
			addError()
			continue
		}
		if resp.StatusCode != expectedAnswer.RespCode {
			resp.Body.Close() // avoid leaking the connection
			log.Printf("\tExpected code %d, got %d", expectedAnswer.RespCode, resp.StatusCode)
			if req.Method == http.MethodPost {
				// Re-parse the raw record to dump the request body for debugging.
				req, _ := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(buf)))
				_, _ = io.Copy(os.Stdout, req.Body)
			}
			addError()
			continue
		}
		if resp.StatusCode != 200 && resp.StatusCode != 201 && resp.StatusCode != 202 {
			resp.Body.Close()
			continue
		}
		var got, expected interface{}
		err = json.NewDecoder(resp.Body).Decode(&got)
		resp.Body.Close() // body is fully consumed (or unusable) past this point
		if err != nil {
			log.Printf("\tCannot parse response JSON: %s", err.Error())
			addError()
			continue
		}
		err = json.Unmarshal(expectedAnswer.Body, &expected)
		if err != nil {
			log.Fatalf("cannot parse ammo JSON: %s", err.Error())
		}
		if !reflect.DeepEqual(got, expected) {
			log.Printf("\tExpected: %s", spewConfig.Sprint(expected))
			log.Printf("\t Got: %s", spewConfig.Sprint(got))
			diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
				A:        difflib.SplitLines(spewConfig.Sdump(expected)),
				B:        difflib.SplitLines(spewConfig.Sdump(got)),
				FromFile: "Expected",
				ToFile:   "Got",
				Context:  1,
			})
			log.Printf("\t Diff:\n%s", diff)
			addError()
			continue
		}
	}
	log.Printf("------- Finished")
	log.Printf("Valid answers: %d (%.02f%%)", i-errors, float64(i-errors)/float64(i)*100)
}
// getPhaseFn builds the base file name for a phase: "phase_<id>_post" for
// phase 2 (the write phase) and "phase_<id>_get" otherwise.
func getPhaseFn(phaseId uint8) string {
	if phaseId == 2 {
		return fmt.Sprintf("phase_%d_%s", phaseId, "post")
	}
	return fmt.Sprintf("phase_%d_%s", phaseId, "get")
}
|
/*
* winnow: weighted point selection
*
* input:
* matrix: an integer matrix, whose values are used as masses
* mask: a boolean matrix showing which points are eligible for
* consideration
* nrows, ncols: the number of rows and columns
* nelts: the number of points to select
*
* output:
* points: a vector of (x, y) points
*
 */
package main
import (
"flag"
"fmt"
"sort"
)
// ByteMatrix is a row-major byte matrix backed by a flat slice.
type ByteMatrix struct {
	Rows, Cols uint32
	array      []byte
}

// WrapBytes builds a ByteMatrix view over existing storage.
func WrapBytes(r, c uint32, bytes []byte) *ByteMatrix {
	return &ByteMatrix{Rows: r, Cols: c, array: bytes}
}

// NewByteMatrix allocates a zeroed r-by-c matrix.
func NewByteMatrix(r, c uint32) *ByteMatrix {
	return WrapBytes(r, c, make([]byte, r*c))
}

// Row returns a view of row i.
func (m *ByteMatrix) Row(i uint32) []byte {
	start := i * m.Cols
	return m.array[start : start+m.Cols]
}

// Bytes returns a view of the full Rows*Cols storage.
func (m *ByteMatrix) Bytes() []byte {
	return m.array[:m.Rows*m.Cols]
}
// is_bench switches to benchmark mode: the mask is synthesized and no
// matrix/mask data is read from stdin, and output is suppressed.
var is_bench = flag.Bool("is_bench", false, "")

// Shared kernel state (package-level by design of this benchmark program).
var matrix []byte   // flattened nrows*ncols mass values
var mask [][]bool   // per-point eligibility
var points []uint32 // selected flat indexes (row*ncols + col)
// WinnowPoints adapts a set of matrix indexes to sort.Interface, ordering
// candidates by their mass in the matrix and breaking ties by index.
type WinnowPoints struct {
	m *ByteMatrix
	e []uint32 // indexes into the ByteMatrix 'm'
}

// Len reports the number of candidate points.
func (p *WinnowPoints) Len() int {
	return len(p.e)
}

// Swap exchanges two candidates.
func (p *WinnowPoints) Swap(i, j int) {
	p.e[j], p.e[i] = p.e[i], p.e[j]
}

// Less orders by mass first, then by index for equal masses.
func (p *WinnowPoints) Less(i, j int) bool {
	vi := p.m.array[p.e[i]]
	vj := p.m.array[p.e[j]]
	if vi == vj {
		return p.e[i] < p.e[j]
	}
	return vi < vj
}
// Winnow collects every masked point of the matrix, sorts the candidates by
// mass (ties broken by index), and writes nelts evenly spaced picks from
// the sorted order into the package-level points slice.
func Winnow(m *ByteMatrix, nrows, ncols, nelts uint32) {
	var values WinnowPoints
	values.m = m
	for i := uint32(0); i < nrows; i++ {
		for j := uint32(0); j < ncols; j++ {
			if *is_bench {
				// Benchmark mode synthesizes the mask instead of reading it.
				mask[i][j] = ((i * j) % (ncols + 1)) == 1
			}
			if mask[i][j] {
				idx := i*ncols + j
				values.e = append(values.e, idx)
			}
		}
	}
	sort.Sort(&values)
	// NOTE(review): integer division — if nelts exceeds the candidate
	// count, chunk is 0 and every pick repeats values.e[0] (and an empty
	// candidate set would panic). Confirm inputs guarantee
	// nelts <= len(values.e).
	chunk := uint32(values.Len()) / nelts
	for i := uint32(0); i < nelts; i++ {
		points[i] = values.e[i*chunk]
	}
}
// read_integer scans tokens from stdin until one parses as an int and
// returns it; unparsable tokens are skipped.
// NOTE(review): if stdin is exhausted (or a token never consumes), Scanf
// keeps returning 0 and this loop spins forever — confirm callers always
// provide well-formed input.
func read_integer() int {
	var value int
	// Idiomatic endless loop (was `for true`).
	for {
		if read, _ := fmt.Scanf("%d", &value); read == 1 {
			break
		}
	}
	return value
}
// read_matrix fills the package-level matrix (row-major) with nrows*ncols
// integers read from stdin, truncating each to a byte.
func read_matrix(nrows, ncols uint32) {
	for i := uint32(0); i < nrows; i++ {
		for j := uint32(0); j < ncols; j++ {
			matrix[i*ncols+j] = byte(read_integer())
		}
	}
}
// read_mask fills the package-level mask from stdin: a value of 1 marks a
// point eligible, anything else ineligible.
func read_mask(nrows, ncols uint32) {
	for i := uint32(0); i < nrows; i++ {
		for j := uint32(0); j < ncols; j++ {
			mask[i][j] = (read_integer() == 1)
		}
	}
}
// main reads the problem dimensions (and, outside benchmark mode, the
// matrix and mask) from stdin, runs the winnow selection, and prints the
// chosen (row, col) points.
func main() {
	var nrows, ncols, nelts uint32
	flag.Parse()
	nrows = uint32(read_integer())
	ncols = uint32(read_integer())
	m := NewByteMatrix(nrows, ncols)
	// The kernel's package-level views alias the matrix storage.
	matrix = m.array
	mask = make ([][]bool, nrows)
	for i := range mask {
		mask [i] = make ([]bool, ncols)
	}
	if !*is_bench {
		read_matrix(nrows, ncols)
		read_mask(nrows, ncols)
	}
	nelts = uint32(read_integer())
	points = make([]uint32, nelts)
	Winnow(m, nrows, ncols, nelts)
	if !*is_bench {
		// Emit the count followed by one "row col" pair per selected point.
		fmt.Printf("%d\n", nelts)
		for i := uint32(0); i < nelts; i++ {
			fmt.Printf("%d %d\n", points[i]/ncols, points[i]%ncols)
		}
		fmt.Printf("\n")
	}
}
|
package main
import (
"SoftwareGoDay1/humanity"
)
// main builds a small list of pilots and runs mission preparation on each,
// calling PrepareMissionPart twice per pilot as in the original sequence.
func main() {
	// Earlier experiments, kept for reference:
	// data.ReadFile("./test.csv")
	// data.LineToCSV("abc,def,ghi")
	// humanity.NewHumanFromCsvFile("./test.csv")
	// humanity.NewHumanFromJsonFile("./medium.json")
	crew := []humanity.Preparer{
		&humanity.Pilot{
			Human: &humanity.Human{
				Name:    "Jason",
				Age:     25,
				Country: "USA",
				Ready:   false,
			},
		},
		&humanity.Pilot{
			Human: &humanity.Human{
				Name:    "Jean-René",
				Age:     77,
				Country: "France",
				Ready:   false,
			},
		},
	}
	for _, member := range crew {
		humanity.PrepareMissionPart(member)
		humanity.PrepareMissionPart(member)
	}
}
|
// Package main - пакет поискового робота для задания 7
package main
import (
"fmt"
"go.core/lesson7/pkg/cache/local"
"go.core/lesson7/pkg/crawler"
"go.core/lesson7/pkg/crawler/spider"
"go.core/lesson7/pkg/engine"
"go.core/lesson7/pkg/index"
"go.core/lesson7/pkg/storage"
"strings"
)
// gosearch is the GoSearch search server: it wires together the
// crawler, the index, the document storage, and the query engine.
type gosearch struct {
	engine *engine.Service
	scanner crawler.Interface
	index index.Interface
	storage storage.Interface
}
// new constructs a gosearch instance: a spider-backed crawler, a fresh
// index and storage, and an engine persisting to a local cache file.
func new() *gosearch {
	var s spider.Scanner
	sc := crawler.New(s)
	idx := index.New()
	st := storage.New()
	return &gosearch{
		scanner: sc,
		index:   idx,
		storage: st,
		engine:  engine.New(idx, st, local.New("../data/storage.txt")),
	}
}
// init loads previously cached data if available; otherwise it scans
// the given URLs synchronously before returning. When the cache does
// load, a background rescan still refreshes index, storage and cache
// without blocking startup.
// Fix: the scan/index/store/save sequence was duplicated verbatim in
// both branches; it is now a single helper, scanAndStore.
func (gs *gosearch) init(urls []string) {
	if err := gs.engine.Load(); err != nil {
		// No usable cache: scan synchronously so search has data.
		gs.scanAndStore(urls)
		return
	}
	// Cache loaded: refresh asynchronously and continue.
	go gs.scanAndStore(urls)
}

// scanAndStore scans the URLs (depth 2), feeds the resulting documents
// into the index and storage, and saves them to the engine cache.
func (gs *gosearch) scanAndStore(urls []string) {
	docs, err := gs.scanner.Scan(urls, 2)
	if err != nil {
		fmt.Println("Не удалось просканировать сайты")
		return
	}
	gs.index.Create(docs)
	gs.storage.Create(docs)
	if err := gs.engine.Save(docs); err != nil {
		fmt.Println("Не удалось закэшировать данные")
	}
}
// run starts an interactive command-line search loop. It prompts for a
// query, prints matching documents, and exits once reading from stdin
// fails (e.g. on EOF).
func (gs *gosearch) run() {
	for {
		fmt.Print("Введите поисковый запрос: ")
		var query string
		if _, err := fmt.Scanf("%s\n", &query); err != nil {
			fmt.Println("Программа завершила работу.")
			return
		}
		// Queries are matched case-insensitively.
		docs := gs.engine.Search(strings.ToLower(query))
		fmt.Printf("Результаты поиска по запросу \"%s\":\nНайдено всего: %d\n", query, len(docs))
		for _, doc := range docs {
			fmt.Println(doc)
		}
	}
}
// main wires up the GoSearch service, seeds it with a fixed URL list,
// and hands control to the interactive search loop.
func main() {
	gs := new()
	gs.init([]string{
		"https://altech.online",
		"https://www.coffeestainstudios.com",
		"https://golangs.org",
		"https://www.cyberpunk.net/ru/ru/",
	})
	gs.run()
}
|
package main
import (
"flag"
"fmt"
"image"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
"os"
)
var in string
// init registers the -in flag naming an optional input image file;
// when left empty, main reads the image from standard input instead.
func init() {
	flag.StringVar(&in, "in", "", "input file")
}
// main decodes the header of a GIF/JPEG/PNG image read from stdin (or
// from the file named by -in) and prints its format and dimensions as
// a tab-separated row.
// Fixes: the opened file was never closed, and the output row used
// fmt.Println(fmt.Sprintf(...)) instead of a single fmt.Printf.
func main() {
	flag.Parse()
	reader := os.Stdin
	if in != "" {
		file, err := os.Open(in)
		if err != nil {
			fatal(err.Error())
		}
		// Release the file descriptor when main returns.
		defer file.Close()
		reader = file
	}
	// DecodeConfig reads only the image header, not the pixel data.
	config, format, err := image.DecodeConfig(reader)
	if err != nil {
		fatal(err.Error())
	}
	fmt.Println("format\twidth\theight")
	fmt.Printf("%s\t%d\t%d\n", format, config.Width, config.Height)
}
// fatal prints its arguments to stderr and exits with status 1.
// Bug fix: the arguments were previously dropped — Fprintln was called
// with only os.Stderr, so every failure printed a blank line.
func fatal(a ...interface{}) {
	fmt.Fprintln(os.Stderr, a...)
	os.Exit(1)
}
|
package handler
import (
	"time"

	"github.com/dgrijalva/jwt-go"
	"github.com/micro/go-micro/errors"
	"golang.org/x/crypto/bcrypt"
	"golang.org/x/net/context"

	"github.com/dakstudios/auth-srv/db"
	auth "github.com/dakstudios/auth-srv/proto/auth"
)
const (
jwtSecret = "some_secure_secret"
)
// userClaims is the JWT payload issued by Authenticate: the user's ID
// plus the registered claims (expiry, issuer) from jwt.StandardClaims.
type userClaims struct {
	ID string `json:"id"`
	jwt.StandardClaims
}
type Auth struct{}
// Authenticate verifies an email/password pair against the user store
// and, on success, returns a signed HS256 JWT for that user in res.
// Failures are reported as micro errors: BadRequest for missing fields
// or wrong credentials, InternalServerError for lookup/signing errors.
func (a *Auth) Authenticate(ctx context.Context, req *auth.AuthenticateRequest, res *auth.AuthenticateResponse) error {
	if len(req.Email) == 0 || len(req.Password) == 0 {
		return errors.BadRequest("srv.auth.Authenticate", "invalid_request")
	}
	user, err := db.FindUser(req.Email)
	if err != nil {
		return errors.InternalServerError("srv.auth.Authenticate", "server_error")
	}
	if err = bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(req.Password)); err != nil {
		return errors.BadRequest("srv.auth.Authenticate", "access_denied")
	}
	claims := userClaims{
		user.Id,
		jwt.StandardClaims{
			// ExpiresAt is an absolute Unix timestamp in seconds. The
			// previous literal 15000 pointed at Jan 1 1970, so every
			// token was issued already expired. Expire 15000 seconds
			// from now instead.
			ExpiresAt: time.Now().Add(15000 * time.Second).Unix(),
			Issuer:    "org.dakstudio.srv.auth",
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	signed, err := token.SignedString([]byte(jwtSecret))
	if err != nil {
		return errors.InternalServerError("srv.auth.Authenticate", "server_error")
	}
	res.Token = &auth.Token{Token: signed}
	return nil
}
// Authorize validates a JWT issued by Authenticate and reports in res
// whether the token's subject holds the requested permission.
// Bug fix: the error returned by jwt.ParseWithClaims was ignored, so a
// malformed token left token == nil and the token.Valid access below
// panicked. A nil req.Token is now also rejected instead of panicking.
func (a *Auth) Authorize(ctx context.Context, req *auth.AuthorizeRequest, res *auth.AuthorizeResponse) error {
	if req.Token == nil || len(req.Token.Token) == 0 || len(req.Permission) == 0 {
		return errors.BadRequest("srv.auth.Authenticate", "invalid_request")
	}
	token, err := jwt.ParseWithClaims(req.Token.Token, &userClaims{}, func(token *jwt.Token) (interface{}, error) {
		// All tokens are signed with the shared HMAC secret.
		return []byte(jwtSecret), nil
	})
	if err != nil || !token.Valid {
		return errors.BadRequest("srv.auth.Authenticate", "access_denied")
	}
	claims, ok := token.Claims.(*userClaims)
	if !ok {
		return errors.InternalServerError("srv.auth.Authenticate", "server_error")
	}
	// NOTE(review): the error ids in this handler say "Authenticate" —
	// likely copy-paste; confirm whether clients match on the id before
	// renaming to "srv.auth.Authorize".
	authorized, err := db.Authorize(claims.ID, req.Permission)
	if err != nil {
		return errors.InternalServerError("srv.auth.Authenticate", "server_error")
	}
	res.Authorized = authorized
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.