text stringlengths 11 4.05M |
|---|
package nrpc
import (
"context"
"github.com/stretchr/testify/require"
"testing"
"time"
)
// TestClient_Call starts an in-process nrpc server, points a client at it,
// and verifies both the error message and the "solid" classification of
// errors returned by two service methods.
func TestClient_Call(t *testing.T) {
	s := NewServer(ServerOptions{Addr: "127.0.0.1:10087"})
	s.Register(&TestService{})
	go s.Start(nil)
	defer s.Shutdown(context.Background())
	// Give the server time to start listening before dialing.
	// NOTE(review): a fixed sleep is racy on slow machines — confirm whether
	// Server exposes a readiness signal that could replace it.
	time.Sleep(time.Second)
	c := NewClient(ClientOptions{MaxRetries: 1})
	c.Register("TestService", "127.0.0.1:10087")
	// Method1 is expected to fail with a non-solid error.
	err := c.Query("TestService.Method1").Do(context.Background())
	require.Error(t, err)
	require.Equal(t, "test error", err.Error())
	require.False(t, IsSolid(err))
	// Method2 echoes the request payload inside a solid error.
	in := &TestIn{Hello: "world3"}
	err = c.Query("TestService.Method2", in).Do(context.Background())
	require.Error(t, err)
	require.Equal(t, "test error: world3", err.Error())
	require.True(t, IsSolid(err))
}
|
package main
import (
"encoding/json"
"fmt"
"time"
"reflect"
)
// main demonstrates JSON round-tripping of the Student type: first
// marshalling a populated value, then unmarshalling a fixed JSON document.
func main() {
	Serialize()
	Unmarshal()
}
// Student models a pupil record as exchanged in JSON.
type Student struct {
	StuId   int       `json:"id"`       // numeric student identifier
	Name    string    `json:"name"`     // display name
	Class   string    `json:"class"`    // class label, e.g. "Class F"
	RegTime time.Time `json:"reg_time"` // registration timestamp (RFC 3339 in JSON)
}
// Serialize populates a Student and prints its JSON encoding together with
// the concrete type of the marshalled byte slice.
func Serialize() {
	stu := new(Student)
	stu.StuId = 1001
	stu.Name = "zs"
	stu.Class = "Class F"
	stu.RegTime = time.Now()
	// stu is already a *Student; passing it directly avoids marshalling a
	// needless **Student. (Pass a pointer, especially when maps are involved.)
	b, err := json.Marshal(stu)
	if err != nil {
		fmt.Println("Marshal error:", err.Error())
		return
	}
	fmt.Println("After Marshal stu:", string(b))
	fmt.Println(reflect.TypeOf(b))
}
// Unmarshal decodes a fixed JSON document into a Student and prints the
// result together with its concrete type (*main.Student).
func Unmarshal() {
	jsonString := `{"id":1001,"name":"zs","class":"Class F","reg_time":"2020-04-30T22:32:21.7691063+08:00"}`
	stu := new(Student)
	// stu is already a *Student; json.Unmarshal needs exactly one level of
	// pointer, so pass it directly instead of &stu (**Student).
	err := json.Unmarshal([]byte(jsonString), stu)
	if err != nil {
		fmt.Println("Unmarshal error:", err.Error())
		return
	}
	fmt.Println("After Unmarshal from json:", stu)
	fmt.Println(reflect.TypeOf(stu))
}
|
package timehelper
import (
"fmt"
"reflect"
"testing"
"time"
)
// TestIntAsMonth verifies the int-to-time.Month conversion, including the
// rejection of out-of-range values (which must yield month0 and an error).
func TestIntAsMonth(t *testing.T) {
	cases := []struct {
		name    string
		month   int
		want    time.Month
		wantErr bool
	}{
		{name: "1e", month: 0, want: month0, wantErr: true},
		{name: "2e", month: -23, want: month0, wantErr: true},
		{name: "3e", month: 23, want: month0, wantErr: true},
		{name: "1", month: 6, want: time.June, wantErr: false},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got, err := IntAsMonth(tc.month)
			if hasErr := err != nil; hasErr != tc.wantErr {
				t.Errorf("IntAsMonth() error = %v, wantErr %v", err, tc.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("IntAsMonth() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestIsLeapYear checks leap-year detection, covering the century rules
// (1800/1900 are common years, 2000 is a leap year) and ordinary years.
func TestIsLeapYear(t *testing.T) {
	cases := []struct {
		name string
		year int
		want bool
	}{
		{"1", 1800, false},
		{"2", 1900, false},
		{"3", 2000, true},
		{"4", 2018, false},
		{"5", 2020, true},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := IsLeapYear(tc.year); got != tc.want {
				t.Errorf("IsLeapYear() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestLastDayOfMonth verifies LastDayOfMonth for an invalid month sentinel
// (month0) and for February/December in common and leap years.
func TestLastDayOfMonth(t *testing.T) {
	type args struct {
		year  int
		month time.Month
	}
	tests := []struct {
		name    string
		args    args
		want    time.Time
		wantErr bool
	}{
		// month0 is the invalid-month sentinel — must produce an error.
		{name: "1e", args: args{year: 2018, month: month0}, want: time0, wantErr: true},
		{name: "1", args: args{year: 2018, month: time.February}, want: dateYMDPrim(2018, time.February, 28), wantErr: false},
		{name: "2", args: args{year: 2018, month: time.December}, want: dateYMDPrim(2018, time.December, 31), wantErr: false},
		// Leap-year February has 29 days.
		{name: "3", args: args{year: 2020, month: time.February}, want: dateYMDPrim(2020, time.February, 29), wantErr: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := LastDayOfMonth(tt.args.year, tt.args.month)
			if (err != nil) != tt.wantErr {
				t.Errorf("LastDayOfMonth() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("LastDayOfMonth() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestDaysInMonth verifies the day count for common-year February,
// leap-year February (both the 4-year and 400-year rules) and a 30-day month.
func TestDaysInMonth(t *testing.T) {
	type args struct {
		year  int
		month time.Month
	}
	tests := []struct {
		name    string
		args    args
		want    int
		wantErr bool
	}{
		{name: "1", args: args{year: 2018, month: time.February}, want: 28},
		{name: "2", args: args{year: 2020, month: time.February}, want: 29},
		{name: "3", args: args{year: 2000, month: time.February}, want: 29},
		// Was duplicated as "1"; renamed so every subtest has a unique name.
		{name: "4", args: args{year: 2018, month: time.June}, want: 30},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := DaysInMonth(tt.args.year, tt.args.month)
			if (err != nil) != tt.wantErr {
				t.Errorf("DaysInMonth() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("DaysInMonth() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestDateYMDEr verifies that DateYMDEr rejects impossible dates (month 40,
// April 31) with a zero time, and builds valid dates correctly.
func TestDateYMDEr(t *testing.T) {
	type args struct {
		year  int
		month int
		day   int
	}
	tests := []struct {
		name    string
		args    args
		want    time.Time
		wantErr bool
	}{
		// Error rows leave want at its zero value: the test body only checks
		// that the returned time is zero when an error is reported, so the
		// previously-populated want values were dead data.
		{name: "1e", args: args{year: 2018, month: 40, day: 3}, wantErr: true},
		{name: "2e", args: args{year: 2018, month: 4, day: 31}, wantErr: true},
		{name: "1", args: args{year: 2018, month: 4, day: 2}, want: dateYMDPrim(2018, time.April, 2), wantErr: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := DateYMDEr(tt.args.year, tt.args.month, tt.args.day)
			if (err != nil) != tt.wantErr {
				t.Errorf("DateYMDEr() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if tt.wantErr {
				// On error the function must return the zero time.
				if !got.IsZero() {
					t.Errorf("DateYMDEr() = %v, want zero Time", got)
				}
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("DateYMDEr() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestFirstWeekdayInYear checks that out-of-range weekday values are
// rejected (returning time0) and that the first Monday of 2018 is found.
func TestFirstWeekdayInYear(t *testing.T) {
	cases := []struct {
		name    string
		year    int
		weekday time.Weekday
		want    time.Time
		wantErr bool
	}{
		{name: "1e", year: 2018, weekday: time.Weekday(-123), want: time0, wantErr: true},
		{name: "2e", year: 2018, weekday: time.Weekday(123), want: time0, wantErr: true},
		{name: "1", year: 2018, weekday: time.Monday, want: dateYMDPrim(2018, time.January, 1), wantErr: false},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got, err := FirstWeekdayInYear(tc.year, tc.weekday)
			if hasErr := err != nil; hasErr != tc.wantErr {
				t.Errorf("FirstWeekdayInYear() error = %v, wantErr %v", err, tc.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("FirstWeekdayInYear() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestTwoFirstWeekdayInYearEqual cross-checks FirstWeekdayInYear against
// the alternative implementation FirstWeekdayInYear2: for each case both
// must return the same result and the same error.
func TestTwoFirstWeekdayInYearEqual(t *testing.T) {
	type args struct {
		year    int
		weekday time.Weekday
	}
	tests := []struct {
		name  string
		args  args
		args2 args
	}{
		// Both implementations must agree on invalid weekdays as well.
		{name: "1e", args: args{year: 2018, weekday: time.Weekday(-123)}, args2: args{year: 2018, weekday: time.Weekday(123)}},
		{name: "1", args: args{year: 2018, weekday: time.Monday}, args2: args{year: 2018, weekday: time.Monday}},
		{name: "2", args: args{year: 2020, weekday: time.Friday}, args2: args{year: 2020, weekday: time.Friday}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := FirstWeekdayInYear(tt.args.year, tt.args.weekday)
			got2, err2 := FirstWeekdayInYear2(tt.args2.year, tt.args2.weekday)
			// DeepEqual compares the errors structurally, not just nil-ness.
			if !reflect.DeepEqual(err, err2) {
				t.Errorf("FirstWeekdayInYear() error = %v, FirstWeekdayInYear2() error = %v", err, err2)
				return
			}
			if got != got2 {
				t.Errorf("FirstWeekdayInYear() = %v, FirstWeekdayInYear2() = %v", got, got2)
			}
		})
	}
}
// TestTwoFridaysInYearEqual cross-checks the two FridaysInYear
// implementations over a pre-Gregorian-cycle year (1800) and the
// decade 2000-2009.
func TestTwoFridaysInYearEqual(t *testing.T) {
	type testCase struct {
		name string
		year int
	}
	cases := []testCase{{name: "1800", year: 1800}}
	for i := 0; i < 10; i++ {
		cases = append(cases, testCase{name: fmt.Sprint(i), year: 2000 + i})
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			a, b := FridaysInYear(tc.year), FridaysInYear2(tc.year)
			if a != b {
				t.Errorf("FridaysInYear() = %v, FridaysInYear2() = %v", a, b)
			}
		})
	}
}
|
package parser
import (
"errors"
"fmt"
"reflect"
)
// Parser represents a parser. It holds the main grammar, an optional
// error-grammar used to improve diagnostics, and a per-rule recursion
// counter used to enforce MaxRecursionLevel.
type Parser struct {
	grammar    *Rule
	errGrammar *Rule
	// recursionRegister counts how often each rule has been entered;
	// it is reset at the start of each parse when the limit is enabled.
	recursionRegister recursionRegister
	// MaxRecursionLevel defines the maximum tolerated recursion level.
	// The limitation is disabled when MaxRecursionLevel is set to 0
	MaxRecursionLevel uint
}
// NewParser creates a new parser instance. The grammar is mandatory and
// must validate; the error-grammar is optional but, when provided, must
// validate as well.
func NewParser(grammar *Rule, errGrammar *Rule) (*Parser, error) {
	if grammar == nil {
		return nil, errors.New("missing grammar")
	}
	if err := ValidatePattern(grammar); err != nil {
		return nil, fmt.Errorf("invalid grammar: %w", err)
	}
	if errGrammar != nil {
		if err := ValidatePattern(errGrammar); err != nil {
			return nil, fmt.Errorf("invalid error-grammar: %w", err)
		}
	}
	// Collect every rule reachable from both grammars into the register
	// used for recursion tracking.
	register := recursionRegister{}
	findRules(grammar, register)
	findRules(errGrammar, register)
	parser := &Parser{
		grammar:           grammar,
		errGrammar:        errGrammar,
		recursionRegister: register,
		// A zero MaxRecursionLevel disables the recursion limit.
		MaxRecursionLevel: 0,
	}
	return parser, nil
}
// handlePattern dispatches the given pattern to the parse method matching
// its concrete type. Rules are parsed on a fresh sub-scanner; patterns
// handled by parseRepeated/parseSequence/parseNot yield no fragment here.
func (pr Parser) handlePattern(
	debug *DebugProfile,
	scan *scanner,
	pattern Pattern,
	level uint,
) (frag Fragment, err error) {
	switch pt := pattern.(type) {
	case *Rule:
		frag, err = pr.parseRule(debug, scan.New(), pt, level)
		if err, ok := err.(*ErrUnexpectedToken); ok {
			// Override expected pattern to the higher-order rule
			err.Expected = pt
		}
	case *Exact:
		frag, err = pr.parseExact(debug, scan, pt, level)
	case *Lexed:
		frag, err = pr.parseLexed(debug, scan, pt, level)
	case *Repeated:
		err = pr.parseRepeated(debug, scan, pt.Min, pt.Max, pt, level)
	case Sequence:
		err = pr.parseSequence(debug, scan, pt, level)
	case Either:
		frag, err = pr.parseEither(debug, scan, pt, level)
	case Not:
		err = pr.parseNot(debug, scan, pt, level)
	default:
		// Unknown pattern types indicate a programming error, not bad input.
		panic(fmt.Errorf(
			"unsupported pattern type: %s",
			reflect.TypeOf(pattern),
		))
	}
	return
}
// parseNot implements negative lookahead: it succeeds (consuming nothing)
// when ptr.Pattern does NOT match at the current position, and returns an
// unexpected-token error when it does match.
func (pr Parser) parseNot(
	debug *DebugProfile,
	scan *scanner,
	ptr Not,
	level uint,
) error {
	debugIndex := debug.record(ptr, scan.Lexer.cr, level)
	// Remember the position so any partial consumption can be undone.
	beforeCr := scan.Lexer.cr
	_, err := pr.handlePattern(debug, scan, ptr.Pattern, level+1)
	switch err := err.(type) {
	case *ErrUnexpectedToken:
		// Inner pattern mismatched — the negation succeeds.
		scan.Set(beforeCr)
		return nil
	case errEOF:
		// EOF also counts as "did not match".
		scan.Set(beforeCr)
		return nil
	case nil:
		// Inner pattern matched — the negation fails.
		debug.markMismatch(debugIndex)
		return &ErrUnexpectedToken{
			At:       beforeCr,
			Expected: ptr,
		}
	default:
		// Propagate unexpected errors unchanged.
		return err
	}
}
// parseLexed reads a token using the pattern's user-supplied lexing
// function and verifies that a token was produced and meets the
// configured minimum length; otherwise it reports an unexpected token
// at the pre-read position.
func (pr Parser) parseLexed(
	debug *DebugProfile,
	scanner *scanner,
	expected *Lexed,
	level uint,
) (Fragment, error) {
	debugIndex := debug.record(expected, scanner.Lexer.cr, level)
	if scanner.Lexer.reachedEOF() {
		debug.markMismatch(debugIndex)
		return nil, errEOF{}
	}
	// Capture the position before reading for error reporting.
	beforeCr := scanner.Lexer.cr
	tk, err := scanner.ReadUntil(expected.Fn, expected.Kind)
	if err != nil {
		return nil, err
	}
	// A nil token or one shorter than MinLen is a mismatch.
	if tk == nil || tk.VEnd.Index-tk.VBegin.Index < expected.MinLen {
		debug.markMismatch(debugIndex)
		return nil, &ErrUnexpectedToken{
			At:       beforeCr,
			Expected: expected,
		}
	}
	return tk, nil
}
// parseRepeated matches repeated.Pattern between min and max times
// (min == 0 makes the repetition optional, max == 0 means unbounded) and
// appends each match to the scanner. When the repetition ends on a
// mismatch or EOF after the minimum was reached, the scanner is reset to
// the position of the last successful match.
func (pr Parser) parseRepeated(
	debug *DebugProfile,
	scanner *scanner,
	min uint,
	max uint,
	repeated *Repeated,
	level uint,
) error {
	debugIndex := debug.record(repeated, scanner.Lexer.cr, level)
	num := uint(0)
	lastPosition := scanner.Lexer.cr
	for {
		if max != 0 && num >= max {
			break
		}
		frag, err := pr.handlePattern(
			debug,
			scanner,
			repeated.Pattern,
			level+1,
		)
		switch err := err.(type) {
		case *ErrUnexpectedToken:
			if min != 0 && num < min {
				// Mismatch before the minimum is read
				return err
			}
			// Reset scanner to the last match
			scanner.Set(lastPosition)
			return nil
		case errEOF:
			if min != 0 && num < min {
				// Mismatch before the minimum is read
				debug.markMismatch(debugIndex)
				return &ErrUnexpectedToken{
					At:       scanner.Lexer.cr,
					Expected: repeated,
				}
			}
			// Reset scanner to the last match
			scanner.Set(lastPosition)
			return nil
		case nil:
			num++
			lastPosition = scanner.Lexer.cr
			// Append rule patterns, other patterns are appended automatically
			if !repeated.Pattern.Container() {
				scanner.Append(repeated.Pattern, frag)
			}
		default:
			// Propagate unexpected errors unchanged.
			return err
		}
	}
	return nil
}
// parseSequence matches every pattern of the sequence in order, appending
// each non-container match to the scanner. The first mismatch aborts the
// whole sequence.
func (pr Parser) parseSequence(
	debug *DebugProfile,
	scanner *scanner,
	patterns Sequence,
	level uint,
) error {
	debugIndex := debug.record(patterns, scanner.Lexer.cr, level)
	for _, pattern := range patterns {
		frag, err := pr.handlePattern(debug, scanner, pattern, level+1)
		if err != nil {
			debug.markMismatch(debugIndex)
			return err
		}
		if pattern.Container() {
			// Container patterns append their contents themselves
			continue
		}
		scanner.Append(pattern, frag)
	}
	return nil
}
// parseEither tries each option in order, resetting the scanner between
// attempts, and returns the fragment of the first matching option. Only
// when the final option mismatches is the error reported, with the whole
// Either as the expected pattern. Returns (nil, nil) for an empty option
// list.
func (pr Parser) parseEither(
	debug *DebugProfile,
	scanner *scanner,
	patternOptions Either,
	level uint,
) (Fragment, error) {
	debugIndex := debug.record(patternOptions, scanner.Lexer.cr, level)
	beforeCr := scanner.Lexer.cr
	for ix, pt := range patternOptions {
		lastOption := ix >= len(patternOptions)-1
		frag, err := pr.handlePattern(debug, scanner, pt, level+1)
		if err != nil {
			if er, ok := err.(*ErrUnexpectedToken); ok {
				if lastOption {
					// Set actual expected pattern
					er.Expected = patternOptions
					debug.markMismatch(debugIndex)
				} else {
					// Reset scanner to the initial position
					scanner.Set(beforeCr)
					// Continue checking other options
					continue
				}
			} else {
				// Unexpected error
				debug.markMismatch(debugIndex)
			}
			return nil, err
		}
		// Append rule patterns, other patterns are appended automatically
		if !pt.Container() {
			scanner.Append(pt, frag)
		}
		return frag, nil
	}
	return nil, nil
}
// parseExact matches the literal expectation of the pattern against the
// input, returning the matched token or an unexpected-token error at the
// pre-read position.
func (pr Parser) parseExact(
	debug *DebugProfile,
	scanner *scanner,
	exact *Exact,
	level uint,
) (Fragment, error) {
	debugIndex := debug.record(exact, scanner.Lexer.cr, level)
	if scanner.Lexer.reachedEOF() {
		debug.markMismatch(debugIndex)
		return nil, errEOF{}
	}
	// Capture the position before reading for error reporting.
	beforeCr := scanner.Lexer.cr
	tk, match, err := scanner.ReadExact(
		exact.Expectation,
		exact.Kind,
	)
	if err != nil {
		return nil, err
	}
	if !match {
		debug.markMismatch(debugIndex)
		return nil, &ErrUnexpectedToken{
			At:       beforeCr,
			Expected: exact,
		}
	}
	return tk, nil
}
// parseRule parses a single rule: it enforces the optional recursion
// limit, parses the rule's pattern, packages the scanned tokens into a
// fragment of the rule's kind, and finally runs the rule's action
// callback, if any.
func (pr Parser) parseRule(
	debug *DebugProfile,
	scanner *scanner,
	rule *Rule,
	level uint,
) (frag Fragment, err error) {
	debugIndex := debug.record(rule, scanner.Lexer.cr, level)
	if pr.MaxRecursionLevel > 0 {
		// Count this entry and fail once the rule exceeds the limit.
		pr.recursionRegister[rule]++
		if pr.recursionRegister[rule] > pr.MaxRecursionLevel {
			// Max recursion level exceeded
			return nil, &Err{
				Err: fmt.Errorf(
					"max recursion level exceeded at rule %p (%q)",
					rule,
					rule.Designation,
				),
				At: scanner.Lexer.cr,
			}
		}
	}
	frag, err = pr.handlePattern(debug, scanner, rule.Pattern, level+1)
	if err != nil {
		debug.markMismatch(debugIndex)
		return
	}
	if !rule.Pattern.Container() {
		scanner.Append(rule.Pattern, frag)
	}
	// Package everything this rule's scanner collected into one fragment.
	frag = scanner.Fragment(rule.Kind)
	if rule.Action != nil {
		// Execute rule action callback
		if err := rule.Action(frag); err != nil {
			return nil, &Err{Err: err, At: frag.Begin()}
		}
	}
	return
}
// tryErrRule attempts to match the error-rule at the lexer's current
// position. When the error-rule itself matches, the previously recorded
// error is returned; when it mismatches, its own error is returned with
// the expectation cleared (it refers to the error-rule, not the grammar).
// A nil errRule yields nil.
func (pr Parser) tryErrRule(
	debug *DebugProfile,
	lex *lexer,
	errRule *Rule,
	previousUnexpErr error,
) error {
	if errRule == nil {
		return nil
	}
	_, err := pr.parseRule(debug, newScanner(lex), errRule, 0)
	if err == nil {
		// The error-rule matched; keep reporting the original error.
		return previousUnexpErr
	}
	if unexp, ok := err.(*ErrUnexpectedToken); ok {
		// Clear the expectation set while parsing the error-rule.
		unexp.Expected = nil
	}
	return err
}
// Debug parses the given source file in debug mode generating a debug profile
// alongside the main fragment and any parse error.
func (pr *Parser) Debug(source *SourceFile) (*DebugProfile, Fragment, error) {
	debug := newDebugProfile()
	mainFrag, err := pr.parse(source, debug)
	return debug, mainFrag, err
}
// Parse parses the given source file.
//
// WARNING: Parse isn't safe for concurrent use and shall therefore
// not be executed by multiple goroutines concurrently!
func (pr *Parser) Parse(source *SourceFile) (Fragment, error) {
	// nil debug profile disables debug recording.
	return pr.parse(source, nil)
}
// parse runs the grammar over the source file, optionally recording into
// a debug profile, and verifies the whole input was consumed. On a
// mismatch (or trailing input) it tries the error-grammar to produce a
// more specific diagnostic before falling back to the original error.
func (pr *Parser) parse(
	source *SourceFile,
	debug *DebugProfile,
) (Fragment, error) {
	if pr.MaxRecursionLevel > 0 {
		// Reset the recursion register when recursion limitation is enabled
		pr.recursionRegister.Reset()
	}
	cr := NewCursor(source)
	lex := &lexer{cr: cr}
	mainFrag, err := pr.parseRule(debug, newScanner(lex), pr.grammar, 0)
	if err != nil {
		if err, ok := err.(*ErrUnexpectedToken); ok {
			// Reset the lexer to the start position of the error
			lex.cr = err.At
		}
		if err := pr.tryErrRule(debug, lex, pr.errGrammar, err); err != nil {
			return nil, err
		}
		return nil, err
	}
	// Ensure EOF: attempt to read one more token past the main fragment.
	last, err := lex.ReadUntil(
		func(uint, Cursor) bool { return true },
		0,
	)
	switch err := err.(type) {
	case errEOF:
		// Ignore EOF errors
		return mainFrag, nil
	case nil:
		// A token remains after the grammar matched; handled below.
	default:
		// Report unexpected errors
		return nil, err
	}
	if last != nil {
		if pr.errGrammar != nil {
			// Try to match an error-pattern
			lex.cr = last.VBegin
		}
		unexpErr := &ErrUnexpectedToken{At: last.VBegin}
		if err := pr.tryErrRule(debug, lex, pr.errGrammar, unexpErr); err != nil {
			return nil, err
		}
		// Fallback to default unexpected-token error
		return nil, unexpErr
	}
	return mainFrag, nil
}
|
package imdb
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/jbowtie/gokogiri"
htmlParser "github.com/jbowtie/gokogiri/html"
)
// HttpGetter abstracts the GET half of an HTTP client (satisfied by
// *http.Client), allowing tests to inject fakes.
type HttpGetter interface {
	Get(url string) (resp *http.Response, err error)
}

// HttpPoster abstracts the POST half of an HTTP client.
type HttpPoster interface {
	Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
}

// HttpGetPoster combines GET and POST capabilities in a single interface.
type HttpGetPoster interface {
	HttpGetter
	HttpPoster
}
// parsePage downloads the page at url via openPage and parses its bytes
// into a gokogiri HTML document.
func parsePage(client HttpGetter, url string) (*htmlParser.HtmlDocument, error) {
	data, err := openPage(client, url)
	if err != nil {
		return nil, err
	}
	page, err := gokogiri.ParseHtml(data)
	if err != nil {
		return nil, fmt.Errorf("error parsing html: %s", err)
	}
	return page, nil
}
// openPage downloads the page at url using the given client (falling back
// to http.DefaultClient when client is nil) and returns the raw response
// body.
func openPage(client HttpGetter, url string) ([]byte, error) {
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Get(url)
	if err != nil {
		// %w preserves the cause chain for errors.Is/errors.As.
		return nil, fmt.Errorf("unable to download imdb page: %w", err)
	}
	// Always release the connection; the close error is irrelevant here.
	defer func() {
		_ = resp.Body.Close()
	}()
	// NOTE: ioutil.ReadAll is deprecated in favor of io.ReadAll; kept so the
	// file's existing io/ioutil import remains in use.
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("unable to read imdb page: %w", err)
	}
	return data, nil
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package partition
import (
"strings"
"testing"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/testdata"
"github.com/stretchr/testify/require"
)
// TestListPartitionPushDown records/checks the static-prune-mode plans
// produced for queries against LIST and LIST COLUMNS partitioned tables.
// Inputs and expected plans come from the integration partition testdata.
func TestListPartitionPushDown(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database list_push_down")
	tk.MustExec("use list_push_down")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("drop table if exists tlist")
	tk.MustExec(`set tidb_enable_list_partition = 1`)
	tk.MustExec(`create table tlist (a int) partition by list (a) (
partition p0 values in (0, 1, 2),
partition p1 values in (3, 4, 5))`)
	tk.MustExec(`create table tcollist (a int) partition by list columns(a) (
partition p0 values in (0, 1, 2),
partition p1 values in (3, 4, 5))`)
	tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
	var input []string
	var output []struct {
		SQL  string
		Plan []string
	}
	integrationPartitionSuiteData := getIntegrationPartitionSuiteData()
	integrationPartitionSuiteData.LoadTestCases(t, &input, &output)
	for i, tt := range input {
		// In record mode the expected plan is (re)captured into the testdata.
		testdata.OnRecord(func() {
			output[i].SQL = tt
			output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
		})
		tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...))
	}
}
// TestListColVariousTypes exercises LIST COLUMNS partitioning over int,
// date and varchar columns, and verifies that double/decimal list-columns
// partitioning is rejected with a "not allowed" error.
func TestListColVariousTypes(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database list_col_partition_types")
	tk.MustExec("use list_col_partition_types")
	tk.MustExec("drop table if exists tlist")
	tk.MustExec(`set tidb_enable_list_partition = 1`)
	tk.MustExec(`create table tint (a int) partition by list columns(a) (partition p0 values in (0, 1), partition p1 values in (2, 3))`)
	tk.MustExec(`create table tdate (a date) partition by list columns(a) (partition p0 values in ('2000-01-01', '2000-01-02'), partition p1 values in ('2000-01-03', '2000-01-04'))`)
	tk.MustExec(`create table tstring (a varchar(32)) partition by list columns(a) (partition p0 values in ('a', 'b'), partition p1 values in ('c', 'd'))`)
	// Floating point and decimal list-columns partitioning must be rejected.
	err := tk.ExecToErr(`create table tdouble (a double) partition by list columns(a) (partition p0 values in (0, 1), partition p1 values in (2, 3))`)
	require.Error(t, err)
	require.Contains(t, err.Error(), "not allowed")
	err = tk.ExecToErr(`create table tdecimal (a decimal(30, 10)) partition by list columns(a) (partition p0 values in (0, 1), partition p1 values in (2, 3))`)
	require.Error(t, err)
	require.Contains(t, err.Error(), "not allowed")
	tk.MustExec(`insert into tint values (0), (1), (2), (3)`)
	tk.MustExec(`insert into tdate values ('2000-01-01'), ('2000-01-02'), ('2000-01-03'), ('2000-01-04')`)
	tk.MustExec(`insert into tstring values ('a'), ('b'), ('c'), ('d')`)
	tk.MustExec(`analyze table tint`)
	tk.MustExec(`analyze table tdate`)
	tk.MustExec(`analyze table tstring`)
	var input []string
	var output []struct {
		SQL     string
		Results []string
	}
	integrationPartitionSuiteData := getIntegrationPartitionSuiteData()
	integrationPartitionSuiteData.LoadTestCases(t, &input, &output)
	for i, tt := range input {
		testdata.OnRecord(func() {
			output[i].SQL = tt
			output[i].Results = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
		})
		tk.MustQuery(tt).Check(testkit.Rows(output[i].Results...))
	}
}
func TestListPartitionPruning(t *testing.T) {
failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("create database list_partition_pruning")
tk.MustExec("use list_partition_pruning")
tk.MustExec("drop table if exists tlist")
tk.MustExec(`set tidb_enable_list_partition = 1`)
tk.MustExec(`create table tlist (a int) partition by list (a) (
partition p0 values in (0, 1, 2),
partition p1 values in (3, 4, 5),
partition p2 values in (6, 7, 8),
partition p3 values in (9, 10, 11))`)
tk.MustExec(`create table tcollist (a int) partition by list columns(a) (
partition p0 values in (0, 1, 2),
partition p1 values in (3, 4, 5),
partition p2 values in (6, 7, 8),
partition p3 values in (9, 10, 11))`)
tk.MustExec(`analyze table tlist`)
tk.MustExec(`analyze table tcollist`)
var input []string
var output []struct {
SQL string
DynamicPlan []string
StaticPlan []string
}
integrationPartitionSuiteData := getIntegrationPartitionSuiteData()
integrationPartitionSuiteData.LoadTestCases(t, &input, &output)
for i, tt := range input {
testdata.OnRecord(func() {
output[i].SQL = tt
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
output[i].DynamicPlan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
output[i].StaticPlan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
})
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
tk.MustQuery(tt).Check(testkit.Rows(output[i].DynamicPlan...))
tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
tk.MustQuery(tt).Check(testkit.Rows(output[i].StaticPlan...))
}
}
// TestListPartitionFunctions replays testdata statements against LIST
// partitioned tables in static prune mode; only statements containing
// "select" have their (sorted) result sets recorded and checked.
func TestListPartitionFunctions(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database list_partition_pruning")
	tk.MustExec("use list_partition_pruning")
	tk.MustExec("set tidb_enable_list_partition = 1")
	tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
	var input []string
	var output []struct {
		SQL     string
		Results []string
	}
	integrationPartitionSuiteData := getIntegrationPartitionSuiteData()
	integrationPartitionSuiteData.LoadTestCases(t, &input, &output)
	for i, tt := range input {
		testdata.OnRecord(func() {
			output[i].SQL = tt
			output[i].Results = nil
			if strings.Contains(tt, "select") {
				output[i].Results = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
			}
		})
		// Non-select statements are just executed; selects are verified.
		if strings.Contains(tt, "select") {
			tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Results...))
		} else {
			tk.MustExec(tt)
		}
	}
}
// TestEstimationForTopNPushToDynamicPartition checks plan output for TopN
// push-down estimation across range/list/hash partitioned tables and a
// plain table, all sharing the same schema and clustered primary key.
func TestEstimationForTopNPushToDynamicPartition(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("drop table if exists tlist")
	tk.MustExec(`set tidb_enable_list_partition = 1`)
	tk.MustExec(`create table trange (a int, b int, c int, index ia(a), primary key (b) clustered)
partition by range(b) (
partition p1 values less than(100),
partition p2 values less than(200),
partition p3 values less than maxvalue);`)
	tk.MustExec(`create table tlist (a int, b int, c int, index ia(a), primary key (b) clustered)
partition by list (b) (
partition p0 values in (0, 1, 2),
partition p1 values in (3, 4, 5));`)
	tk.MustExec(`create table thash (a int, b int, c int, index ia(a), primary key (b) clustered)
partition by hash(b) partitions 4;`)
	tk.MustExec(`create table t (a int, b int, c int, index ia(a), primary key (b) clustered);`)
	tk.MustExec(`analyze table trange;`)
	tk.MustExec(`analyze table tlist;`)
	tk.MustExec(`analyze table thash;`)
	tk.MustExec(`analyze table t;`)
	var input []string
	var output []struct {
		SQL  string
		Plan []string
	}
	integrationPartitionSuiteData := getIntegrationPartitionSuiteData()
	integrationPartitionSuiteData.LoadTestCases(t, &input, &output)
	for i, tt := range input {
		testdata.OnRecord(func() {
			output[i].SQL = tt
			output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
		})
		tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...))
	}
}
func TestPartitionTableExplain(t *testing.T) {
failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec(`create table t (a int primary key, b int, key (b)) partition by hash(a) (partition P0, partition p1, partition P2)`)
tk.MustExec(`create table t2 (a int, b int)`)
tk.MustExec(`insert into t values (1,1),(2,2),(3,3)`)
tk.MustExec(`insert into t2 values (1,1),(2,2),(3,3)`)
tk.MustExec(`analyze table t, t2`)
var input []string
var output []struct {
SQL string
DynamicPlan []string
StaticPlan []string
}
integrationPartitionSuiteData := getIntegrationPartitionSuiteData()
integrationPartitionSuiteData.LoadTestCases(t, &input, &output)
for i, tt := range input {
testdata.OnRecord(func() {
output[i].SQL = tt
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
output[i].DynamicPlan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
output[i].StaticPlan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
})
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
tk.MustQuery(tt).Check(testkit.Rows(output[i].DynamicPlan...))
tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
tk.MustQuery(tt).Check(testkit.Rows(output[i].StaticPlan...))
}
}
func TestBatchPointGetTablePartition(t *testing.T) {
failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table thash1(a int, b int, primary key(a,b) nonclustered) partition by hash(b) partitions 2")
tk.MustExec("insert into thash1 values(1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create table trange1(a int, b int, primary key(a,b) nonclustered) partition by range(b) (partition p0 values less than (2), partition p1 values less than maxvalue)")
tk.MustExec("insert into trange1 values(1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create table tlist1(a int, b int, primary key(a,b) nonclustered) partition by list(b) (partition p0 values in (0, 1), partition p1 values in (2, 3))")
tk.MustExec("insert into tlist1 values(1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create table thash2(a int, b int, primary key(a,b)) partition by hash(b) partitions 2")
tk.MustExec("insert into thash2 values(1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create table trange2(a int, b int, primary key(a,b)) partition by range(b) (partition p0 values less than (2), partition p1 values less than maxvalue)")
tk.MustExec("insert into trange2 values(1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create table tlist2(a int, b int, primary key(a,b)) partition by list(b) (partition p0 values in (0, 1), partition p1 values in (2, 3))")
tk.MustExec("insert into tlist2 values(1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create table thash3(a int, b int, primary key(a)) partition by hash(a) partitions 2")
tk.MustExec("insert into thash3 values(1,0),(2,0),(3,0),(4,0)")
tk.MustExec("create table trange3(a int, b int, primary key(a)) partition by range(a) (partition p0 values less than (3), partition p1 values less than maxvalue)")
tk.MustExec("insert into trange3 values(1,0),(2,0),(3,0),(4,0)")
tk.MustExec("create table tlist3(a int, b int, primary key(a)) partition by list(a) (partition p0 values in (0, 1, 2), partition p1 values in (3, 4, 5))")
tk.MustExec("insert into tlist3 values(1,0),(2,0),(3,0),(4,0)")
tk.MustExec("create table issue45889(a int) partition by list(a) (partition p0 values in (0, 1), partition p1 values in (2, 3))")
tk.MustExec("insert into issue45889 values (0),(0),(1),(1),(2),(2),(3),(3)")
var input []string
var output []struct {
SQL string
DynamicPlan []string
StaticPlan []string
Result []string
}
integrationPartitionSuiteData := getIntegrationPartitionSuiteData()
integrationPartitionSuiteData.LoadTestCases(t, &input, &output)
for i, tt := range input {
testdata.OnRecord(func() {
output[i].SQL = tt
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
output[i].DynamicPlan = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + tt).Rows())
dynamicQuery := tk.MustQuery(tt)
if !strings.Contains(tt, "order by") {
dynamicQuery = dynamicQuery.Sort()
}
dynamicRes := testdata.ConvertRowsToStrings(dynamicQuery.Rows())
tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
output[i].StaticPlan = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + tt).Rows())
staticQuery := tk.MustQuery(tt)
if !strings.Contains(tt, "order by") {
staticQuery = staticQuery.Sort()
}
staticRes := testdata.ConvertRowsToStrings(staticQuery.Rows())
require.Equal(t, dynamicRes, staticRes)
output[i].Result = staticRes
})
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
tk.MustQuery("explain format = 'brief' " + tt).Check(testkit.Rows(output[i].DynamicPlan...))
if strings.Contains(tt, "order by") {
tk.MustQuery(tt).Check(testkit.Rows(output[i].Result...))
} else {
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Result...))
}
tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
tk.MustQuery("explain format = 'brief' " + tt).Check(testkit.Rows(output[i].StaticPlan...))
if strings.Contains(tt, "order by") {
tk.MustQuery(tt).Check(testkit.Rows(output[i].Result...))
} else {
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Result...))
}
}
}
// TestBatchPointGetPartitionForAccessObject checks the plan output
// (access object) of point/batch-point-get queries over hash, list and
// list-columns partitioned tables under dynamic pruning, comparing each
// plan against recorded testdata.
func TestBatchPointGetPartitionForAccessObject(t *testing.T) {
	// Force dynamic pruning so plans are deterministic regardless of
	// stats availability. NOTE(review): Enable's error is ignored here.
	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Hash partitioning on a unique key.
	tk.MustExec("create table t1(a int, b int, UNIQUE KEY (b)) PARTITION BY HASH(b) PARTITIONS 4")
	tk.MustExec("insert into t1 values(1, 1), (2, 2), (3, 3), (4, 4)")
	// List partitioning on the primary key.
	tk.MustExec("CREATE TABLE t2 (id int primary key, name_id int) PARTITION BY LIST(id) (" +
		"partition p0 values IN (1, 2), " +
		"partition p1 values IN (3, 4), " +
		"partition p3 values IN (5))")
	tk.MustExec("insert into t2 values(1, 1), (2, 2), (3, 3), (4, 4)")
	// LIST COLUMNS over a single column.
	tk.MustExec("CREATE TABLE t3 (id int primary key, name_id int) PARTITION BY LIST COLUMNS(id) (" +
		"partition p0 values IN (1, 2), " +
		"partition p1 values IN (3, 4), " +
		"partition p3 values IN (5))")
	tk.MustExec("insert into t3 values(1, 1), (2, 2), (3, 3), (4, 4)")
	// LIST COLUMNS over two integer columns with a composite unique key.
	tk.MustExec("CREATE TABLE t4 (id int, name_id int, unique key(id, name_id)) PARTITION BY LIST COLUMNS(id, name_id) (" +
		"partition p0 values IN ((1, 1),(2, 2)), " +
		"partition p1 values IN ((3, 3),(4, 4)), " +
		"partition p3 values IN ((5, 5)))")
	tk.MustExec("insert into t4 values(1, 1), (2, 2), (3, 3), (4, 4)")
	// LIST COLUMNS mixing an int and a varchar column.
	tk.MustExec("CREATE TABLE t5 (id int, name varchar(10), unique key(id, name)) PARTITION BY LIST COLUMNS(id, name) (" +
		"partition p0 values IN ((1,'a'),(2,'b')), " +
		"partition p1 values IN ((3,'c'),(4,'d')), " +
		"partition p3 values IN ((5,'e')))")
	tk.MustExec("insert into t5 values(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')")
	tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
	var input []string
	var output []struct {
		SQL  string
		Plan []string
	}
	integrationPartitionSuiteData := getIntegrationPartitionSuiteData()
	integrationPartitionSuiteData.LoadTestCases(t, &input, &output)
	for i, tt := range input {
		// In record mode, capture the current plan into the testdata file;
		// otherwise verify the plan matches the recorded one.
		testdata.OnRecord(func() {
			output[i].SQL = tt
			output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
		})
		tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...))
	}
}
|
package leetcode
import "testing"
// TestRobb logs robb's answers for the two sample inputs from the
// house-robber problem statement.
func TestRobb(t *testing.T) {
	for _, nums := range [][]int{
		{1, 2, 3, 1},
		{2, 7, 9, 3, 1},
	} {
		t.Log(robb(nums))
	}
}
|
package fakes
import "errors"
// NoopWriter is a stub io.Writer whose Write never succeeds: it reports
// zero bytes written and a fixed "explosions" error. Useful for
// exercising error paths in code that writes to an io.Writer.
type NoopWriter struct{}

// Write implements io.Writer; it writes nothing and always fails.
func (NoopWriter) Write([]byte) (int, error) {
	return 0, errors.New("explosions")
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
/*
This is a sample that replicates the try-lock pattern in Go.
In this scenario we have two potential callers for the same resource - as specified by jobFunc() - but only
one of them can access it at any one time. If the resource is in use, then don't wait, but do nothing.
This sample synchronises the calls to jobFunc() using a channel: when the channel contains a token, the resource
can be used; when the channel is empty, the resource is busy.
*/
// sig is the token type carried on the signal channel.
type sig int

const (
	// OKAY is the single token value; its presence on the channel means
	// the shared resource is free.
	OKAY sig = iota + 1
)

var (
	// signal acts as a one-slot lock: receiving the token grants access
	// to jobFunc, and sending it back releases the resource.
	signal = make(chan sig)
)
// init announces startup and seeds math/rand with a fixed seed so the
// simulated job durations are reproducible across runs.
func init() {
	fmt.Println("Initialising the app")
	rand.Seed(42) // fixed seed: deterministic run-to-run behaviour
}
// main starts two competing callers for the single shared resource.
// The first runs in a goroutine; the second runs on the main goroutine
// and therefore keeps the process alive forever.
func main() {
	// Prime the lock: the token must be placed on the unbuffered channel
	// from another goroutine, otherwise this send would block forever.
	go func() { signal <- OKAY }()
	// Set the first caller going, attempting the lock every second.
	go differentCallers()
	// Offset the second caller by half a second so the two interleave.
	time.Sleep(500 * time.Millisecond)
	// The second caller loops forever on the main goroutine.
	differentCallers()
}
// differentCallers simulates an independent caller: once a second it
// tries to acquire the resource, running jobFunc when it succeeds and
// skipping the round when the resource is busy.
func differentCallers() {
	for {
		fmt.Println("Okay..")
		time.Sleep(time.Second)
		if !tryLock() {
			continue
		}
		go jobFunc()
	}
}
// tryLock performs a non-blocking attempt to take the resource token.
// It reports true when the token was received (resource acquired) and
// false when the channel was empty (resource busy).
func tryLock() bool {
	select {
	case msg := <-signal:
		fmt.Println("received message", msg)
		return true
	default:
	}
	fmt.Println("no message received - doing nothing")
	return false
}
// jobFunc pretends to use the resource for a random number of
// milliseconds, then returns the token to the channel so another caller
// may acquire it.
func jobFunc() {
	millis := rand.Intn(2000)
	fmt.Printf("In job func.. duration of job will be %d\n", time.Duration(millis))
	time.Sleep(time.Duration(millis) * time.Millisecond)
	signal <- OKAY
}
|
package application
import (
"github.com/angryronald/guestlist/internal/guest/application/command"
"github.com/angryronald/guestlist/internal/guest/application/query"
"github.com/angryronald/guestlist/internal/guest/domain/service/guest"
)
// Commands groups every write-side (state-changing) handler of the
// guest application layer.
type Commands struct {
	AddGuest     command.AddGuestCommand
	GuestArrived command.GuestArrivedCommand
	GuestLeaves  command.GuestLeavesCommand
}

// Queries groups every read-side handler of the guest application layer.
type Queries struct {
	CountEmptySeats   query.CountEmptySeatsQuery
	ListArrivedGuests query.ListArrivedGuestsQuery
	ListGuests        query.ListGuestsQuery
}

// Application is the CQRS-style facade exposing all guest commands and
// queries to the transport layer.
type Application struct {
	Commands Commands
	Queries  Queries
}
// New wires up the guest application layer, constructing every command
// and query handler on top of the given domain service.
func New(
	service guest.ServiceInterface,
) Application {
	commands := Commands{
		AddGuest:     command.NewAddGuestCommand(service),
		GuestArrived: command.NewGuestArrivedCommand(service),
		GuestLeaves:  command.NewGuestLeavesCommand(service),
	}
	queries := Queries{
		CountEmptySeats:   query.NewCountEmptySeatsQuery(service),
		ListArrivedGuests: query.NewListArrivedGuestsQuery(service),
		ListGuests:        query.NewListGuestsQuery(service),
	}
	return Application{Commands: commands, Queries: queries}
}
|
package ora2uml
import (
	"database/sql"
	"fmt"
	"os"
	"strings"

	_ "github.com/godror/godror"
)
const (
	// sqlAllTables lists every table visible to the connected user
	// (owner and name) from Oracle's ALL_TABLES view.
	sqlAllTables = `
select
	owner, table_name
from
	all_tables
`
)
// readTablesSql builds the query that fetches owner, name and comment
// for exactly the configured tables, using an (owner, table_name) IN
// list over Oracle's ALL_TABLES / ALL_TAB_COMMENTS views.
//
// NOTE(review): owner/table names are interpolated directly into the
// SQL text. They come from local configuration today, but bind
// parameters would be safer if that ever changes.
func readTablesSql(tables []ConfigTable) string {
	// Use a strings.Builder instead of repeated string concatenation,
	// and avoid naming the local "sql", which shadowed the imported
	// database/sql package in the original version.
	var b strings.Builder
	b.WriteString("select ")
	b.WriteString("t.owner, t.table_name, c.comments ")
	b.WriteString("from all_tables t ")
	b.WriteString("left outer join all_tab_comments c on t.owner = c.owner and t.table_name = c.table_name ")
	b.WriteString("where (t.owner, t.table_name) in (")
	for idx, table := range tables {
		if idx > 0 {
			b.WriteString(",")
		}
		fmt.Fprintf(&b, "('%s','%s')", table.Owner, table.Name)
	}
	b.WriteString(")")
	return b.String()
}
// ReadTables connects to the configured Oracle database and loads the
// configured tables (with their comments) into a Model.
//
// The original version called os.Exit(0) on failure — terminating the
// whole process with a *success* status despite declaring an error
// return — ignored Scan errors, and never closed the result set.
func ReadTables(config Config) (Model, error) {
	query := readTablesSql(config.Tables)
	db, err := sql.Open("godror", config.ConnectionString())
	if err != nil {
		return Model{}, fmt.Errorf("ReadTables: opening connection: %w", err)
	}
	defer db.Close()
	rows, err := db.Query(query)
	if err != nil {
		return Model{}, fmt.Errorf("ReadTables: running query: %w", err)
	}
	defer rows.Close()
	model := &Model{}
	for rows.Next() {
		table := &ModelTable{}
		if err := rows.Scan(&table.Owner, &table.TableName, &table.Comments); err != nil {
			return Model{}, fmt.Errorf("ReadTables: scanning row: %w", err)
		}
		fmt.Println(table.Owner, table.TableName)
		model.AddTable(*table)
	}
	if err := rows.Err(); err != nil {
		return Model{}, fmt.Errorf("ReadTables: iterating rows: %w", err)
	}
	return *model, nil
}
|
package schedule
import (
"math/rand"
"time"
)
// init seeds the global math/rand source so schedules vary between
// process runs.
func init() {
	rand.Seed(int64(time.Now().Nanosecond()))
}
// RandomInterval defines a random interval schedule.
type RandomInterval struct {
Interval time.Duration
Randomness float64
}
// EveryRandom takes an interval with an ajustable plus or minus percentage of this interval.
// The plusOrMinus paramter should be between 0 and 1.
// It returns a Schedule.
// For example, EveryRandom(4*time.Second, 0.5) will return a Schedule that can return between 2 and 6 seconds.
func EveryRandom(interval time.Duration, plusOrMinus float64) RandomInterval {
if interval < time.Second {
interval = time.Second
}
if plusOrMinus > 1 {
plusOrMinus = 1
}
if plusOrMinus < 0 {
plusOrMinus = 0
}
return RandomInterval{
Interval: interval,
Randomness: plusOrMinus,
}
}
func (r RandomInterval) RandomRange() (min, max float64) {
min = r.Interval.Seconds() - r.Interval.Seconds()*r.Randomness
max = r.Interval.Seconds() + r.Interval.Seconds()*r.Randomness
return min, max
}
func (r RandomInterval) Next(now time.Time) time.Time {
min, max := r.RandomRange()
return now.Add(time.Duration(randomInt(min, max)) * time.Second)
}
func randomInt(min, max float64) int64 {
return rand.Int63n(int64(max-min)) + int64(min)
}
|
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)
var (
	// StepDurationSummary report the step execution duration summary.
	// Quantiles 0.5/0.9/0.99 are tracked, labelled per application,
	// workflow revision, step name and step type.
	StepDurationSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name:        "step_duration_ms",
		Help:        "step latency distributions.",
		Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		ConstLabels: prometheus.Labels{},
	}, []string{"application", "workflow_revision", "step_name", "step_type"})
)
// init registers the summary with the controller-runtime metrics
// registry; a failure (e.g. duplicate registration) is logged rather
// than fatal.
func init() {
	if err := metrics.Registry.Register(StepDurationSummary); err != nil {
		klog.Error(err)
	}
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webservice
import (
"context"
restful "github.com/emicklei/go-restful/v3"
"github.com/oam-dev/kubevela/pkg/apiserver/log"
"github.com/oam-dev/kubevela/pkg/apiserver/model"
apis "github.com/oam-dev/kubevela/pkg/apiserver/rest/apis/v1"
"github.com/oam-dev/kubevela/pkg/apiserver/rest/usecase"
"github.com/oam-dev/kubevela/pkg/apiserver/rest/utils"
"github.com/oam-dev/kubevela/pkg/apiserver/rest/utils/bcode"
)
// workflowWebService serves the workflow REST endpoints, delegating all
// business logic to the usecase layer.
type workflowWebService struct {
	workflowUsecase    usecase.WorkflowUsecase
	applicationUsecase usecase.ApplicationUsecase
}

// workflowCheckFilter resolves the workflow named in the path for the
// application already placed in the request context, stores it in the
// context for downstream handlers, and rejects the request if lookup
// fails.
func (w *workflowWebService) workflowCheckFilter(req *restful.Request, res *restful.Response, chain *restful.FilterChain) {
	app := req.Request.Context().Value(&apis.CtxKeyApplication).(*model.Application)
	workflow, err := w.workflowUsecase.GetWorkflow(req.Request.Context(), app, req.PathParameter("workflowName"))
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	req.Request = req.Request.WithContext(context.WithValue(req.Request.Context(), &apis.CtxKeyWorkflow, workflow))
	chain.ProcessFilter(req, res)
}
// listApplicationWorkflows writes every workflow belonging to the
// application stored in the request context.
func (w *workflowWebService) listApplicationWorkflows(req *restful.Request, res *restful.Response) {
	app := req.Request.Context().Value(&apis.CtxKeyApplication).(*model.Application)
	workflows, err := w.workflowUsecase.ListApplicationWorkflow(req.Request.Context(), app)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(apis.ListWorkflowResponse{Workflows: workflows}); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// createOrUpdateApplicationWorkflow validates the request body and
// creates or updates a workflow for the application stored in the
// request context, writing the resulting detail view back.
func (w *workflowWebService) createOrUpdateApplicationWorkflow(req *restful.Request, res *restful.Response) {
	// Verify the validity of parameters
	var createReq apis.CreateWorkflowRequest
	if err := req.ReadEntity(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := validate.Struct(&createReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	app := req.Request.Context().Value(&apis.CtxKeyApplication).(*model.Application)
	// Call the usecase layer code
	workflowDetail, err := w.workflowUsecase.CreateOrUpdateWorkflow(req.Request.Context(), app, createReq)
	if err != nil {
		// Fixed log message: this handler creates/updates a workflow,
		// not an application.
		log.Logger.Errorf("create workflow failure %s", err.Error())
		bcode.ReturnError(req, res, err)
		return
	}
	// Write back response data
	if err := res.WriteEntity(workflowDetail); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// detailWorkflow writes the detail view of the workflow placed in the
// request context by workflowCheckFilter.
func (w *workflowWebService) detailWorkflow(req *restful.Request, res *restful.Response) {
	workflow := req.Request.Context().Value(&apis.CtxKeyWorkflow).(*model.Workflow)
	detail, err := w.workflowUsecase.DetailWorkflow(req.Request.Context(), workflow)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(detail); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}

// updateWorkflow validates the request body and applies the update to
// the workflow from the request context, writing back the new detail.
func (w *workflowWebService) updateWorkflow(req *restful.Request, res *restful.Response) {
	workflow := req.Request.Context().Value(&apis.CtxKeyWorkflow).(*model.Workflow)
	// Verify the validity of parameters
	var updateReq apis.UpdateWorkflowRequest
	if err := req.ReadEntity(&updateReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := validate.Struct(&updateReq); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	detail, err := w.workflowUsecase.UpdateWorkflow(req.Request.Context(), workflow, updateReq)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(detail); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}

// deleteWorkflow removes the workflow named in the path from the
// application in the request context and writes an empty response.
func (w *workflowWebService) deleteWorkflow(req *restful.Request, res *restful.Response) {
	app := req.Request.Context().Value(&apis.CtxKeyApplication).(*model.Application)
	if err := w.workflowUsecase.DeleteWorkflow(req.Request.Context(), app, req.PathParameter("workflowName")); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(apis.EmptyResponse{}); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// listWorkflowRecords writes a paginated list of execution records for
// the workflow in the request context.
func (w *workflowWebService) listWorkflowRecords(req *restful.Request, res *restful.Response) {
	page, pageSize, err := utils.ExtractPagingParams(req, minPageSize, maxPageSize)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	workflow := req.Request.Context().Value(&apis.CtxKeyWorkflow).(*model.Workflow)
	records, err := w.workflowUsecase.ListWorkflowRecords(req.Request.Context(), workflow, page, pageSize)
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(records); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}

// detailWorkflowRecord writes the detail of the execution record named
// by the "record" path parameter.
func (w *workflowWebService) detailWorkflowRecord(req *restful.Request, res *restful.Response) {
	workflow := req.Request.Context().Value(&apis.CtxKeyWorkflow).(*model.Workflow)
	record, err := w.workflowUsecase.DetailWorkflowRecord(req.Request.Context(), workflow, req.PathParameter("record"))
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(record); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
// resumeWorkflowRecord resumes a suspended workflow execution record
// and writes an empty response on success.
func (w *workflowWebService) resumeWorkflowRecord(req *restful.Request, res *restful.Response) {
	app := req.Request.Context().Value(&apis.CtxKeyApplication).(*model.Application)
	workflow := req.Request.Context().Value(&apis.CtxKeyWorkflow).(*model.Workflow)
	err := w.workflowUsecase.ResumeRecord(req.Request.Context(), app, workflow, req.PathParameter("record"))
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(apis.EmptyResponse{}); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}

// terminateWorkflowRecord terminates a running workflow execution
// record and writes an empty response on success.
func (w *workflowWebService) terminateWorkflowRecord(req *restful.Request, res *restful.Response) {
	app := req.Request.Context().Value(&apis.CtxKeyApplication).(*model.Application)
	workflow := req.Request.Context().Value(&apis.CtxKeyWorkflow).(*model.Workflow)
	err := w.workflowUsecase.TerminateRecord(req.Request.Context(), app, workflow, req.PathParameter("record"))
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(apis.EmptyResponse{}); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}

// rollbackWorkflowRecord rolls a workflow execution record back to the
// revision given by the "rollbackVersion" query parameter.
func (w *workflowWebService) rollbackWorkflowRecord(req *restful.Request, res *restful.Response) {
	app := req.Request.Context().Value(&apis.CtxKeyApplication).(*model.Application)
	workflow := req.Request.Context().Value(&apis.CtxKeyWorkflow).(*model.Workflow)
	err := w.workflowUsecase.RollbackRecord(req.Request.Context(), app, workflow, req.PathParameter("record"), req.QueryParameter("rollbackVersion"))
	if err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
	if err := res.WriteEntity(apis.EmptyResponse{}); err != nil {
		bcode.ReturnError(req, res, err)
		return
	}
}
|
package tests
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"themis/client"
)
// Spec: querying the Space service by ID should return a space whose ID
// matches in both the raw JSON payload and the decoded model.
var _ = Describe("Space Service", func() {
	BeforeEach(func() {
		//space = *NewSpace()
	})
	Describe("Querying Space Service", func() {
		Context("With no parameters", func() {
			It("should return a valid space", func() {
				// NOTE(review): configuration and SpaceID appear to be
				// package-level fixtures defined elsewhere in this suite.
				resultSpace, rawData, err := client.GetSpace(configuration.ServiceURL, SpaceID)
				Expect(err).Should(BeNil())
				// Pull the id straight out of the raw JSON payload.
				resultID := ((rawData["data"].(map[string]interface{})["id"])).(string)
				Expect(resultID).Should(Equal(SpaceID))
				Expect(resultSpace.ID.Hex()).Should(Equal(SpaceID))
			})
		})
	})
})
|
package main
import (
"fmt"
"net/http"
"os"
"time"
"github.com/dgrijalva/jwt-go"
)
// createJWTtoken issues an HS512-signed JWT for the given login,
// expiring one month from now, signed with the key read from the
// JWT_KEY environment variable.
func createJWTtoken(login string) (string, error) {
	claims := jwt.MapClaims{
		"login": login,
		"exp":   time.Now().AddDate(0, 1, 0).Unix(),
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)
	signed, err := token.SignedString([]byte(os.Getenv("JWT_KEY")))
	if err != nil {
		return "", fmt.Errorf("cannot get signed token string: %v", err)
	}
	return signed, nil
}
// getJWTtokenFromCookies finds the "jwt" cookie among cookies and
// parses its value into a verified token. It returns an error when no
// such cookie exists.
func getJWTtokenFromCookies(cookies []*http.Cookie) (*jwt.Token, error) {
	var jwtCookie *http.Cookie
	for _, c := range cookies {
		if c.Name == "jwt" {
			jwtCookie = c
			break // first match wins; no need to scan the rest
		}
	}
	if jwtCookie == nil {
		// Error strings should not end with punctuation (go vet ST1005).
		return nil, fmt.Errorf("jwt token was not found in cookies")
	}
	return parseJWTtoken(jwtCookie.Value)
}
// parseJWTtoken verifies and parses a raw JWT string. The key callback
// rejects any token whose signing method is not HMAC (preventing
// algorithm-substitution attacks) and supplies the HMAC key from the
// JWT_KEY environment variable.
func parseJWTtoken(token string) (*jwt.Token, error) {
	return jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
		}
		jwtKey := os.Getenv("JWT_KEY")
		if jwtKey == "" {
			return nil, fmt.Errorf("JWT_KEY was not found in env")
		}
		return []byte(jwtKey), nil
	})
}
|
/*
Copyright 2018 The HAWQ Team.
*/
package internalversion
// MyResourceExpansion allows custom methods to be added to the generated
// MyResource client (the expansion-interface pattern used by
// Kubernetes client-gen).
type MyResourceExpansion interface{}
|
package testutil
import (
"math/rand"
"time"
ci "gx/ipfs/QmNiJiXwWE3kRhZrC5ej3kSjWHm337pYfhjLGSCDNKJP2s/go-libp2p-crypto"
)
// RandTestKeyPair generates a key pair of the given type and bit size
// for tests, seeded from the current time.
func RandTestKeyPair(typ, bits int) (ci.PrivKey, ci.PubKey, error) {
	return SeededTestKeyPair(typ, bits, time.Now().UnixNano())
}

// SeededTestKeyPair deterministically generates a key pair from seed.
// NOTE: math/rand is not cryptographically secure — test use only.
func SeededTestKeyPair(typ, bits int, seed int64) (ci.PrivKey, ci.PubKey, error) {
	r := rand.New(rand.NewSource(seed))
	return ci.GenerateKeyPairWithReader(typ, bits, r)
}
|
package main
import (
	"fmt"
	"sync"
)
// Shape is anything that can report an integer area.
type Shape interface {
	Area() int
}

// Square is a square with integer side length.
type Square struct {
	width int
}

// Rectangle is an axis-aligned rectangle with integer sides.
type Rectangle struct {
	width  int
	height int
}

// Area returns width².
func (s Square) Area() int {
	return s.width * s.width
}

// Area returns width × height.
func (r Rectangle) Area() int {
	return r.width * r.height
}

// areasMu guards the shared areas map written by recordArea. Go maps
// are not safe for concurrent mutation: the original unguarded version
// could crash with "fatal error: concurrent map writes".
var areasMu sync.Mutex

// recordArea computes shape's area, stores it in areas, and signals
// completion on ch.
func recordArea(shape Shape, areas map[Shape]int, ch chan bool) {
	areasMu.Lock()
	areas[shape] = shape.Area()
	areasMu.Unlock()
	ch <- true
}

func main() {
	shapes := []Shape{
		Square{2},
		Rectangle{2, 3},
		Square{3},
	}
	areas := make(map[Shape]int)
	ch := make(chan bool)
	// Compute every area concurrently...
	for _, shape := range shapes {
		go recordArea(shape, areas, ch)
	}
	// ...and wait for one completion signal per shape.
	for range shapes {
		<-ch
	}
	for shape, area := range areas {
		fmt.Println(shape, area)
	}
}
|
package data
// Connection settings for the chit database.
// NOTE(review): hard-coded credentials in source; consider environment
// variables. Names use SCREAMING_SNAKE_CASE, while Go convention is
// MixedCaps — renaming would break existing callers, so left as is.
const (
	DB_USER = "user"
	DB_PASSWORD = "password"
	DB_NAME = "chit"
)
|
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package consumer
import (
"strings"
"stathat.com/c/consistent"
"github.com/apache/rocketmq-client-go/v2/internal/utils"
"github.com/apache/rocketmq-client-go/v2/primitive"
"github.com/apache/rocketmq-client-go/v2/rlog"
)
// AllocateStrategy is the algorithm used to divide message queues among
// the consumers of a group. Given the consumer group, the current
// consumer ID, all message queues and all consumer IDs, it returns the
// queues assigned to the current consumer.
//
// Provided implementations:
//   - average hashing (AllocateByAveragely)
//   - cyclic average hashing (AllocateByAveragelyCircle)
//   - explicitly configured queue list (AllocateByConfig)
//   - machine-room hashing (AllocateByMachineRoom), e.g. Alipay logic rooms
//   - machine-room nearside priority (AllocateByMachineNearby): if any
//     consumer is alive in a machine room, the queues of brokers deployed
//     in that room should be allocated only to those consumers; otherwise
//     the queues are shared among all consumers
//   - consistent hashing (AllocateByConsistentHash)
type AllocateStrategy func(string, string, []*primitive.MessageQueue, []string) []*primitive.MessageQueue
// AllocateByAveragely assigns each consumer a contiguous run of queues:
// roughly len(mqAll)/len(cidAll) each, with the first
// len(mqAll)%len(cidAll) consumers receiving one extra queue. Returns
// nil for empty input or when currentCID is not a known consumer.
func AllocateByAveragely(consumerGroup, currentCID string, mqAll []*primitive.MessageQueue,
	cidAll []string) []*primitive.MessageQueue {
	if currentCID == "" || len(mqAll) == 0 || len(cidAll) == 0 {
		return nil
	}
	// Locate the current consumer's position within cidAll.
	var (
		find  bool
		index int
	)
	for idx := range cidAll {
		if cidAll[idx] == currentCID {
			find = true
			index = idx
			break
		}
	}
	if !find {
		rlog.Warning("[BUG] ConsumerId not in cidAll", map[string]interface{}{
			rlog.LogKeyConsumerGroup: consumerGroup,
			"consumerId":             currentCID,
			"cidAll":                 cidAll,
		})
		return nil
	}
	mqSize := len(mqAll)
	cidSize := len(cidAll)
	mod := mqSize % cidSize
	// Size of this consumer's share: consumers before position `mod`
	// absorb one extra queue when the division is not exact.
	var averageSize int
	if mqSize <= cidSize {
		averageSize = 1
	} else {
		if mod > 0 && index < mod {
			averageSize = mqSize/cidSize + 1
		} else {
			averageSize = mqSize / cidSize
		}
	}
	var startIndex int
	if mod > 0 && index < mod {
		startIndex = index * averageSize
	} else {
		startIndex = index*averageSize + mod
	}
	num := utils.MinInt(averageSize, mqSize-startIndex)
	result := make([]*primitive.MessageQueue, 0)
	for i := 0; i < num; i++ {
		result = append(result, mqAll[(startIndex+i)%mqSize])
	}
	return result
}

// AllocateByAveragelyCircle deals queues out round-robin: the consumer
// at position index receives every queue whose position i satisfies
// i % len(cidAll) == index.
func AllocateByAveragelyCircle(consumerGroup, currentCID string, mqAll []*primitive.MessageQueue,
	cidAll []string) []*primitive.MessageQueue {
	if currentCID == "" || len(mqAll) == 0 || len(cidAll) == 0 {
		return nil
	}
	var (
		find  bool
		index int
	)
	for idx := range cidAll {
		if cidAll[idx] == currentCID {
			find = true
			index = idx
			break
		}
	}
	if !find {
		rlog.Warning("[BUG] ConsumerId not in cidAll", map[string]interface{}{
			rlog.LogKeyConsumerGroup: consumerGroup,
			"consumerId":             currentCID,
			"cidAll":                 cidAll,
		})
		return nil
	}
	result := make([]*primitive.MessageQueue, 0)
	// Starting at index is safe: no i < index can satisfy the modulus test.
	for i := index; i < len(mqAll); i++ {
		if i%len(cidAll) == index {
			result = append(result, mqAll[i])
		}
	}
	return result
}

// TODO: implement real machine-room nearside priority; currently
// delegates to plain average allocation.
func AllocateByMachineNearby(consumerGroup, currentCID string, mqAll []*primitive.MessageQueue,
	cidAll []string) []*primitive.MessageQueue {
	return AllocateByAveragely(consumerGroup, currentCID, mqAll, cidAll)
}

// AllocateByConfig returns a strategy that ignores its inputs and always
// yields the fixed, user-configured queue list.
func AllocateByConfig(list []*primitive.MessageQueue) AllocateStrategy {
	return func(consumerGroup, currentCID string, mqAll []*primitive.MessageQueue, cidAll []string) []*primitive.MessageQueue {
		return list
	}
}
// AllocateByMachineRoom returns a strategy that only considers queues
// whose broker is deployed in one of the given machine rooms (broker
// names are expected to look like "room@brokerName") and divides those
// queues evenly among the consumers.
func AllocateByMachineRoom(consumeridcs []string) AllocateStrategy {
	return func(consumerGroup, currentCID string, mqAll []*primitive.MessageQueue, cidAll []string) []*primitive.MessageQueue {
		if currentCID == "" || len(mqAll) == 0 || len(cidAll) == 0 {
			return nil
		}
		var (
			find  bool
			index int
		)
		for idx := range cidAll {
			if cidAll[idx] == currentCID {
				find = true
				index = idx
				break
			}
		}
		if !find {
			rlog.Warning("[BUG] ConsumerId not in cidAll", map[string]interface{}{
				rlog.LogKeyConsumerGroup: consumerGroup,
				"consumerId":             currentCID,
				"cidAll":                 cidAll,
			})
			return nil
		}
		// Keep only queues whose broker lives in one of the wanted rooms.
		var premqAll []*primitive.MessageQueue
		for _, mq := range mqAll {
			temp := strings.Split(mq.BrokerName, "@")
			if len(temp) == 2 {
				for _, idc := range consumeridcs {
					if idc == temp[0] {
						premqAll = append(premqAll, mq)
					}
				}
			}
		}
		mod := len(premqAll) / len(cidAll)
		rem := len(premqAll) % len(cidAll)
		startIndex := mod * index
		endIndex := startIndex + mod
		result := make([]*primitive.MessageQueue, 0)
		for i := startIndex; i < endIndex; i++ {
			// BUG FIX: index the filtered premqAll, not mqAll — the
			// start/end indices are computed over premqAll, so indexing
			// mqAll could hand out queues outside the requested rooms
			// (and skews the distribution).
			result = append(result, premqAll[i])
		}
		// One leftover queue for the first `rem` consumers.
		if rem > index {
			result = append(result, premqAll[index+mod*len(cidAll)])
		}
		return result
	}
}
// AllocateByConsistentHash returns a strategy that places all consumer
// IDs on a consistent-hash ring (virtualNodeCnt replicas each) and
// assigns each queue to whichever consumer its string form hashes to.
func AllocateByConsistentHash(virtualNodeCnt int) AllocateStrategy {
	return func(consumerGroup, currentCID string, mqAll []*primitive.MessageQueue, cidAll []string) []*primitive.MessageQueue {
		if currentCID == "" || len(mqAll) == 0 || len(cidAll) == 0 {
			return nil
		}
		// Sanity check: the current consumer must be a group member.
		var (
			find bool
		)
		for idx := range cidAll {
			if cidAll[idx] == currentCID {
				find = true
				break
			}
		}
		if !find {
			rlog.Warning("[BUG] ConsumerId not in cidAll", map[string]interface{}{
				rlog.LogKeyConsumerGroup: consumerGroup,
				"consumerId":             currentCID,
				"cidAll":                 cidAll,
			})
			return nil
		}
		// Build the hash ring over all consumer IDs.
		c := consistent.New()
		c.NumberOfReplicas = virtualNodeCnt
		for _, cid := range cidAll {
			c.Add(cid)
		}
		// Claim every queue whose ring position maps to this consumer.
		result := make([]*primitive.MessageQueue, 0)
		for _, mq := range mqAll {
			clientNode, err := c.Get(mq.String())
			if err != nil {
				rlog.Warning("[BUG] AllocateByConsistentHash err: %s", map[string]interface{}{
					rlog.LogKeyUnderlayError: err,
				})
			}
			if currentCID == clientNode {
				result = append(result, mq)
			}
		}
		return result
	}
}
|
package testutil
import (
"context"
"testing"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
"tagallery.com/api/mongodb"
)
// DropCollection deletes every document in the given collection of a
// MongoDB database. (Despite the name, the collection itself is kept —
// only its contents are removed via DeleteMany.)
func DropCollection(db string, collection string) error {
	client := mongodb.Client()
	_, err := client.Database(db).Collection(collection).DeleteMany(context.Background(), bson.M{}, options.Delete())
	return err
}
// CleanCollection empties a collection via DropCollection and reports
// any failure as a test error on t.
func CleanCollection(t *testing.T, db string, collection string) {
	if err := DropCollection(db, collection); err != nil {
		format, args := FormatTestError(
			"Failed to drop the database.",
			map[string]interface{}{
				"error": err,
			})
		t.Errorf(format, args...)
	}
}
|
package sqlite
import (
"database/sql"
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"time"
"github.com/elitah/utils/logs"
)
// sqliteTableInfo describes one user table discovered in the master
// database while syncing.
type sqliteTableInfo struct {
	name string // table name
	sql  string // CREATE TABLE statement from sqlite_master
	sync bool   // true when the slave defines the table with identical SQL
	cnt  int64  // row count of the table in the slave database
}
// SQLiteSync copies rows from the slave database into the master for
// every user table whose CREATE TABLE statement is identical in both.
// Tables that contain rows in the slave but whose schema differs are
// instead dumped to JSON backup files in dir (when dir is non-empty).
// It returns the number of rows inserted into master.
func SQLiteSync(master, slave *sql.DB, dir string) (int64, error) {
	if err := master.Ping(); nil == err {
		if err := slave.Ping(); nil == err {
			var list []*sqliteTableInfo
			var tbl_name, sql string
			// Enumerate the master's user tables and their schemas.
			if rows, err := master.Query("SELECT tbl_name, sql FROM sqlite_master WHERE type=='table';"); nil == err {
				// Iterate over the result rows.
				for rows.Next() {
					if err := rows.Scan(&tbl_name, &sql); nil == err {
						// Skip SQLite's internal sequence table and blank schemas.
						if "sqlite_sequence" != tbl_name && "" != sql {
							list = append(list, &sqliteTableInfo{
								name: tbl_name,
								sql:  sql,
							})
						}
					} else {
						return 0, err
					}
				}
				//
				rows.Close()
				// For each table, check whether the slave defines it with the
				// same schema, and record the slave-side row count.
				for _, item := range list {
					if row := slave.QueryRow("SELECT sql FROM sqlite_master WHERE type=='table' AND tbl_name==?;", item.name); nil != row {
						if err := row.Scan(&sql); nil == err {
							// Schemas must match exactly for a direct row copy.
							item.sync = sql == item.sql
							//
							if row := slave.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s;", item.name)); nil != row {
								row.Scan(&item.cnt)
							}
						} else {
							// Table missing in slave: left unsynced on purpose.
							//logs.Info(err)
						}
					}
				}
				// Total number of rows copied into master.
				cnt := int64(0)
				//
				for _, item := range list {
					if 0 < item.cnt {
						if item.sync {
							// Schemas match: copy every slave row into master.
							logs.Info("正在同步: ", item.name)
							//
							if rows, err := slave.Query(fmt.Sprintf("SELECT * FROM %s;", item.name)); nil == err {
								if types, err := rows.ColumnTypes(); nil == err {
									if tx, err := master.Begin(); nil == err {
										// Build "INSERT INTO t (c1, c2, ...) VALUES (?, ?, ...);"
										sql := "INSERT INTO " + item.name + " ("
										for i, _ := range types {
											if 0 != i {
												sql += ", "
											}
											sql += types[i].Name()
										}
										sql += ") VALUES ("
										for i, _ := range types {
											if 0 != i {
												sql += ", "
											}
											sql += "?"
										}
										sql += ");"
										if stmt, err := tx.Prepare(sql); nil == err {
											// Allocate one typed scan target per column.
											value := make([]interface{}, len(types))
											for i, _ := range value {
												switch strings.ToLower(types[i].DatabaseTypeName()) {
												case "integer":
													value[i] = new(int64)
												case "float":
													value[i] = new(float64)
												case "blob":
													value[i] = new([]byte)
												case "text":
													value[i] = new(string)
												case "timestamp", "datetime", "date":
													value[i] = new(time.Time)
												case "boolean":
													value[i] = new(bool)
												default:
													// "unsupported type" — column is left without a target
													logs.Error("不支持的类型:", types[i].DatabaseTypeName())
												}
											}
											// Copy row by row inside the transaction.
											for rows.Next() {
												if err := rows.Scan(value...); nil == err {
													if _, err := stmt.Exec(value...); nil == err {
														cnt++
													} else {
														logs.Error(err)
													}
												} else {
													logs.Error(err)
												}
											}
											//
											if err := tx.Commit(); nil != err {
												logs.Error(err)
											}
											//
											stmt.Close()
										} else {
											logs.Error(err)
										}
									} else {
										logs.Error(err)
									}
								} else {
									logs.Error(err)
								}
								rows.Close()
							} else {
								logs.Error(err)
							}
						} else if "" != dir {
							// Schemas differ: dump the slave's rows to a JSON backup.
							logs.Warn("正在备份: ", item.name)
							//
							list := make([]interface{}, 0, int(item.cnt))
							//
							if rows, err := slave.Query(fmt.Sprintf("SELECT * FROM %s;", item.name)); nil == err {
								if types, err := rows.ColumnTypes(); nil == err {
									for rows.Next() {
										value := make([]interface{}, len(types))
										for i, _ := range value {
											switch strings.ToLower(types[i].DatabaseTypeName()) {
											case "integer":
												value[i] = new(int64)
											case "float":
												value[i] = new(float64)
											case "blob":
												value[i] = new([]byte)
											case "text":
												value[i] = new(string)
											case "timestamp", "datetime", "date":
												value[i] = new(time.Time)
											case "boolean":
												value[i] = new(bool)
											default:
												// "unsupported type"
												logs.Info("不支持的类型:", types[i].DatabaseTypeName())
											}
										}
										if err := rows.Scan(value...); nil == err {
											list = append(list, value)
										}
									}
								} else {
									logs.Error(err)
								}
								//
								rows.Close()
							} else {
								logs.Error(err)
							}
							// Write schema + row count + rows as one JSON document.
							if data, err := json.Marshal(&struct {
								SQL   string      `json:"sql"`
								Count int64       `json:"count"`
								List  interface{} `json:"list"`
							}{
								SQL:   item.sql,
								Count: item.cnt,
								List:  list,
							}); nil == err {
								ioutil.WriteFile(
									fmt.Sprintf(
										"%s/sqlite_backup_%s_%d.json",
										dir,
										item.name,
										time.Now().Unix(),
									),
									data,
									0644,
								)
							} else {
								logs.Error(err)
							}
						}
					}
				}
				return cnt, nil
			} else {
				return 0, err
			}
		} else {
			return 0, err
		}
	} else {
		return 0, err
	}
}
|
package Account
import (
"context"
"log"
"math/big"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
)
// GetAddressFromHex converts a hex string into a common.Address.
func GetAddressFromHex(address string) common.Address {
	return common.HexToAddress(address)
}

// in wei precision (18 points)
// GetAccountBalance returns the current balance of account in wei.
// NOTE(review): log.Fatal terminates the whole process on RPC failure;
// returning an error would be friendlier for library use.
func GetAccountBalance(client *ethclient.Client, account common.Address) *big.Int {
	balance, err := client.BalanceAt(context.Background(), account, nil)
	if err != nil {
		log.Fatal("error at getAccountBalance", err)
	}
	return balance
}

// GetAccountBalanceAtBlockTime returns the account balance (in wei) as
// of the given block number.
func GetAccountBalanceAtBlockTime(client *ethclient.Client, account common.Address, blockNumber *big.Int) *big.Int {
	balanceAtBlockTime, err := client.BalanceAt(context.Background(), account, blockNumber)
	if err != nil {
		log.Fatal("error at getAccountBalanceAtBlockTime", err)
	}
	return balanceAtBlockTime
}

// GetAccountPendingBalance returns the account balance (in wei)
// including pending state.
func GetAccountPendingBalance(client *ethclient.Client, account common.Address) *big.Int {
	pendingBalance, err := client.PendingBalanceAt(context.Background(), account)
	if err != nil {
		log.Fatal("error at getAccountPendingBalance", err)
	}
	return pendingBalance
}
// GetAccountAddressHex returns the hex string form of the address.
// (gofmt fix: removed the space between the function name and its
// parameter list in all three helpers.)
func GetAccountAddressHex(accountAddress common.Address) string {
	return accountAddress.Hex()
}

// GetAccountAddressHashHex returns the hex form of the address widened
// to a hash.
func GetAccountAddressHashHex(accountAddress common.Address) string {
	return accountAddress.Hash().Hex()
}

// GetAccountAddressBytes returns the raw byte form of the address.
func GetAccountAddressBytes(accountAddress common.Address) []byte {
	return accountAddress.Bytes()
}
// GetAccountAddress returns the address of the given keystore account.
func GetAccountAddress(account accounts.Account) common.Address {
	return account.Address
}

// GetAccountPendingNonce returns the next nonce for accountAddress,
// taking pending transactions into account.
// NOTE(review): log.Fatal terminates the process on RPC failure.
func GetAccountPendingNonce(client *ethclient.Client, accountAddress common.Address) uint64 {
	nonce, err := client.PendingNonceAt(context.Background(), accountAddress)
	if err != nil {
		log.Fatal("error getting account pending nonce", err)
	}
	return nonce
}
package memory
import (
"fmt"
"testing"
"github.com/mateeullahmalik/goa-demo/internal/storage"
"github.com/stretchr/testify/assert"
)
// newTestDB builds an in-memory keyValue store pre-seeded with a single
// "exist" → "bar" entry. It must stay unexported: an exported
// constructor returning the unexported *keyValue would trip the CI
// linter ("exported func returns unexported type").
func newTestDB() *keyValue {
	seed := map[string][]byte{"exist": []byte("bar")}
	return &keyValue{values: seed}
}
// TestGet verifies reads of both an existing and a missing key.
func TestGet(t *testing.T) {
	t.Parallel()
	cases := []struct {
		key     string
		wantErr error
		wantVal []byte
	}{
		{key: "exist", wantErr: nil, wantVal: []byte("bar")},
		{key: "not-exist", wantErr: storage.ErrKeyValueNotFound, wantVal: nil},
	}
	t.Run("group", func(t *testing.T) {
		for _, tc := range cases {
			tc := tc
			name := fmt.Sprintf("key:%s/value:%v/err:%v", tc.key, tc.wantVal, tc.wantErr)
			t.Run(name, func(t *testing.T) {
				t.Parallel()
				db := newTestDB()
				got, err := db.Get(tc.key)
				assert.Equal(t, tc.wantErr, err)
				assert.Equal(t, tc.wantVal, got)
			})
		}
	})
}
func TestSet(t *testing.T) {
t.Parallel()
testCases := []struct {
key string
value []byte
expectedError error
}{
{
key: "exist",
value: []byte("baz"),
expectedError: nil,
}, {
key: "foo",
value: []byte("grid"),
expectedError: nil,
},
}
t.Run("group", func(t *testing.T) {
for _, testCase := range testCases {
testCase := testCase
testName := fmt.Sprintf("key:%s/value:%v/err:%v", testCase.key, testCase.value, testCase.expectedError)
t.Run(testName, func(t *testing.T) {
t.Parallel()
db := newTestDB()
err := db.Set(testCase.key, testCase.value)
assert.Equal(t, testCase.expectedError, err)
value, ok := db.values[testCase.key]
assert.True(t, ok, "not found new key")
assert.Equal(t, testCase.value, value)
})
}
})
}
func TestDelete(t *testing.T) {
t.Parallel()
testCases := []struct {
key string
expectedError error
}{
{
key: "exist",
expectedError: nil,
}, {
key: "not-exist",
expectedError: nil,
},
}
t.Run("group", func(t *testing.T) {
for _, testCase := range testCases {
testCase := testCase
testName := fmt.Sprintf("key:%s/err:%v", testCase.key, testCase.expectedError)
t.Run(testName, func(t *testing.T) {
t.Parallel()
db := newTestDB()
err := db.Delete(testCase.key)
_, ok := db.values[testCase.key]
assert.Equal(t, testCase.expectedError, err)
assert.False(t, ok, "found deleted key")
})
}
})
}
|
package main
import (
"go_restful/user"
"log"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
// initDB opens the MySQL connection used by the service and runs
// auto-migration for the user table. It terminates the process when the
// database is unreachable or the migration fails.
func initDB() *gorm.DB {
	db, err := gorm.Open("mysql", "root@/gorest?parseTime=true")
	if err != nil {
		log.Fatalln(err)
	}
	// gorm v1 reports migration failures on the returned *gorm.DB;
	// the original silently discarded them.
	if err := db.AutoMigrate(&user.User{}).Error; err != nil {
		log.Fatalln(err)
	}
	return db
}
// main wires the user REST API: opens the database, registers the CRUD
// routes on a default gin engine, and serves on gin's default address.
func main() {
	db := initDB()
	defer db.Close()
	userAPI := InitUserApi(db)
	route := gin.Default()
	// CRUD endpoints for the user resource.
	route.GET("/users", userAPI.FindAll)
	route.GET("/user/:id", userAPI.FindByID)
	route.POST("/user/create", userAPI.Create)
	route.DELETE("/user/delete/:id", userAPI.Delete)
	// Run blocks; it only returns on listener failure.
	err := route.Run()
	if err != nil {
		log.Fatalln(err)
	}
}
|
package util
import (
"errors"
"example.com/selenium/config"
"fmt"
"github.com/tebeka/selenium"
"net"
"os"
)
// Crawler bundles a running chromedriver service with the capabilities
// used to open browser sessions against it.
type Crawler struct {
	ChromeDriver string                // path to the chromedriver binary
	Port         int                   // local port the driver service listens on
	Service      *selenium.Service     // the running driver process; stopped by Shutdown
	Caps         selenium.Capabilities // browser capabilities for NewRemote sessions
}
// NewCrawler 开启驱动服务
func NewCrawler() (*Crawler, error) {
port, _ := pickUnusedPort()
crawler := &Crawler{
//google浏览器驱动
ChromeDriver: config.VIPER.GetString(config.DRIVER_PATH),
Port: port,
Service: nil,
}
opts := []selenium.ServiceOption{
selenium.Output(os.Stderr),
}
selenium.SetDebug(false)
service, err := selenium.NewChromeDriverService(crawler.ChromeDriver, crawler.Port, opts...)
if nil != err {
return nil, errors.New("start a chromedriver service failed," + err.Error())
}
imagCaps := map[string]interface{}{
"profile.managed_default_content_settings.images": 2, //不加载图片,提高浏览器响应速度
}
caps := selenium.Capabilities{
"browserName": "msedge",
"excludeSwitches": [1]string{"enable-automation"},
"args": []string{
"--headless",
"--start-maximized",
"--no-sandbox",
"--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
},
"prefs": imagCaps,
}
//chromeCaps := chrome.Capabilities{
// Prefs: imagCaps,
// Path: "",
//}
////以上是设置浏览器参数
//caps.AddChrome(chromeCaps)
crawler.Service = service
crawler.Caps = caps
return crawler, nil
}
// NewRemote 打开窗口
func (c *Crawler) NewRemote() (selenium.WebDriver, error) {
wB1, err := selenium.NewRemote(c.Caps, fmt.Sprintf("http://localhost:%d/wd/hub", c.Port))
if err != nil {
return nil, errors.New("connect to the webDriver failed," + err.Error())
}
return wB1, nil
}
// Shutdown 关闭驱动服务
func (c *Crawler) Shutdown() {
_ = c.Service.Stop()
}
// pickUnusedPort asks the kernel for a currently-free TCP port on
// 127.0.0.1 by listening on port 0, then closes the listener and returns
// the allocated port number. Note the port could in principle be taken
// again between Close and its reuse by the caller.
func pickUnusedPort() (int, error) {
	laddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	ln, err := net.ListenTCP("tcp", laddr)
	if err != nil {
		return 0, err
	}
	allocated := ln.Addr().(*net.TCPAddr).Port
	if closeErr := ln.Close(); closeErr != nil {
		return 0, closeErr
	}
	return allocated, nil
}
|
// bytes.
package main
import (
"fmt"
)
// main demonstrates []byte conversions of string slices, sub-slicing,
// and appending single bytes and whole strings to a byte slice.
func main() {
	const str = "hello world"
	// []byte(...) allocates its own backing array, so the original's
	// make([]byte, len(str)) followed by immediate reassignment was a
	// dead allocation — removed.
	sl := []byte(str[:5])
	fmt.Printf("sl=%s len=%d\n\n", sl, len(sl))
	fmt.Printf("sl[:3]=%s\n", sl[:3])
	fmt.Printf("sl[3:]=%s\n\n", sl[3:])
	fmt.Printf("len(sl)/2=%d\n", len(sl)/2)
	fmt.Printf("sl[:len(sl)/2]=%s\n\n", sl[:len(sl)/2])
	fmt.Printf("%T %+v\n", sl, sl)
	fmt.Printf("%T %+v\n", sl[0], sl[0])
	fmt.Printf("%T %+v\n\n", sl[:2], sl[:2])
	// Appending individual bytes, then the bytes of a string.
	sl = append(sl, ':', 'a', 'p', 'p')
	sl = append(sl, "ended"...)
	fmt.Printf("sl=%s len=%d\n\n", sl, len(sl))
}
|
package consul
import (
"strings"
consulapi "github.com/hashicorp/consul/api"
)
// ConsulClient is a thin wrapper over the Consul API client exposing
// only the endpoints this package uses.
type ConsulClient struct {
	underlying *consulapi.Client
}

// Config holds the connection settings needed to build a ConsulClient.
type Config struct {
	UseSSL bool   // use https instead of http
	Host   string // Consul agent address (host:port)
}

// Catalog returns the underlying client's catalog endpoint.
func (c ConsulClient) Catalog() *consulapi.Catalog {
	return c.underlying.Catalog()
}

// KV returns the underlying client's key/value store endpoint.
func (c ConsulClient) KV() *consulapi.KV {
	return c.underlying.KV()
}

// Health returns the underlying client's health-check endpoint.
func (c ConsulClient) Health() *consulapi.Health {
	return c.underlying.Health()
}
// GetClient builds a ConsulClient from the Config, choosing the scheme
// from UseSSL. Any constructor error is passed through to the caller.
func (c Config) GetClient() (ConsulClient, error) {
	scheme := "http"
	if c.UseSSL {
		scheme = "https"
	}
	cfg := &consulapi.Config{
		Address: c.Host,
		Scheme:  scheme,
	}
	client, e := consulapi.NewClient(cfg)
	return ConsulClient{client}, e
}
// GetServicesKVs groups KV pairs by filter: a pair is added under every
// filter whose string occurs in the last path segment of the pair's key.
// Filters that match nothing are absent from the result map.
func GetServicesKVs(kvp consulapi.KVPairs, filters []string) map[string]consulapi.KVPairs {
	// A zero size hint is meaningless; plain make is the idiomatic form.
	result := make(map[string]consulapi.KVPairs)
	for _, kv := range kvp {
		pathSplit := strings.Split(kv.Key, "/")
		// Match against the final path segment only.
		for _, filter := range filters {
			if strings.Contains(pathSplit[len(pathSplit)-1], filter) {
				result[filter] = append(result[filter], kv)
			}
		}
	}
	return result
}
// GetServiceKVs returns the KV pairs whose key's second path segment
// (keys look like "<prefix>/<service>/...") equals service. Keys with no
// "/" are skipped.
func GetServiceKVs(kvp consulapi.KVPairs, service string) consulapi.KVPairs {
	result := make(consulapi.KVPairs, 0)
	for _, kv := range kvp {
		pathSplit := strings.Split(kv.Key, "/")
		// Guard: a key without a "/" splits into one element and the
		// original pathSplit[1] access panicked with index-out-of-range.
		if len(pathSplit) < 2 {
			continue
		}
		if pathSplit[1] == service {
			result = append(result, kv)
		}
	}
	return result
}
|
package main
// TreeNode is a binary-tree node as used by LeetCode problems.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// Stack is a LIFO stack of tree nodes backed by a growable slice.
type Stack struct {
	buffer []*TreeNode
}

// NewStack returns an empty stack ready for use.
func NewStack() *Stack {
	return &Stack{}
}

// Push places node on top of the stack.
func (s *Stack) Push(node *TreeNode) {
	s.buffer = append(s.buffer, node)
}

// Pop removes and returns the top node, or nil when the stack is empty.
func (s *Stack) Pop() *TreeNode {
	n := len(s.buffer)
	if n == 0 {
		return nil
	}
	top := s.buffer[n-1]
	s.buffer = s.buffer[:n-1]
	return top
}

// isEmpty reports whether the stack holds no nodes.
func (s *Stack) isEmpty() bool {
	return len(s.buffer) == 0
}

// BSTIterator yields the values of a binary search tree in ascending
// (in-order) sequence, lazily, using an explicit stack.
type BSTIterator struct {
	stack *Stack
}

// Constructor builds an iterator positioned before the smallest value.
func Constructor(root *TreeNode) BSTIterator {
	it := BSTIterator{NewStack()}
	it.iterate(root)
	return it
}

/** @return the next smallest number */
func (it *BSTIterator) Next() int {
	// The problem statement guarantees Next is only called when valid.
	node := it.stack.Pop()
	// Queue up the left spine of the popped node's right subtree.
	it.iterate(node.Right)
	return node.Val
}

/** @return whether we have a next smallest number */
func (it *BSTIterator) HasNext() bool {
	return !it.stack.isEmpty()
}

// iterate pushes root and its entire chain of left children, so the
// smallest remaining value ends up on top of the stack.
func (it *BSTIterator) iterate(root *TreeNode) {
	for root != nil {
		it.stack.Push(root)
		root = root.Left
	}
}
// main is intentionally empty: this file is a LeetCode-style solution
// exercised through the BSTIterator API rather than run as a program.
func main() {
}
/*
总结
1. 总体思路就是:通过迭代延迟实现中序遍历
*/
|
package models
import (
"time"
)
// Room : roomテーブルモデル
// Room is the model for the "room" table.
type Room struct {
	ID        int64     // primary key
	RoomOwner int64     // owner's ID — presumably references a user row; confirm against schema
	GameTitle int64     // game title ID — presumably references a game-title row; confirm
	Capacity  int       // maximum number of participants
	IsLock    bool      // whether the room is locked (e.g. password-protected)
	CreatedAt time.Time // creation timestamp
}
|
package cloudflare
import (
"context"
"fmt"
"net/http"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// regionalHostname is the hostname fixture shared by the tests below.
const regionalHostname = "eu.example.com"

// TestListRegions verifies ListDataLocalizationRegions performs a GET on
// the account-level regions endpoint and decodes the region list.
func TestListRegions(t *testing.T) {
	setup()
	defer teardown()
	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodGet, r.Method, "Expected method 'GET', got %s", r.Method)
		w.Header().Set("content-type", "application/json")
		fmt.Fprintf(w, `{
"result": [
{
"key": "ca",
"label": "Canada"
},
{
"key": "eu",
"label": "Europe"
}
],
"success": true,
"errors": [],
"messages": []
}`)
	}
	mux.HandleFunc("/accounts/"+testAccountID+"/addressing/regional_hostnames/regions", handler)
	want := []Region{
		{
			Key:   "ca",
			Label: "Canada",
		},
		{
			Key:   "eu",
			Label: "Europe",
		},
	}
	actual, err := client.ListDataLocalizationRegions(context.Background(), AccountIdentifier(testAccountID), ListDataLocalizationRegionsParams{})
	if assert.NoError(t, err) {
		assert.Equal(t, want, actual)
	}
}

// TestListRegionalHostnames verifies the zone-level listing decodes the
// hostname, region key, and created_on timestamp.
func TestListRegionalHostnames(t *testing.T) {
	setup()
	defer teardown()
	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodGet, r.Method, "Expected method 'GET', got %s", r.Method)
		w.Header().Set("content-type", "application/json")
		fmt.Fprintf(w, `{
"result": [
{
"hostname": "%s",
"region_key": "ca",
"created_on": "2023-01-14T00:47:57.060267Z"
}
],
"success": true,
"errors": [],
"messages": []
}`, regionalHostname)
	}
	mux.HandleFunc("/zones/"+testZoneID+"/addressing/regional_hostnames", handler)
	createdOn, _ := time.Parse(time.RFC3339, "2023-01-14T00:47:57.060267Z")
	want := []RegionalHostname{
		{
			Hostname:  regionalHostname,
			RegionKey: "ca",
			CreatedOn: &createdOn,
		},
	}
	actual, err := client.ListDataLocalizationRegionalHostnames(context.Background(), ZoneIdentifier(testZoneID), ListDataLocalizationRegionalHostnamesParams{})
	if assert.NoError(t, err) {
		assert.Equal(t, want, actual)
	}
}

// TestCreateRegionalHostname verifies a POST creates the hostname and the
// server-assigned created_on is surfaced on the result.
func TestCreateRegionalHostname(t *testing.T) {
	setup()
	defer teardown()
	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodPost, r.Method, "Expected method 'POST', got %s", r.Method)
		w.Header().Set("content-type", "application/json")
		fmt.Fprintf(w, `{
"result": {
"hostname": "%s",
"region_key": "ca",
"created_on": "2023-01-14T00:47:57.060267Z"
},
"success": true,
"errors": [],
"messages": []
}`, regionalHostname)
	}
	mux.HandleFunc("/zones/"+testZoneID+"/addressing/regional_hostnames", handler)
	params := CreateDataLocalizationRegionalHostnameParams{
		RegionKey: "ca",
		Hostname:  regionalHostname,
	}
	want := RegionalHostname{
		RegionKey: "ca",
		Hostname:  regionalHostname,
	}
	actual, err := client.CreateDataLocalizationRegionalHostname(context.Background(), ZoneIdentifier(testZoneID), params)
	// CreatedOn comes from the server response, so fill the expectation after the call.
	createdOn, _ := time.Parse(time.RFC3339, "2023-01-14T00:47:57.060267Z")
	want.CreatedOn = &createdOn
	if assert.NoError(t, err) {
		assert.Equal(t, want, actual)
	}
}

// TestGetRegionalHostname verifies a single hostname fetch decodes all
// fields of the result object.
func TestGetRegionalHostname(t *testing.T) {
	setup()
	defer teardown()
	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodGet, r.Method, "Expected method 'GET', got %s", r.Method)
		w.Header().Set("content-type", "application/json")
		fmt.Fprintf(w, `{
"result": {
"hostname": "%s",
"region_key": "ca",
"created_on": "2023-01-14T00:47:57.060267Z"
},
"success": true,
"errors": [],
"messages": []
}`, regionalHostname)
	}
	mux.HandleFunc("/zones/"+testZoneID+"/addressing/regional_hostnames/"+regionalHostname, handler)
	actual, err := client.GetDataLocalizationRegionalHostname(context.Background(), ZoneIdentifier(testZoneID), regionalHostname)
	createdOn, _ := time.Parse(time.RFC3339, "2023-01-14T00:47:57.060267Z")
	want := RegionalHostname{
		RegionKey: "ca",
		Hostname:  regionalHostname,
		CreatedOn: &createdOn,
	}
	if assert.NoError(t, err) {
		assert.Equal(t, want, actual)
	}
}

// TestUpdateRegionalHostname verifies a PATCH updates the region key and
// returns the refreshed record.
func TestUpdateRegionalHostname(t *testing.T) {
	setup()
	defer teardown()
	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodPatch, r.Method, "Expected method 'PATCH', got %s", r.Method)
		w.Header().Set("content-type", "application/json")
		fmt.Fprintf(w, `{
"result": {
"hostname": "%s",
"region_key": "eu",
"created_on": "2023-01-14T00:47:57.060267Z"
},
"success": true,
"errors": [],
"messages": []
}`, regionalHostname)
	}
	params := UpdateDataLocalizationRegionalHostnameParams{
		RegionKey: "eu",
		Hostname:  regionalHostname,
	}
	want := RegionalHostname{
		RegionKey: "eu",
		Hostname:  regionalHostname,
	}
	mux.HandleFunc("/zones/"+testZoneID+"/addressing/regional_hostnames/"+regionalHostname, handler)
	actual, err := client.UpdateDataLocalizationRegionalHostname(context.Background(), ZoneIdentifier(testZoneID), params)
	// CreatedOn comes from the server response, so fill the expectation after the call.
	createdOn, _ := time.Parse(time.RFC3339, "2023-01-14T00:47:57.060267Z")
	want.CreatedOn = &createdOn
	if assert.NoError(t, err) {
		assert.Equal(t, want, actual)
	}
}

// TestDeleteRegionalHostname verifies the delete helper issues a DELETE
// and reports no error on success.
func TestDeleteRegionalHostname(t *testing.T) {
	setup()
	defer teardown()
	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodDelete, r.Method, "Expected method 'DELETE', got %s", r.Method)
	}
	mux.HandleFunc("/zones/"+testZoneID+"/addressing/regional_hostnames/"+regionalHostname, handler)
	err := client.DeleteDataLocalizationRegionalHostname(context.Background(), ZoneIdentifier(testZoneID), regionalHostname)
	assert.NoError(t, err)
}
|
package singleton

import "sync"
var (
	lazyInstance *LazySingleton
	lazyOnce     sync.Once
)

// LazySingleton is a lazily-initialized singleton ("lazy" flavor of the
// singleton pattern).
type LazySingleton struct {
}

// GetLazyInstance returns the process-wide LazySingleton, creating it on
// first use. sync.Once makes initialization safe under concurrent
// callers — the original check-then-assign raced and could create two
// instances.
func GetLazyInstance() *LazySingleton {
	lazyOnce.Do(func() {
		lazyInstance = &LazySingleton{}
	})
	return lazyInstance
}
|
package output
import "fmt"
// Show prints the values of array to standard output, one slice per line.
func Show(array []int) {
	fmt.Printf("%v\n", array)
}
|
package db_query_loan
// 证件信息
import (
"bankBigData/BankServerJournal/entity"
"bankBigData/BankServerJournal/table"
"gitee.com/johng/gf/g"
)
// GetUserInfoByIdCard looks up a certificate-info record by ID-card
// number. It returns the zero-value entity together with any query or
// struct-mapping error.
func GetUserInfoByIdCard(idCard string) (entity.S_ecif_ecif_cert_info, error) {
	data := entity.S_ecif_ecif_cert_info{}
	db := g.DB()
	sql := db.Table(table.SEcifEcifCertInfo).Where(g.Map{"cert_num": idCard})
	r, err := sql.One()
	if err != nil {
		// Don't attempt to map a failed query result (the original did).
		return data, err
	}
	// Surface mapping failures instead of discarding them (was `_ =`).
	if err := r.ToStruct(&data); err != nil {
		return data, err
	}
	return data, nil
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
"bytes"
"fmt"
"io"
"log"
"strings"
"github.com/elastic/go-elasticsearch/v8"
"github.com/fatih/color"
"github.com/tidwall/gjson"
)
// Shared terminal-styling helpers for the report output.
var (
	faint = color.New(color.Faint)
	bold  = color.New(color.Bold)
)

// init strips timestamps/prefixes from the standard logger so failures
// print as plain messages.
func init() {
	log.SetFlags(0)
}
// main fetches cluster stats from a local Elasticsearch node and prints
// a small colorized report: cluster name, health status, and a fixed set
// of metrics extracted from the JSON response with gjson paths.
func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("Error creating client: %s", err)
	}
	// WithHuman asks the API for human-readable values (e.g. "1gb").
	res, err := es.Cluster.Stats(es.Cluster.Stats.WithHuman())
	if err != nil {
		log.Fatalf("Error getting response: %s", err)
	}
	defer res.Body.Close()
	json := read(res.Body)
	fmt.Println(strings.Repeat("─", 50))
	faint.Print("cluster ")
	// Get cluster name
	bold.Print(gjson.Get(json, "cluster_name"))
	faint.Print(" status=")
	// Get cluster health status
	status := gjson.Get(json, "status")
	// Color the status like Elasticsearch's own convention; an unknown
	// value is underlined red to stand out.
	switch status.Str {
	case "green":
		bold.Add(color.FgHiGreen).Print(status)
	case "yellow":
		bold.Add(color.FgHiYellow).Print(status)
	case "red":
		bold.Add(color.FgHiRed).Print(status)
	default:
		bold.Add(color.FgHiRed, color.Underline).Print(status)
	}
	fmt.Println("\n" + strings.Repeat("─", 50))
	// gjson paths of the metrics to report, in display order.
	stats := []string{
		"indices.count",
		"indices.docs.count",
		"indices.store.size",
		"nodes.count.total",
		"nodes.os.mem.used_percent",
		"nodes.process.cpu.percent",
		"nodes.jvm.versions.#.version",
		"nodes.jvm.mem.heap_used",
		"nodes.jvm.mem.heap_max",
		"nodes.fs.free",
	}
	// Right-align the labels to the widest path.
	var maxwidth int
	for _, item := range stats {
		if len(item) > maxwidth {
			maxwidth = len(item)
		}
	}
	for _, item := range stats {
		pad := maxwidth - len(item)
		fmt.Print(strings.Repeat(" ", pad))
		faint.Printf("%s |", item)
		// Get stat dynamically from json
		fmt.Printf(" %s\n", gjson.Get(json, item))
	}
	fmt.Println()
}
func read(r io.Reader) string {
var b bytes.Buffer
b.ReadFrom(r)
return b.String()
}
|
package twelve
import (
"strings"
)
const testVersion = 1

// Song returns the whole twelve days song, one verse per line, with a
// trailing newline after the final verse.
func Song() string {
	return `On the first day of Christmas my true love gave to me, a Partridge in a Pear Tree.
On the second day of Christmas my true love gave to me, two Turtle Doves, and a Partridge in a Pear Tree.
On the third day of Christmas my true love gave to me, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
On the fourth day of Christmas my true love gave to me, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
On the fifth day of Christmas my true love gave to me, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
On the sixth day of Christmas my true love gave to me, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
On the seventh day of Christmas my true love gave to me, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
On the eighth day of Christmas my true love gave to me, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
On the ninth day of Christmas my true love gave to me, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
On the tenth day of Christmas my true love gave to me, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
On the eleventh day of Christmas my true love gave to me, eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
On the twelfth day of Christmas my true love gave to me, twelve Drummers Drumming, eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.
`
}

// Verse returns the verse of the song for that day (1–12). Out-of-range
// days yield the empty string; the original indexed the split directly
// and panicked on any day outside 1–13.
func Verse(day int) string {
	if day < 1 || day > 12 {
		return ""
	}
	// l - line.
	l := strings.Split(Song(), "\n")
	return l[day-1]
}
|
package testutils
import (
"testing"
)
// TestIgnoreKernelVersionCheckWhenEnvVarIsSet verifies that
// ignoreKernelVersionCheck matches the test name against the
// comma-separated list in the environment variable, including empty
// values, multi-entry lists, and entries with surrounding whitespace.
func TestIgnoreKernelVersionCheckWhenEnvVarIsSet(t *testing.T) {
	tests := []struct {
		name                     string
		toIgnoreNamesEnvValue    string
		testName                 string
		ignoreKernelVersionCheck bool
	}{
		{
			name:                     "should NOT ignore kernel version check if environment var set to empty string",
			toIgnoreNamesEnvValue:    "",
			testName:                 "TestABC",
			ignoreKernelVersionCheck: false,
		},
		{
			name:                     "should ignore kernel version check if environment var set to skip test name with single value",
			toIgnoreNamesEnvValue:    "TestABC",
			testName:                 "TestABC",
			ignoreKernelVersionCheck: true,
		},
		{
			name:                     "should match test name when multiple comma separated names list is provided",
			toIgnoreNamesEnvValue:    "TestABC,TestXYZ",
			testName:                 "TestXYZ",
			ignoreKernelVersionCheck: true,
		},
		{
			name:                     "should NOT match test name when multiple comma separated names list is provided but name is not present in list",
			toIgnoreNamesEnvValue:    "TestABC,TestXYZ",
			testName:                 "TestPQR",
			ignoreKernelVersionCheck: false,
		},
		{
			name:                     "should match test name if names list has leading/trailing spaces",
			toIgnoreNamesEnvValue:    "TestABC, TestXYZ , TestPQR",
			testName:                 "TestXYZ",
			ignoreKernelVersionCheck: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// t.Setenv restores the variable automatically after the subtest.
			t.Setenv(ignoreKernelVersionEnvVar, tt.toIgnoreNamesEnvValue)
			if got := ignoreKernelVersionCheck(tt.testName); got != tt.ignoreKernelVersionCheck {
				t.Errorf("ignoreKernelVersionCheck() = %v, want %v", got, tt.ignoreKernelVersionCheck)
			}
		})
	}
}
|
/*
* @lc app=leetcode.cn id=1137 lang=golang
*
* [1137] 第 N 个泰波那契数
*/
package main
// @lc code=start
// TriList memoizes Tribonacci values; T(0)=0, T(1)=T(2)=1 are seeded and
// indices up to 37 cover every n allowed by the problem constraints.
var TriList = [38]int{
	0,
	1,
	1,
}

// tribonacci returns the n-th Tribonacci number. Uncomputed entries are
// filled bottom-up (the original used triple recursion per missing
// entry; the iterative fill is simpler and avoids deep call chains while
// producing identical table contents).
func tribonacci(n int) int {
	if n != 0 && TriList[n] == 0 {
		for i := 3; i <= n; i++ {
			if TriList[i] == 0 {
				TriList[i] = TriList[i-3] + TriList[i-2] + TriList[i-1]
			}
		}
	}
	return TriList[n]
}
// func main() {
// fmt.Println(tribonacci(4), tribonacci(25))
// }
// @lc code=end
|
package handler
import (
"io/ioutil"
"log"
"mime"
"net/http"
"os"
"path/filepath"
"github.com/google/uuid"
"2019_2_IBAT/pkg/app/auth"
"2019_2_IBAT/pkg/app/auth/session"
"2019_2_IBAT/pkg/app/users"
"2019_2_IBAT/pkg/pkg/config"
. "2019_2_IBAT/pkg/pkg/models"
)
const MAXUPLOADSIZE = 32 * 1024 * 1024 // 32 MiB multipart-form memory cap (original comment said "1 mb", which contradicted the value)
// Handler serves the user-facing HTTP endpoints, delegating
// authentication to the session service and persistence to the user
// service.
type Handler struct {
	InternalDir string                // filesystem directory where uploaded files are written
	AuthService session.ServiceClient // session create/delete RPC client
	UserService users.Service         // seeker/employer CRUD service
}
// GetUser writes the authenticated user's profile as JSON, choosing the
// seeker or employer representation based on the role stored in the
// request context. Unauthenticated requests (or unknown roles) get 401.
func (h *Handler) GetUser(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	authInfo, ok := FromContext(r.Context())
	if !ok {
		log.Println("GetUser Handler: unauthorized")
		SetError(w, http.StatusUnauthorized, UnauthorizedMsg)
		return
	}
	if authInfo.Role == SeekerStr {
		seeker, err := h.UserService.GetSeeker(authInfo.ID)
		if err != nil {
			log.Println("GetUser Handler: failed to get seeker")
			SetError(w, http.StatusBadRequest, InternalErrorMsg)
			return
		}
		answer := UserSeekAnswer{
			Role:   SeekerStr,
			Seeker: seeker,
		}
		// Marshal errors are deliberately ignored here — presumably the
		// generated MarshalJSON cannot fail; confirm before relying on it.
		answerJSON, _ := answer.MarshalJSON()
		w.Write(answerJSON)
	} else if authInfo.Role == EmployerStr {
		employer, err := h.UserService.GetEmployer(authInfo.ID)
		if err != nil {
			log.Println("GetUser Handler: failed to get employer")
			SetError(w, http.StatusBadRequest, InternalErrorMsg)
			return
		}
		answer := UserEmplAnswer{
			Role:     EmployerStr,
			Employer: employer,
		}
		answerJSON, _ := answer.MarshalJSON()
		w.Write([]byte(answerJSON))
	} else {
		log.Println("GetUser Handler: unauthorized")
		SetError(w, http.StatusUnauthorized, UnauthorizedMsg)
		return
	}
}
// DeleteUser removes the authenticated user's account, tears down the
// server-side session, and re-sends the session cookie to the client.
func (h *Handler) DeleteUser(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	authInfo, ok := FromContext(r.Context())
	if !ok {
		SetError(w, http.StatusUnauthorized, UnauthorizedMsg)
		return
	}
	err := h.UserService.DeleteUser(authInfo)
	if err != nil {
		SetError(w, http.StatusForbidden, ForbiddenMsg)
		return
	}
	// Guard against a missing cookie: the original discarded this error
	// and would have dereferenced a nil cookie below.
	cookie, err := r.Cookie(auth.CookieName)
	if err != nil {
		SetError(w, http.StatusInternalServerError, InternalErrorMsg)
		return
	}
	sessionBool, err := h.AuthService.DeleteSession(r.Context(), &session.Cookie{
		Cookie: cookie.Value,
	})
	// Check the RPC error before touching its result — on error the
	// response may be nil (the original only inspected sessionBool.Ok).
	if err != nil || !sessionBool.Ok {
		SetError(w, http.StatusInternalServerError, InternalErrorMsg)
		return
	}
	http.SetCookie(w, cookie)
}
// PutUser updates the authenticated user's profile from the JSON request
// body, dispatching on the role in the request context. Roles other than
// seeker/employer fall through silently with a 200 and no change.
func (h *Handler) PutUser(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	defer r.Body.Close()
	authInfo, ok := FromContext(r.Context())
	if !ok {
		SetError(w, http.StatusUnauthorized, UnauthorizedMsg)
		return
	}
	if authInfo.Role == SeekerStr {
		err := h.UserService.PutSeeker(r.Body, authInfo.ID)
		if err != nil {
			SetError(w, http.StatusForbidden, ForbiddenMsg)
			return
		}
	} else if authInfo.Role == EmployerStr {
		err := h.UserService.PutEmployer(r.Body, authInfo.ID)
		if err != nil {
			SetError(w, http.StatusForbidden, ForbiddenMsg)
			return
		}
	}
}
// UploadFile returns a handler that accepts a multipart image upload
// (form key "my_file"), validates it is a JPEG/GIF/PNG by sniffing the
// content, writes it under InternalDir with a fresh UUID name, and
// records the public path on the user's profile.
func (h *Handler) UploadFile() http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=UTF-8")
		authInfo, ok := FromContext(r.Context())
		if !ok {
			SetError(w, http.StatusUnauthorized, UnauthorizedMsg)
			return
		}
		// r.Body = http.MaxBytesReader(w, r.Body, MAXUPLOADSIZE)
		if err := r.ParseMultipartForm(MAXUPLOADSIZE); err != nil {
			log.Printf("Invalid size: %s", err.Error())
			SetError(w, http.StatusBadRequest, "Invalid size")
			return
		}
		file, _, err := r.FormFile("my_file")
		if err != nil {
			log.Println("my_file")
			SetError(w, http.StatusBadRequest, "Invalid form key")
			return
		}
		defer file.Close()
		fileBytes, err := ioutil.ReadAll(file)
		if err != nil {
			log.Println("Bad file")
			SetError(w, http.StatusBadRequest, "Bad file")
			return
		}
		// Sniff the real content type rather than trusting the client.
		filetype := http.DetectContentType(fileBytes)
		switch filetype {
		// Empty case bodies: these types simply pass validation.
		case "image/jpeg", "image/jpg":
		case "image/gif", "image/png":
			break
		default:
			SetError(w, http.StatusBadRequest, "Invalid extension")
			return
		}
		fileName := uuid.New().String()
		fileEndings, err := mime.ExtensionsByType(filetype)
		if err != nil {
			SetError(w, http.StatusBadRequest, "Invalid extension")
			return
		}
		pkgPath := filepath.Join(h.InternalDir, fileName+fileEndings[0])
		newFile, err := os.Create(pkgPath)
		if err != nil {
			SetError(w, http.StatusInternalServerError, "Failed to set image")
			return
		}
		defer newFile.Close()
		// Explicit Close here flushes before SetImage; the deferred Close
		// then re-closes an already-closed file, which is a harmless no-op error.
		if _, err := newFile.Write(fileBytes); err != nil || newFile.Close() != nil {
			SetError(w, http.StatusInternalServerError, InternalErrorMsg)
			return
		}
		publicPath := filepath.Join(config.PublicDir, fileName+fileEndings[0])
		h.UserService.SetImage(authInfo.ID, authInfo.Role, publicPath)
	})
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/bigquery/alpha/bigquery_alpha_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigquery/alpha"
)
// DatasetServer implements the gRPC interface for Dataset.
type DatasetServer struct{}

// ProtoToDatasetAccess converts a DatasetAccess object from its proto representation.
// A nil proto yields a nil object.
func ProtoToBigqueryAlphaDatasetAccess(p *alphapb.BigqueryAlphaDatasetAccess) *alpha.DatasetAccess {
	if p == nil {
		return nil
	}
	obj := &alpha.DatasetAccess{
		Role:         dcl.StringOrNil(p.GetRole()),
		UserByEmail:  dcl.StringOrNil(p.GetUserByEmail()),
		GroupByEmail: dcl.StringOrNil(p.GetGroupByEmail()),
		Domain:       dcl.StringOrNil(p.GetDomain()),
		SpecialGroup: dcl.StringOrNil(p.GetSpecialGroup()),
		IamMember:    dcl.StringOrNil(p.GetIamMember()),
		View:         ProtoToBigqueryAlphaDatasetAccessView(p.GetView()),
		Routine:      ProtoToBigqueryAlphaDatasetAccessRoutine(p.GetRoutine()),
	}
	return obj
}

// ProtoToDatasetAccessView converts a DatasetAccessView object from its proto representation.
// A nil proto yields a nil object.
func ProtoToBigqueryAlphaDatasetAccessView(p *alphapb.BigqueryAlphaDatasetAccessView) *alpha.DatasetAccessView {
	if p == nil {
		return nil
	}
	obj := &alpha.DatasetAccessView{
		ProjectId: dcl.StringOrNil(p.GetProjectId()),
		DatasetId: dcl.StringOrNil(p.GetDatasetId()),
		TableId:   dcl.StringOrNil(p.GetTableId()),
	}
	return obj
}

// ProtoToDatasetAccessRoutine converts a DatasetAccessRoutine object from its proto representation.
// A nil proto yields a nil object.
func ProtoToBigqueryAlphaDatasetAccessRoutine(p *alphapb.BigqueryAlphaDatasetAccessRoutine) *alpha.DatasetAccessRoutine {
	if p == nil {
		return nil
	}
	obj := &alpha.DatasetAccessRoutine{
		ProjectId: dcl.StringOrNil(p.GetProjectId()),
		DatasetId: dcl.StringOrNil(p.GetDatasetId()),
		RoutineId: dcl.StringOrNil(p.GetRoutineId()),
	}
	return obj
}

// ProtoToDatasetDefaultEncryptionConfiguration converts a DatasetDefaultEncryptionConfiguration object from its proto representation.
// A nil proto yields a nil object.
func ProtoToBigqueryAlphaDatasetDefaultEncryptionConfiguration(p *alphapb.BigqueryAlphaDatasetDefaultEncryptionConfiguration) *alpha.DatasetDefaultEncryptionConfiguration {
	if p == nil {
		return nil
	}
	obj := &alpha.DatasetDefaultEncryptionConfiguration{
		KmsKeyName: dcl.StringOrNil(p.GetKmsKeyName()),
	}
	return obj
}
// ProtoToDataset converts a Dataset resource from its proto representation.
//
// NOTE(review): unlike the ProtoTo* helpers above, this top-level
// converter has no p == nil guard; a nil proto produces a zero-valued
// Dataset (generated proto getters are nil-safe). Confirm whether
// callers ever pass nil and whether they rely on that behavior.
func ProtoToDataset(p *alphapb.BigqueryAlphaDataset) *alpha.Dataset {
	obj := &alpha.Dataset{
		Etag:                         dcl.StringOrNil(p.GetEtag()),
		Id:                           dcl.StringOrNil(p.GetId()),
		SelfLink:                     dcl.StringOrNil(p.GetSelfLink()),
		Name:                         dcl.StringOrNil(p.GetName()),
		Project:                      dcl.StringOrNil(p.GetProject()),
		FriendlyName:                 dcl.StringOrNil(p.GetFriendlyName()),
		Description:                  dcl.StringOrNil(p.GetDescription()),
		DefaultTableExpirationMs:     dcl.StringOrNil(p.GetDefaultTableExpirationMs()),
		DefaultPartitionExpirationMs: dcl.StringOrNil(p.GetDefaultPartitionExpirationMs()),
		CreationTime:                 dcl.Int64OrNil(p.GetCreationTime()),
		LastModifiedTime:             dcl.Int64OrNil(p.GetLastModifiedTime()),
		Location:                     dcl.StringOrNil(p.GetLocation()),
		Published:                    dcl.Bool(p.GetPublished()),
		DefaultEncryptionConfiguration: ProtoToBigqueryAlphaDatasetDefaultEncryptionConfiguration(p.GetDefaultEncryptionConfiguration()),
	}
	// Repeated access entries are converted element by element.
	for _, r := range p.GetAccess() {
		obj.Access = append(obj.Access, *ProtoToBigqueryAlphaDatasetAccess(r))
	}
	return obj
}
// DatasetAccessToProto converts a DatasetAccess object to its proto representation.
// A nil object yields a nil proto.
func BigqueryAlphaDatasetAccessToProto(o *alpha.DatasetAccess) *alphapb.BigqueryAlphaDatasetAccess {
	if o == nil {
		return nil
	}
	p := &alphapb.BigqueryAlphaDatasetAccess{}
	p.SetRole(dcl.ValueOrEmptyString(o.Role))
	p.SetUserByEmail(dcl.ValueOrEmptyString(o.UserByEmail))
	p.SetGroupByEmail(dcl.ValueOrEmptyString(o.GroupByEmail))
	p.SetDomain(dcl.ValueOrEmptyString(o.Domain))
	p.SetSpecialGroup(dcl.ValueOrEmptyString(o.SpecialGroup))
	p.SetIamMember(dcl.ValueOrEmptyString(o.IamMember))
	p.SetView(BigqueryAlphaDatasetAccessViewToProto(o.View))
	p.SetRoutine(BigqueryAlphaDatasetAccessRoutineToProto(o.Routine))
	return p
}

// DatasetAccessViewToProto converts a DatasetAccessView object to its proto representation.
// A nil object yields a nil proto.
func BigqueryAlphaDatasetAccessViewToProto(o *alpha.DatasetAccessView) *alphapb.BigqueryAlphaDatasetAccessView {
	if o == nil {
		return nil
	}
	p := &alphapb.BigqueryAlphaDatasetAccessView{}
	p.SetProjectId(dcl.ValueOrEmptyString(o.ProjectId))
	p.SetDatasetId(dcl.ValueOrEmptyString(o.DatasetId))
	p.SetTableId(dcl.ValueOrEmptyString(o.TableId))
	return p
}

// DatasetAccessRoutineToProto converts a DatasetAccessRoutine object to its proto representation.
// A nil object yields a nil proto.
func BigqueryAlphaDatasetAccessRoutineToProto(o *alpha.DatasetAccessRoutine) *alphapb.BigqueryAlphaDatasetAccessRoutine {
	if o == nil {
		return nil
	}
	p := &alphapb.BigqueryAlphaDatasetAccessRoutine{}
	p.SetProjectId(dcl.ValueOrEmptyString(o.ProjectId))
	p.SetDatasetId(dcl.ValueOrEmptyString(o.DatasetId))
	p.SetRoutineId(dcl.ValueOrEmptyString(o.RoutineId))
	return p
}

// DatasetDefaultEncryptionConfigurationToProto converts a DatasetDefaultEncryptionConfiguration object to its proto representation.
// A nil object yields a nil proto.
func BigqueryAlphaDatasetDefaultEncryptionConfigurationToProto(o *alpha.DatasetDefaultEncryptionConfiguration) *alphapb.BigqueryAlphaDatasetDefaultEncryptionConfiguration {
	if o == nil {
		return nil
	}
	p := &alphapb.BigqueryAlphaDatasetDefaultEncryptionConfiguration{}
	p.SetKmsKeyName(dcl.ValueOrEmptyString(o.KmsKeyName))
	return p
}
// DatasetToProto converts a Dataset resource to its proto representation.
func DatasetToProto(resource *alpha.Dataset) *alphapb.BigqueryAlphaDataset {
	p := &alphapb.BigqueryAlphaDataset{}
	p.SetEtag(dcl.ValueOrEmptyString(resource.Etag))
	p.SetId(dcl.ValueOrEmptyString(resource.Id))
	p.SetSelfLink(dcl.ValueOrEmptyString(resource.SelfLink))
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetFriendlyName(dcl.ValueOrEmptyString(resource.FriendlyName))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetDefaultTableExpirationMs(dcl.ValueOrEmptyString(resource.DefaultTableExpirationMs))
	p.SetDefaultPartitionExpirationMs(dcl.ValueOrEmptyString(resource.DefaultPartitionExpirationMs))
	p.SetCreationTime(dcl.ValueOrEmptyInt64(resource.CreationTime))
	p.SetLastModifiedTime(dcl.ValueOrEmptyInt64(resource.LastModifiedTime))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	p.SetPublished(dcl.ValueOrEmptyBool(resource.Published))
	p.SetDefaultEncryptionConfiguration(BigqueryAlphaDatasetDefaultEncryptionConfigurationToProto(resource.DefaultEncryptionConfiguration))
	// Copy the labels map into the proto's map field.
	mLabels := make(map[string]string, len(resource.Labels))
	for k, r := range resource.Labels {
		mLabels[k] = r
	}
	p.SetLabels(mLabels)
	// Convert each access entry; &r is read immediately by the callee,
	// so taking the loop variable's address here is safe.
	sAccess := make([]*alphapb.BigqueryAlphaDatasetAccess, len(resource.Access))
	for i, r := range resource.Access {
		sAccess[i] = BigqueryAlphaDatasetAccessToProto(&r)
	}
	p.SetAccess(sAccess)
	return p
}
// applyDataset handles the gRPC request by passing it to the underlying Dataset Apply() method.
func (s *DatasetServer) applyDataset(ctx context.Context, c *alpha.Client, request *alphapb.ApplyBigqueryAlphaDatasetRequest) (*alphapb.BigqueryAlphaDataset, error) {
	// Convert proto -> DCL resource, apply, then convert the result back.
	p := ProtoToDataset(request.GetResource())
	res, err := c.ApplyDataset(ctx, p)
	if err != nil {
		return nil, err
	}
	r := DatasetToProto(res)
	return r, nil
}
// ApplyBigqueryAlphaDataset handles the gRPC request by passing it to the underlying Dataset Apply() method.
func (s *DatasetServer) ApplyBigqueryAlphaDataset(ctx context.Context, request *alphapb.ApplyBigqueryAlphaDatasetRequest) (*alphapb.BigqueryAlphaDataset, error) {
	// Each request builds its own client from the supplied credentials file.
	cl, err := createConfigDataset(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyDataset(ctx, cl, request)
}
// DeleteBigqueryAlphaDataset handles the gRPC request by passing it to the underlying Dataset Delete() method.
func (s *DatasetServer) DeleteBigqueryAlphaDataset(ctx context.Context, request *alphapb.DeleteBigqueryAlphaDatasetRequest) (*emptypb.Empty, error) {
	cl, err := createConfigDataset(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	// Delete returns no payload; propagate the delete error alongside Empty.
	return &emptypb.Empty{}, cl.DeleteDataset(ctx, ProtoToDataset(request.GetResource()))
}
// ListBigqueryAlphaDataset handles the gRPC request by passing it to the
// underlying DatasetList() method and converting each result to its proto
// representation.
func (s *DatasetServer) ListBigqueryAlphaDataset(ctx context.Context, request *alphapb.ListBigqueryAlphaDatasetRequest) (*alphapb.ListBigqueryAlphaDatasetResponse, error) {
	cl, err := createConfigDataset(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListDataset(ctx, request.GetProject())
	if err != nil {
		return nil, err
	}
	// Pre-size the result slice to avoid repeated growth during append.
	protos := make([]*alphapb.BigqueryAlphaDataset, 0, len(resources.Items))
	for _, r := range resources.Items {
		protos = append(protos, DatasetToProto(r))
	}
	p := &alphapb.ListBigqueryAlphaDatasetResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigDataset builds an alpha API client whose requests authenticate
// with the given service-account credentials file.
func createConfigDataset(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	// Parameter renamed from snake_case to Go-conventional camelCase; the
	// signature (types, order) is unchanged for callers.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
package mqtt
import (
"context"
"io/ioutil"
"time"
mqtt "github.com/eclipse/paho.mqtt.golang"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/batchcorp/plumber-schemas/build/go/protos/args"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/tools/mqttfakes"
"github.com/batchcorp/plumber/validate"
)
// Ginkgo specs for the MQTT backend: relay-option validation plus Relay()
// subscribe-failure and happy-path behavior, driven by mqttfakes fakes.
var _ = Describe("MQTT Backend", func() {
	var m *MQTT
	var relayOpts *opts.RelayOptions
	// Each spec gets a fresh backend (fake client, discarded logs) and a
	// minimal set of valid relay options it can selectively break.
	BeforeEach(func() {
		m = &MQTT{
			connArgs: &args.MQTTConn{},
			client: &mqttfakes.FakeClient{},
			log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}),
		}
		relayOpts = &opts.RelayOptions{
			Mqtt: &opts.RelayGroupMQTTOptions{
				Args: &args.MQTTReadArgs{
					Topic: "test",
					ReadTimeoutSeconds: 1,
				},
			},
		}
	})
	Context("validateRelayOptions", func() {
		It("validates nil relay options", func() {
			err := validateRelayOptions(nil)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyRelayOpts))
		})
		It("validates missing backend group", func() {
			relayOpts.Mqtt = nil
			err := validateRelayOptions(relayOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyBackendGroup))
		})
		It("validates missing backend args", func() {
			relayOpts.Mqtt.Args = nil
			err := validateRelayOptions(relayOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyBackendArgs))
		})
		It("validates empty topic", func() {
			relayOpts.Mqtt.Args.Topic = ""
			err := validateRelayOptions(relayOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(ErrEmptyTopic))
		})
	})
	Context("Relay", func() {
		It("validates relay options", func() {
			err := m.Relay(context.Background(), nil, nil, nil)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyRelayOpts.Error()))
		})
		It("returns an error when subscribe fails", func() {
			// The fake token reports an error, simulating a failed SUBSCRIBE.
			fakeMQTT := &mqttfakes.FakeClient{}
			fakeMQTT.SubscribeStub = func(topic string, qos byte, handler mqtt.MessageHandler) mqtt.Token {
				return &mqttfakes.FakeToken{
					ErrorStub: func() error {
						return errors.New("test error")
					},
				}
			}
			m.client = fakeMQTT
			relayCh := make(chan interface{}, 1)
			errorsCh := make(chan *records.ErrorRecord, 1)
			err := m.Relay(context.Background(), relayOpts, relayCh, errorsCh)
			Expect(err).To(HaveOccurred())
			Expect(fakeMQTT.SubscribeCallCount()).To(Equal(1))
		})
		It("relays a message", func() {
			// The fake invokes the registered handler synchronously with one
			// message, which Relay should forward onto relayCh.
			fakeMQTT := &mqttfakes.FakeClient{}
			fakeMQTT.SubscribeStub = func(topic string, qos byte, handler mqtt.MessageHandler) mqtt.Token {
				msg := &mqttfakes.FakeMessage{}
				msg.PayloadStub = func() []byte {
					return []byte(`testing`)
				}
				handler(fakeMQTT, msg)
				return &mqttfakes.FakeToken{}
			}
			m.client = fakeMQTT
			relayCh := make(chan interface{}, 1)
			errorCh := make(chan *records.ErrorRecord, 1)
			ctx, cancel := context.WithCancel(context.Background())
			// Cancel shortly after start so Relay returns instead of blocking.
			go func() {
				time.Sleep(time.Millisecond * 200)
				cancel()
			}()
			err := m.Relay(ctx, relayOpts, relayCh, errorCh)
			Expect(err).ToNot(HaveOccurred())
			Expect(fakeMQTT.SubscribeCallCount()).To(Equal(1))
			Expect(relayCh).To(Receive())
		})
	})
})
|
package gray_image
import (
"testing"
)
// Test_ResizeProportional resizes the fixture image within a 500x100 bound
// and fails the test if the conversion reports an error.
func Test_ResizeProportional(t *testing.T) {
	const (
		srcPath = "./testdata/big.jpg"
		outPath = "./testdata/out.jpg"
	)
	if err := ResizeProportional(srcPath, outPath, 500, 100); err != nil {
		t.Errorf("resize error: %v", err)
	}
}
|
package gfuns
// FFprobe mirrors the JSON document produced by `ffprobe -print_format json
// -show_streams -show_format`: one entry per media stream plus container
// metadata. Many numeric values arrive as strings, matching ffprobe output.
type FFprobe struct {
	Streams []struct {
		Index int `json:"index"`
		CodecName string `json:"codec_name"`
		CodecLongName string `json:"codec_long_name"`
		Profile string `json:"profile"`
		CodecType string `json:"codec_type"`
		CodecTimeBase string `json:"codec_time_base"`
		CodecTagString string `json:"codec_tag_string"`
		CodecTag string `json:"codec_tag"`
		Width int `json:"width,omitempty"`
		Height int `json:"height,omitempty"`
		CodedWidth int `json:"coded_width,omitempty"`
		CodedHeight int `json:"coded_height,omitempty"`
		HasBFrames int `json:"has_b_frames,omitempty"`
		SampleAspectRatio string `json:"sample_aspect_ratio,omitempty"`
		DisplayAspectRatio string `json:"display_aspect_ratio,omitempty"`
		PixFmt string `json:"pix_fmt,omitempty"`
		Level int `json:"level,omitempty"`
		ChromaLocation string `json:"chroma_location,omitempty"`
		Refs int `json:"refs,omitempty"`
		IsAvc string `json:"is_avc,omitempty"`
		NalLengthSize string `json:"nal_length_size,omitempty"`
		RFrameRate string `json:"r_frame_rate"`
		AvgFrameRate string `json:"avg_frame_rate"`
		TimeBase string `json:"time_base"`
		StartPts int `json:"start_pts"`
		StartTime string `json:"start_time"`
		DurationTs int `json:"duration_ts"`
		Duration string `json:"duration"`
		BitRate string `json:"bit_rate"`
		BitsPerRawSample string `json:"bits_per_raw_sample,omitempty"`
		NbFrames string `json:"nb_frames"`
		Disposition struct {
			Default int `json:"default"`
			Dub int `json:"dub"`
			Original int `json:"original"`
			Comment int `json:"comment"`
			Lyrics int `json:"lyrics"`
			Karaoke int `json:"karaoke"`
			Forced int `json:"forced"`
			HearingImpaired int `json:"hearing_impaired"`
			VisualImpaired int `json:"visual_impaired"`
			CleanEffects int `json:"clean_effects"`
			AttachedPic int `json:"attached_pic"`
			TimedThumbnails int `json:"timed_thumbnails"`
		} `json:"disposition"`
		Tags struct {
			Language string `json:"language"`
			HandlerName string `json:"handler_name"`
			Encoder string `json:"encoder"`
		} `json:"tags,omitempty"`
		SampleFmt string `json:"sample_fmt,omitempty"`
		SampleRate string `json:"sample_rate,omitempty"`
		Channels int `json:"channels,omitempty"`
		ChannelLayout string `json:"channel_layout,omitempty"`
		BitsPerSample int `json:"bits_per_sample,omitempty"`
		MaxBitRate string `json:"max_bit_rate,omitempty"`
	} `json:"streams"`
	Format struct {
		Filename string `json:"filename"`
		NbStreams int `json:"nb_streams"`
		NbPrograms int `json:"nb_programs"`
		FormatName string `json:"format_name"`
		FormatLongName string `json:"format_long_name"`
		StartTime string `json:"start_time"`
		Duration string `json:"duration"`
		Size string `json:"size"`
		BitRate string `json:"bit_rate"`
		ProbeScore int `json:"probe_score"`
		Tags struct {
			MajorBrand string `json:"major_brand"`
			MinorVersion string `json:"minor_version"`
			CompatibleBrands string `json:"compatible_brands"`
			Encoder string `json:"encoder"`
		} `json:"tags"`
	} `json:"format"`
}
// GCSEvent describes one processing job: the GCS object that triggered it,
// encryption metadata, the source file record, the requested output
// resolutions, and the produced results.
type GCSEvent struct {
	Id string `json:"id"`
	Time float64 `json:"time"`
	Bucket string `json:"bucket"`
	Name string `json:"name"`
	Pattern string `json:"pattern"`
	Resolution Resolution `json:"resolution"`
	Meta Meta `json:"meta"`
	File File `json:"file"`
	Impls []Resolution `json:"impls"`
	Result []string `json:"result"`
	Tags Tags `json:"tags"`
}
// Meta carries encryption parameters (key and initialization vector).
type Meta struct {
	Key string `json:"key"`
	Iv string `json:"iv"`
}
// Base holds fields shared by stored records (identity, title, ownership).
type Base struct {
	Id string `json:"id"`
	Title string `json:"title"`
	Meta string `json:"meta"`
	UserId string `json:"userId"`
	CreatedDate string `json:"createdDate"`
}
// File is a Base record plus its source URL and uploader IP.
type File struct {
	Base
	Url string `json:"url"`
	Ip string `json:"ip"`
}
// Resolution identifies one output rendition by pixel dimensions.
type Resolution struct {
	Width int `json:"width"`
	Height int `json:"height"`
	Id string `json:"id"`
}
// Properties mirrors AMQP message properties as exposed by the broker API.
type Properties struct {
	ContentType string `json:"content_type"`
	DeliveryMode int `json:"delivery_mode"`
	Priority int `json:"priority"`
	MessageID string `json:"message_id"`
	Timestamp int `json:"timestamp"`
}
// Result is a published message: properties, routing key, and payload.
type Result struct {
	Properties Properties `json:"properties"`
	RoutingKey string `json:"routing_key"`
	Payload string `json:"payload"`
	PayloadEncoding string `json:"payload_encoding"`
}
// Tags names the master and index artifacts of a processed output.
type Tags struct {
	Master string `json:"master"`
	Index string `json:"index"`
}
|
package decodestring
// decodeString expands an encoded string in which "k[sub]" means sub repeated
// k times; encodings may nest, e.g. "3[a2[c]]" decodes to "accaccacc".
func decodeString(s string) string {
	var (
		counts []int    // pending repeat counts, one per open '['
		prefix []string // text accumulated before each open '['
		num    int      // repeat count currently being parsed
		cur    string   // segment built since the most recent '['
	)
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case isNumber(c):
			num = num*10 + int(c-'0')
		case c == '[':
			// Push the pending count and prefix, then start a fresh segment.
			counts = append(counts, num)
			prefix = append(prefix, cur)
			num, cur = 0, ""
		case c == ']':
			// Pop the count and splice cur, repeated, onto the saved prefix.
			k := counts[len(counts)-1]
			counts = counts[:len(counts)-1]
			expanded := prefix[len(prefix)-1]
			for ; k > 0; k-- {
				expanded += cur
			}
			cur = expanded
			prefix = prefix[:len(prefix)-1]
		default:
			cur += string(c)
		}
	}
	if len(prefix) == 0 {
		return cur
	}
	// Unbalanced input: fall back to the deepest saved prefix.
	return prefix[len(prefix)-1]
}
// isChar reports whether b is an ASCII letter.
func isChar(b byte) bool {
	switch {
	case 'a' <= b && b <= 'z':
		return true
	case 'A' <= b && b <= 'Z':
		return true
	}
	return false
}
// isNumber reports whether b is an ASCII decimal digit.
func isNumber(b byte) bool {
	return '0' <= b && b <= '9'
}
|
// package semver provides a type representing a semantic version, and
// facilities for parsing, serialisation and comparison.
//
// See http://semver.org for more information on semantic versioning.
//
// This package expands on the specification: a partial version string like
// "v2" or "v2.0" is considered valid, and expanded to "v2.0.0".
//
// To parse a version string:
//
// s := "v1.0.7-alpha"
// v, err := semver.Parse(s)
// if err != nil {
// panic(err)
// }
// fmt.Println(v)
//
// Visit godoc.org/github.com/ceralena/semver for the full package API.
package semver
import (
"bytes"
"errors"
"fmt"
"sort"
"strconv"
)
// A Version is a parsed semver version string.
//
// If only a partial version was specified, the missing parts are left at
// their zero value (0), so "v2" parses the same as "v2.0.0". (An earlier
// version of this comment claimed -1; Parse never assigns -1.)
type Version struct {
	Major int
	Minor int
	Patch int
	Prerelease string
}
// String returns the canonical "vMAJOR.MINOR.PATCH" rendering of the
// Version, with "-PRERELEASE" appended when a prerelease tag is set.
//
// For example, Version{Major: 1, Minor: 2, Patch: 3, Prerelease: "beta1"}
// produces "v1.2.3-beta1".
func (v Version) String() string {
	base := fmt.Sprintf("v%d.%d.%d", v.Major, v.Minor, v.Patch)
	if v.Prerelease == "" {
		return base
	}
	return base + "-" + v.Prerelease
}
// GreaterThan returns true if v is a higher version than o.
//
// Precedence follows semver: major, minor and patch compare numerically;
// for an equal base version, a release (no prerelease tag) outranks any
// prerelease, and two differing prerelease tags compare lexically.
func (v Version) GreaterThan(o Version) bool {
	if v.Major != o.Major {
		return v.Major > o.Major
	} else if v.Minor != o.Minor {
		return v.Minor > o.Minor
	} else if v.Patch != o.Patch {
		return v.Patch > o.Patch
	} else if v.Prerelease == o.Prerelease {
		return false
	}
	// Semver rule: a version without a prerelease tag has HIGHER precedence
	// than the same base version with one (v1.0.0 > v1.0.0-alpha). The
	// previous sort-based comparison sorted "" first and got this backwards.
	if v.Prerelease == "" {
		return true
	}
	if o.Prerelease == "" {
		return false
	}
	// Both tags are non-empty: the lexically smaller tag sorts first and
	// therefore has lower precedence.
	sl := []string{v.Prerelease, o.Prerelease}
	sort.Strings(sl)
	return sl[0] != v.Prerelease
}
// LessThan returns true if v is a lesser version than o.
// Defined as "neither equal nor greater", so it stays consistent with
// GreaterThan's precedence rules.
func (v Version) LessThan(o Version) bool {
	return !v.Equals(o) && !v.GreaterThan(o)
}
// Equals returns true if v and o are the same version (all four components
// match exactly).
func (v Version) Equals(o Version) bool {
	return v.Major == o.Major && v.Minor == o.Minor && v.Patch == o.Patch && v.Prerelease == o.Prerelease
}
// States for the parsing state machine in Parse.
// The states are ordered: parsing can only advance forwards, so a final
// state below foundMajor means the input lacked even a major version.
type parseState int
const (
	atStart parseState = iota
	foundV
	foundMajor
	foundMinor
	foundPatch
	foundPrerelease
)
// Sentinel errors returned by Parse.
var (
	EmptyVersion = errors.New("Empty version string")
	IllegalVersion = errors.New("Illegal version string")
)
// Parse takes the string representation of a version and returns a Version
// value if it is valid.
//
// The error value will either be nil, EmptyVersion or IllegalVersion.
func Parse(s string) (Version, error) {
	var (
		v Version // Version object we'll return
		state parseState = atStart // current parsing state
		pos int // pointer into the string
		buf = bytes.NewBuffer(nil) // container for temporary state while we loop
		err error
	)
	if len(s) == 0 {
		return v, EmptyVersion
	}
	// Loop until we find an error or we've finished parsing the string
	for pos < len(s) {
		switch state {
		case atStart:
			// A leading 'v' is mandatory.
			if s[pos] == 'v' {
				pos = pos + 1
				state = foundV
			} else {
				return v, IllegalVersion
			}
		case foundV:
			var maj int
			if maj, pos, err = readNextNum(s, pos, buf); err != nil {
				return v, err
			}
			v.Major = maj
			state = foundMajor
		case foundMajor:
			var minor int
			if minor, pos, err = readNextNum(s, pos, buf); err != nil {
				return v, err
			}
			v.Minor = minor
			state = foundMinor
		case foundMinor:
			var patch int
			if patch, pos, err = readNextNum(s, pos, buf); err != nil {
				return v, err
			}
			v.Patch = patch
			state = foundPatch
		case foundPatch:
			// Everything after the patch number is the prerelease tag.
			v.Prerelease = s[pos:]
			pos = len(s)
			state = foundPrerelease
		}
	}
	if state < foundMajor {
		// At minimum we need a major version
		return v, IllegalVersion
	}
	return v, nil
}
// readNextNum reads the next version number starting at curs, accumulating
// digits into buf (which is reset on return) until a period, hyphen, or the
// end of the string.
//
// It returns the parsed number, the cursor advanced past the terminator,
// and IllegalVersion if the accumulated text is not an integer.
func readNextNum(s string, curs int, buf *bytes.Buffer) (int, int, error) {
	defer buf.Reset()
	for curs < len(s) && s[curs] != '.' && s[curs] != '-' {
		buf.WriteByte(s[curs])
		curs++
	}
	n, err := strconv.Atoi(buf.String())
	if err != nil {
		return -1, curs, IllegalVersion
	}
	// curs+1 skips the '.' or '-' terminator (or points past the end).
	return n, curs + 1, nil
}
|
package denvlib
import (
"io/ioutil"
"os"
"reflect"
"testing"
)
func TestIgnore(t *testing.T) {
var d *Denv
d = NewDenv("test-ignore")
err := os.RemoveAll(d.Path + "/*")
check(err)
patterns := []byte(".test\n*.test")
err = ioutil.WriteFile(d.expandPath(Settings.IgnoreFile), patterns, 0644)
d.LoadIgnore()
check(err)
cases := []struct {
in string
ignored bool
}{
{"", true},
{".test", true},
{d.expandPath(".test"), true},
{d.expandPath("test"), true},
{d.expandPath(".legit"), false},
{"hey.test", true},
{"test.txt", true},
{d.expandPath("hey.test"), true},
{d.expandPath(".hey.test"), true},
{d.expandPath("test.txt"), true},
{d.expandPath(".legit.txt"), false},
}
for _, c := range cases {
ignored := d.IsIgnored(c.in)
if ignored != c.ignored {
t.Errorf("IsIgnored(%q) != %t", c.in, c.ignored)
}
}
d.remove()
d = NewDenv("test-ignore")
//This are more for default ignores
cases = []struct {
in string
ignored bool
}{
{".denv", true},
{d.expandPath(".denv"), true},
{d.expandPath(".denvignore"), true},
{d.expandPath(".bash_history"), true},
{d.expandPath(".viminfo"), true},
{d.expandPath(".legit"), false},
{".bash_history", true},
}
for _, c := range cases {
ignored := d.IsIgnored(c.in)
if ignored != c.ignored {
t.Errorf("IsIgnored(%q) != %t", c.in, c.ignored)
}
}
d.remove()
}
// TestInclude checks that Files() reports a created dotfile as included and
// the denv's own ignore file as excluded.
func TestInclude(t *testing.T) {
	d := NewDenv("test-include")
	ioutil.WriteFile(d.expandPath(".test.txt"), []byte("derp"), 0644)
	in, ex, _ := d.Files()
	wantIn := []string{d.expandPath(".test.txt")}
	wantEx := []string{d.expandPath(".denvignore")}
	if !reflect.DeepEqual(in, wantIn) {
		t.Errorf("Included files did not match, Want: %q, Got: %q", wantIn, in)
	}
	if !reflect.DeepEqual(ex, wantEx) {
		t.Errorf("Ignored files did not match, Want: %q, Got: %q", wantEx, ex)
	}
	// Remove the fixture where it was actually written. The previous bare
	// os.Remove(".test.txt") targeted the working directory, not the denv,
	// so the created file was never cleaned up.
	os.Remove(d.expandPath(".test.txt"))
	d.remove()
}
// TestMatchedFiles verifies that a file present in the destination denv is
// reported by MatchedFiles when comparing from the source denv.
func TestMatchedFiles(t *testing.T) {
	from := NewDenv("test-matched-files-from")
	to := NewDenv("test-matched-files-to")
	//TODO make api for changing gitignore
	want := []string{to.expandPath(".test.txt")}
	// Write error is deliberately ignored; a failure surfaces as a mismatch below.
	ioutil.WriteFile(want[0], []byte("derp"), 0644)
	got, _, _ := from.MatchedFiles(to.Path)
	if !reflect.DeepEqual(want, got) {
		t.Errorf("MatchedFiles(%q) != %q, got %q", to.Path, want, got)
	}
	from.remove()
	to.remove()
}
// TestExpandPath is an empty placeholder; expandPath is currently only
// exercised indirectly by the tests above. TODO: add direct cases.
func TestExpandPath(t *testing.T) {
}
|
package fslm
// Basic types and related constants.
import (
"flag"
"fmt"
"io"
"math"
"strconv"
"github.com/kho/word"
)
// StateId represents a language model state.
type StateId uint32
const (
	STATE_NIL StateId = ^StateId(0) // An invalid state (all bits set).
	_STATE_EMPTY StateId = 0 // Models always use state 0 for the empty context.
	_STATE_START StateId = 1 // Models always use state 1 for the start context.
)
// Weight is the floating point number type for log-probabilities.
type Weight float32
const WEIGHT_SIZE = 32 // The bit size of Weight.
// String formats the weight with the shortest representation that
// round-trips as a 32-bit float. Together with Set it lets *Weight be used
// as a flag.Value.
func (w *Weight) String() string {
	return strconv.FormatFloat(float64(*w), 'g', -1, 32)
}
// Set parses s as a 32-bit float and stores it in w. On a parse error the
// error is returned and w is left unchanged.
func (w *Weight) Set(s string) error {
	f, err := strconv.ParseFloat(s, 32)
	if err != nil {
		return err
	}
	*w = Weight(f)
	return nil
}
// I seriously do not care about any platform that supports Go but
// does not support IEEE 754 infinity.
var (
	WEIGHT_LOG0 = Weight(math.Inf(-1)) // true log(0): negative infinity
	textLog0 = Weight(-99) // threshold below which textual weights are treated as log(0)
)
// init registers the log(0) threshold as a command-line flag.
func init() {
	flag.Var(&textLog0, "fslm.log0", "treat weight <= this as log(0)")
}
// StateWeight pairs a destination state with a transition weight.
type StateWeight struct {
	State StateId
	Weight Weight
}
// WordStateWeight is a labeled transition: the consumed word, the
// destination state, and the transition weight.
type WordStateWeight struct {
	Word word.Id
	State StateId
	Weight Weight
}
// Model is the general interface of an N-gram language model. It is
// mostly for convenience and the actual implementations should be
// preferred to speed up look-ups.
type Model interface {
	// Start returns the start state, i.e. the state with context
	// <s>. The user should never explicitly query <s>, which has
	// undefined behavior (see NextI).
	Start() StateId
	// NextI finds out the next state to go from p consuming x. x can
	// not be <s> or </s>, in which case the result is undefined, but
	// can be word.NIL. Any x that is not part of the model's vocabulary
	// is treated as OOV. The returned weight w is WEIGHT_LOG0 if and
	// only if unigram x is an OOV (note: although rare, it is possible
	// to have "<s> x" but not "x" in the LM, in which case "x" is also
	// considered an OOV when not occurring as the first token of a
	// sentence).
	NextI(p StateId, x word.Id) (q StateId, w Weight)
	// NextS is similar to NextI. s can be anything but <s> or </s>, in
	// which case the result is undefined.
	NextS(p StateId, x string) (q StateId, w Weight)
	// Final returns the final weight of "consuming" </s> from p. A
	// sentence query should finish with this to properly score the
	// *whole* sentence.
	Final(p StateId) Weight
	// Vocab returns the model's vocabulary and special sentence
	// boundary symbols.
	Vocab() (vocab *word.Vocab, bos, eos string, bosId, eosId word.Id)
}
// IterableModel is a language model whose states and transitions can
// be iterated.
type IterableModel interface {
	Model
	// NumStates returns the number of states. StateIds are always from
	// 0 to (the number of states - 1).
	NumStates() int
	// Transitions returns a channel that can be used to iterate over
	// the non-back-off transitions from a given state.
	Transitions(p StateId) chan WordStateWeight
	// BackOff returns the back off state and weight of p. The back off
	// state of the empty context is STATE_NIL and its weight is
	// arbitrary.
	BackOff(p StateId) (q StateId, w Weight)
}
// Graphviz writes the model's finite-state topology to w in Graphviz DOT
// form: one labeled edge per lexical transition and one dashed edge per
// back-off transition. Mostly for debugging; could be quite slow.
func Graphviz(m IterableModel, w io.Writer) {
	vocab, _, _, _, _ := m.Vocab()
	fmt.Fprintln(w, "digraph {")
	fmt.Fprintln(w, "  // lexical transitions")
	for s := 0; s < m.NumStates(); s++ {
		from := StateId(s)
		for tr := range m.Transitions(from) {
			label := fmt.Sprintf("%s : %g", vocab.StringOf(tr.Word), tr.Weight)
			fmt.Fprintf(w, "  %d -> %d [label=%q]\n", from, tr.State, label)
		}
	}
	fmt.Fprintln(w, "  // back-off transitions")
	for s := 0; s < m.NumStates(); s++ {
		bo, bw := m.BackOff(StateId(s))
		fmt.Fprintf(w, "  %d -> %d [label=%q,style=dashed]\n", s, bo, fmt.Sprintf("%g", bw))
	}
	fmt.Fprintln(w, "}")
}
// A list of implemented models; used to select between the hashed and
// sorted look-up implementations.
const (
	MODEL_HASHED = iota
	MODEL_SORTED
)
// Magic words identifying each binary serialization format.
const (
	MAGIC_HASHED = "#fslm.hash"
	MAGIC_SORTED = "#fslm.sort"
)
|
package timbler
import (
"errors"
"time"
logx "github.com/my0sot1s/godef/log"
convt "github.com/my0sot1s/godef/convt"
)
// RoomHub is the room service: it owns the set of active rooms.
type RoomHub struct {
	rooms map[*Room]bool // set of rooms; values are always true when present
	created int // nanosecond-within-second at which the hub was initialized
}
// Init allocates the room set and records the creation instant.
func (rh *RoomHub) Init() {
	rh.rooms = make(map[*Room]bool)
	rh.created = time.Now().Nanosecond()
}
// IsRoomExisted reports whether a room with the given name is registered.
func (rh RoomHub) IsRoomExisted(rname string) bool {
	for room := range rh.rooms {
		if room.GetName() == rname {
			return true
		}
	}
	return false
}

// GetRoomByName returns the room with the given name, or nil (after
// logging) when no such room is registered.
func (rh *RoomHub) GetRoomByName(rname string) *Room {
	for room := range rh.rooms {
		if room.GetName() == rname {
			return room
		}
	}
	logx.Log("Room not existed")
	return nil
}

// ConnectionCountOnRoom returns the number of client connections in the
// named room, or -1 (after logging) when the room is unknown.
func (rh RoomHub) ConnectionCountOnRoom(rname string) int {
	for room := range rh.rooms {
		if room.GetName() == rname {
			return len(room.Clients)
		}
	}
	logx.Log("Can not found room ", rname)
	return -1
}
// AddNewRoom registers room in the hub unless one with the same name is
// already present.
func (rh *RoomHub) AddNewRoom(room *Room) {
	for existing := range rh.rooms {
		if existing.GetName() == room.GetName() {
			logx.Log("+ Room is Existed")
			return
		}
	}
	rh.rooms[room] = true
	logx.Log("+ Room is Added: ", room.Name)
}

// RemoveRoom deletes the room named rname from the hub, if present.
func (rh *RoomHub) RemoveRoom(rname string) {
	for room, active := range rh.rooms {
		if active && room.GetName() == rname {
			delete(rh.rooms, room)
			logx.Log("+ Room is Deleted: ", room.Name)
			return
		}
	}
	logx.Log("+ Room is Not Existed: ", rname)
}
// SendMessageToRoom broadcasts msg (asynchronously) to the hub room whose
// name matches room's name.
func (rh *RoomHub) SendMessageToRoom(room *Room, msg *Message) {
	logx.Log("Sent to , ", room.GetName())
	target := room.GetName()
	for candidate := range rh.rooms {
		if candidate.GetName() != target {
			continue
		}
		logx.Log(len(candidate.Clients))
		go candidate.broadcast(msg)
		return
	}
	logx.Log("Not found room")
}

// InjectEvent4Hub routes a subscribe/unsubscribe event for connection id to
// the first room that contains it; returns true once handled.
func (rh *RoomHub) InjectEvent4Hub(id string, event string, rooms []string) bool {
	for room := range rh.rooms {
		if room.find4SubOrUnsub(id, event, rooms) {
			return true
		}
	}
	return false
}

// IsExistConnection reports whether any room holds a connection with the
// given ID.
func (rh *RoomHub) IsExistConnection(connectionID string) bool {
	for room := range rh.rooms {
		if room.findWithConnectionId(connectionID) {
			return true
		}
	}
	return false
}
// Room groups a set of client connections under a name and generated ID.
type Room struct {
	Name string
	ID string
	Clients map[*Connection]bool // connection set; values are always true when present
}
// GetID returns the room's generated ID.
func (r Room) GetID() string {
	return r.ID
}
// GetName returns the room's name.
func (r Room) GetName() string {
	return r.Name
}
// createRoom initializes r with name, a generated ID (prefix "ro"), and an
// empty client set; an empty name is logged and rejected.
func (r *Room) createRoom(name string) {
	if name == "" {
		logx.ErrLog(errors.New("No room name"))
		return
	}
	r.Name = name
	r.ID = convt.CreateID("ro")
	r.Clients = make(map[*Connection]bool)
}
// addClient adds c to the room's client set, skipping duplicates (matched
// by connection ID).
func (r *Room) addClient(c *Connection) {
	for v := range r.Clients {
		if c.GetID() == v.GetID() {
			logx.Log("++ Connection is existed", "cyan")
			return
		}
	}
	logx.Log("++ Add client success ", c.GetID(), "green")
	r.Clients[c] = true
}
// removeClient removes c from the room's client set (matched by connection
// ID); a miss is logged.
func (r *Room) removeClient(c *Connection) {
	for v := range r.Clients {
		if c.GetID() == v.GetID() {
			// c.connection.Close()
			logx.Log("++ Deleted client success ", c.GetID(), "green")
			delete(r.Clients, c)
			return
		}
	}
	// Fix: this is the not-found path, but the old message claimed the
	// connection existed, which made the log actively misleading.
	logx.Log("Connection is not existed", "cyan")
}
// broadcast pushes msg's serialized bytes onto every client's send channel.
func (r *Room) broadcast(msg *Message) {
	for client := range r.Clients {
		logx.Log("--->", string(msg.toByte()))
		client.send <- msg.toByte()
	}
}

// find4SubOrUnsub locates the client with the given ID and applies the
// subscribe/unsubscribe event to it; returns true if the client was found.
func (r *Room) find4SubOrUnsub(id string, event string, rooms []string) bool {
	for client := range r.Clients {
		if client.GetID() != id {
			continue
		}
		switch event {
		case "subscribe":
			client.Subscribe(rooms)
		case "unsubscribe":
			client.Unsubscribe(rooms)
		}
		return true
	}
	return false
}

// findWithConnectionId reports whether the room holds a client with the
// given connection ID.
func (r *Room) findWithConnectionId(id string) bool {
	for client := range r.Clients {
		if client.GetID() == id {
			return true
		}
	}
	return false
}
|
package main
import (
"../config"
"../network/localip"
"fmt"
)
// initializeLiftData builds a config.Lift seeded with this machine's local
// IP as its id, no known floor/order, motor stopped and state idle.
func initializeLiftData() config.Lift {
	var lift config.Lift
	var requests [config.NumFloors][config.NumButtons]bool
	id, err := localip.LocalIP()
	// NOTE(review): lift is populated only when LocalIP FAILS (err != nil),
	// so on success this returns the zero-value Lift. The condition looks
	// inverted (err == nil was probably intended) -- confirm with callers.
	if err != nil {
		// Clearing requests is redundant (bool's zero value is false).
		for f := 0; f < config.NumFloors; f++ {
			for b := 0; b < config.NumButtons; b++ {
				requests[f][b] = false
			}
		}
		lift = config.Lift{id,
			true,
			-1,
			-1,
			config.MD_Stop,
			config.LiftIdle,
			requests}
	}
	return lift
}
// nodeMap maps lift identifiers to their state; it is nil until main
// allocates it with make.
var nodeMap config.NodeMap
func main() {
	Lift1 := initializeLiftData()
	Lift2 := initializeLiftData()
	Lift2.LastKnownFloor = 1
	fmt.Println(Lift1 == Lift2)
	// The map must be allocated before use: assigning into a nil map panics.
	nodeMap = make(config.NodeMap)
	//"panic: assignment to entry in nil map" no matter what I try....
	nodeMap["testLift"] = initializeLiftData()
	// NOTE(review): this looks up "testLift1", which was never inserted, so
	// ok is false and val is the zero Lift -- the message printed below is
	// misleading ("testLift" was the inserted key).
	val, ok := nodeMap["testLift1"]
	if !ok {
		fmt.Println("testLift key in map")
		fmt.Println(val.Requests[0][0])
	}
	//fmt.Println(nodeMap["testLift"].Requests[0][0])
}
/* Note: Go map values are not addressable, so a struct field inside a map
   value cannot be modified in place; copy the struct out, change it, and
   store it back. References: */
//http://stackoverflow.com/questions/40578646/golang-i-have-a-map-of-structs-why-cant-i-directly-modify-a-field-in-a-struct
//https://play.golang.org/p/ecdUU30FQT
|
// +build !qml
package album
import (
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/widgets"
"github.com/therecipe/qt/internal/examples/sql/masterdetail_qml/controller"
)
// albumController is the widget-based (non-QML) album list panel: a group
// box wrapping a QTableView bound to the shared controller's album model.
// The blank fields declare qt signals/properties via struct tags.
type albumController struct {
	widgets.QGroupBox
	_ func() `constructor:"init"`
	_ *core.QAbstractItemModel `property:"viewModel"`
	//<-controller: signals emitted by the controller, handled here
	_ func() `signal:"albumAdded"`
	_ func() `signal:"deleteAlbumRequest"`
	_ func() `signal:"deleteAlbumCommand"`
	//->controller: signals emitted here, handled by the controller
	_ func() `signal:"showImageLabel"`
	_ func(index *core.QModelIndex) `signal:"deleteAlbum"`
	_ func(column int, order core.Qt__SortOrder) `signal:"sortTableView"`
	_ func(index *core.QModelIndex) `signal:"showAlbumDetails"`
	_ func(title, artist string) `signal:"deleteAlbumShowRequest"`
	//
	albumView *widgets.QTableView
}
// init builds the table view, binds it to the controller's album model,
// configures selection/sorting behavior, and wires signals in both
// directions between this widget and the shared controller.
func (a *albumController) init() {
	a.albumView = widgets.NewQTableView(nil)
	a.albumView.SetModel(controller.Instance.AlbumModel())
	a.albumView.SetEditTriggers(widgets.QAbstractItemView__NoEditTriggers)
	a.albumView.SetSortingEnabled(true)
	a.albumView.SetSelectionBehavior(widgets.QAbstractItemView__SelectRows)
	a.albumView.SetSelectionMode(widgets.QAbstractItemView__SingleSelection)
	a.albumView.SetShowGrid(false)
	a.albumView.VerticalHeader().Hide()
	a.albumView.SetAlternatingRowColors(true)
	a.adjustHeader()
	// Suppress digit group separators in numeric cells.
	locale := a.albumView.Locale()
	locale.SetNumberOptions(core.QLocale__OmitGroupSeparator)
	a.albumView.SetLocale(locale)
	a.albumView.ConnectClicked(func(index *core.QModelIndex) { a.ShowAlbumDetails(index) })
	a.albumView.ConnectActivated(func(index *core.QModelIndex) { a.ShowAlbumDetails(index) })
	layout := widgets.NewQVBoxLayout()
	layout.AddWidget(a.albumView, 0, 0)
	a.SetLayout(layout)
	//
	//<-controller: react to controller-side events
	controller.Instance.ConnectAddAlbum(func(string, string, int, string) { a.albumAdded() })
	controller.Instance.ConnectDeleteAlbumRequest(a.deleteAlbumRequest)
	controller.Instance.ConnectDeleteAlbumCommand(a.deleteAlbumCommand)
	//->controller: forward local signals to the controller
	a.ConnectShowImageLabel(controller.Instance.ShowImageLabel)
	a.ConnectDeleteAlbum(func(index *core.QModelIndex) { controller.Instance.DeleteAlbum(index) })
	a.ConnectSortTableView(controller.Instance.SortTableView)
	a.ConnectShowAlbumDetails(func(index *core.QModelIndex) { controller.Instance.ShowAlbumDetails(index) })
	a.ConnectDeleteAlbumShowRequest(controller.Instance.DeleteAlbumShowRequest)
}
// albumAdded selects and shows the most recently appended album row.
func (a *albumController) albumAdded() {
	a.albumView.SelectionModel().Clear()
	a.albumView.SelectRow(a.albumView.Model().RowCount(core.NewQModelIndex()) - 1)
	a.ShowAlbumDetails(a.albumView.Model().Index(a.albumView.Model().RowCount(core.NewQModelIndex())-1, 0, core.NewQModelIndex()))
	a.adjustHeader()
}
// deleteAlbumRequest emits a confirmation request for the currently
// selected album, passing its title and artist (stored as user-role data).
func (a *albumController) deleteAlbumRequest() {
	index := a.albumView.CurrentIndex()
	title := index.Data(int(core.Qt__UserRole) + 2).ToString()
	artist := index.Data(int(core.Qt__UserRole) + 3).ToString()
	a.DeleteAlbumShowRequest(title, artist)
}
// deleteAlbumCommand deletes the selected album and resets the image panel.
func (a *albumController) deleteAlbumCommand() {
	a.DeleteAlbum(a.albumView.CurrentIndex())
	a.ShowImageLabel()
	a.adjustHeader()
}
// adjustHeader hides the id column, stretches the title column, and sizes
// the remaining columns to their contents.
func (a *albumController) adjustHeader() {
	a.albumView.HideColumn(0)
	a.albumView.HorizontalHeader().SetSectionResizeMode2(1, widgets.QHeaderView__Stretch)
	a.albumView.ResizeColumnToContents(2)
	a.albumView.ResizeColumnToContents(3)
}
|
package main
import (
"bufio"
"encoding/csv"
"flag"
"fmt"
"log"
"os"
"strings"
"time"
)
// data holds quiz state: the running score and the question->answer sheet.
type data struct {
	points int
	sheet map[string]string
}
var (
	file *string
	duration *int
)
// init registers the CLI flags before flag.Parse runs in main.
// NOTE(review): the flag is named "test" but holds the problems CSV path.
func init() {
	file = flag.String("test", "problems.csv", "path to test file")
	duration = flag.Int("time", 10, "quiz duration in seconds")
}
// main loads the CSV problem sheet, runs the quiz and the countdown timer
// concurrently, and prints the score when either finishes first.
func main() {
	flag.Parse()
	records := readCSV(*file)
	// Build the question -> expected-answer map from CSV rows.
	sheet := make(map[string]string)
	for _, line := range records {
		sheet[line[0]] = line[1]
	}
	quiz := data{
		points: 0,
		sheet: sheet,
	}
	ch := make(chan int)
	fmt.Println("Quiz has started!")
	fmt.Printf("Total time: %v seconds\n", *duration)
	// Whichever goroutine finishes first unblocks the receive below.
	go startQuiz(&quiz, ch)
	go timer(*duration, ch)
	<-ch
	// NOTE(review): if the timer fires first, startQuiz may still be
	// incrementing quiz.points while it is read here -- a data race under
	// -race; confirm and guard (mutex or pass the score over the channel).
	fmt.Printf("\nYou scored %v out of %v!\n", quiz.points, len(quiz.sheet))
}
// timer sleeps for delay seconds, then signals completion on ch.
func timer(delay int, ch chan<- int) {
	d := time.Duration(delay) * time.Second
	time.Sleep(d)
	ch <- 1
}
// startQuiz asks every question on stdin, scoring exact (trimmed) answers,
// then signals ch when the sheet is exhausted. Map iteration order is
// random, so questions appear in random order.
func startQuiz(quiz *data, ch chan<- int) {
	reader := bufio.NewReader(os.Stdin)
	i := 1
	for k, v := range quiz.sheet {
		fmt.Printf("Problem #%v: %v = ", i, k)
		text, _ := reader.ReadString('\n')
		// Allows to read from windows terminals
		//text = strings.Replace(text, "\r\n", "", -1)
		// Trim CR/LF and surrounding spaces so comparison is exact.
		text = strings.Trim(text, "\r\n ")
		//fmt.Println("\""+ text + "\"")
		if text == v {
			//fmt.Println("point added")
			quiz.points++
		}
		i++
	}
	ch <- 1
}
func readCSV(file string) [][]string {
// opens the file
f, err := os.Open(file)
if err != nil {
log.Fatalln("Unable to read input file:", file, err)
}
defer f.Close()
// reads the the contents of the file as csv
csvReader := csv.NewReader(f)
records, err := csvReader.ReadAll()
if err != nil {
log.Fatalln("Unable to parse file as CSV for:", file, err)
}
return records
}
|
package promise
import (
"testing"
)
// Test_Future verifies that a value produced by handlerFuture is delivered
// unchanged to the callback registered with then.
func Test_Future(t *testing.T) {
	source := 1
	future := handlerFuture(source)
	ch := make(chan interface{})
	// The callback runs on another goroutine; forward the response over ch
	// so the test goroutine can assert on it.
	go future.then(func(response interface{}) (interface{}, error) {
		ch <- response
		return nil, nil
	})
	target := <-ch
	if target != source {
		t.Error("Test_Future Fail", target)
	}
	t.Log("Test_Future Success")
}
|
package queue
// QueueArrayImpl is a slice-backed FIFO queue of ints.
type QueueArrayImpl struct {
	Array []int
}

// Compile-time check that *QueueArrayImpl satisfies Queue.
var _ Queue = &QueueArrayImpl{}

// NewQueue returns an empty slice-backed queue.
func NewQueue() Queue {
	return &QueueArrayImpl{Array: make([]int, 0)}
}

// Add enqueues e at the tail of the queue.
func (q *QueueArrayImpl) Add(e int) {
	q.Array = append(q.Array, e)
}

// Remove dequeues the head element, returning QueueEmptyError when empty.
func (q *QueueArrayImpl) Remove() error {
	if q.Empty() {
		return QueueEmptyError
	}
	q.Array = q.Array[1:]
	return nil
}

// Length reports the number of queued elements.
func (q *QueueArrayImpl) Length() int {
	return len(q.Array)
}

// Peek returns the head element without removing it, or QueueEmptyError
// when the queue is empty.
func (q *QueueArrayImpl) Peek() (int, error) {
	if q.Empty() {
		return 0, QueueEmptyError
	}
	return q.Array[0], nil
}

// Empty reports whether the queue holds no elements.
func (q *QueueArrayImpl) Empty() bool {
	return len(q.Array) == 0
}
|
package main
import "fmt"
// countRemoval returns the minimum number of single-character deletions
// needed to turn str[l..r] into a palindrome, memoizing results in dp.
// dp must be a len(str) x len(str) zero-initialized table; 0 doubles as the
// "not computed" marker, so zero-cost subproblems may be recomputed (this
// affects speed only, never the answer).
func countRemoval(str string, l int, r int, dp [][]int) int {
	if r <= l {
		return 0
	}
	if r == l+1 {
		if str[l] == str[r] {
			return 0
		}
		return 1
	}
	if memo := dp[l][r]; memo != 0 {
		return memo
	}
	var best int
	if str[l] == str[r] {
		// Matching ends cost nothing; solve the interior.
		best = countRemoval(str, l+1, r-1, dp)
	} else {
		// Drop the left char, the right char, or both; keep the cheapest.
		best = 1 + countRemoval(str, l+1, r, dp)
		if alt := 1 + countRemoval(str, l, r-1, dp); alt < best {
			best = alt
		}
		if alt := 2 + countRemoval(str, l+1, r-1, dp); alt < best {
			best = alt
		}
	}
	dp[l][r] = best
	return best
}
// main demonstrates countRemoval on a sample string and dumps the memo table.
func main() {
	input := "aaabbbbaba"
	table := make([][]int, len(input))
	for i := range table {
		table[i] = make([]int, len(input))
	}
	fmt.Println(countRemoval(input, 0, len(input)-1, table))
	fmt.Println(table)
}
|
package theory
import (
"buddin.us/eolian/dsp"
"buddin.us/musictheory"
lua "github.com/yuin/gopher-lua"
)
// newPitch is the Lua constructor for pitches: it parses the pitch name given
// as the first Lua argument and pushes a pitch userdata onto the stack.
// It returns 1, the number of Lua return values.
func newPitch(state *lua.LState) int {
	p, err := musictheory.ParsePitch(state.CheckString(1))
	if err != nil {
		// NOTE(review): assumes RaiseError does not return (panic/longjmp
		// style); otherwise p would be dereferenced as nil below — confirm.
		state.RaiseError("%s", err.Error())
	}
	state.Push(newPitchUserData(state, *p))
	return 1
}
// newPitchUserData wraps a musictheory.Pitch in a Lua userdata exposing the
// methods value, name, and transpose.
func newPitchUserData(state *lua.LState, p musictheory.Pitch) *lua.LUserData {
	methods := state.NewTable()
	state.SetFuncs(methods, map[string]lua.LGFunction{
		// value returns the pitch as a dsp.Pitch (ascending name + frequency).
		"value": func(state *lua.LState) int {
			pitch := state.CheckUserData(1).Value.(musictheory.Pitch)
			state.Push(&lua.LUserData{
				Value: dsp.Pitch{
					Raw:    pitch.Name(musictheory.AscNames),
					Valuer: dsp.Frequency(pitch.Freq()),
				},
			})
			return 1
		},
		// name returns the pitch name using the requested naming strategy:
		// "asc"/"ascending" (default) or "desc"/"descending".
		"name": func(state *lua.LState) int {
			pitch := state.CheckUserData(1).Value.(musictheory.Pitch)
			strategy := state.OptString(2, "asc")
			switch strategy {
			case "asc", "ascending":
				state.Push(lua.LString(pitch.Name(musictheory.AscNames)))
			case "desc", "descending":
				// BUG FIX: this branch previously used AscNames, so the
				// descending naming strategy was never actually applied.
				state.Push(lua.LString(pitch.Name(musictheory.DescNames)))
			default:
				state.RaiseError("unknown naming strategy %s", strategy)
			}
			return 1
		},
		// transpose shifts the pitch by the interval userdata argument and
		// returns a new pitch userdata; a non-interval argument is an error.
		"transpose": func(state *lua.LState) int {
			pitch := state.CheckUserData(1).Value.(musictheory.Pitch)
			userdata := state.CheckUserData(2)
			if interval, ok := userdata.Value.(musictheory.Interval); ok {
				state.Push(newPitchUserData(state, pitch.Transpose(interval).(musictheory.Pitch)))
				return 1
			}
			state.RaiseError("argument is not an interval")
			return 0
		},
	})
	mt := state.NewTable()
	mt.RawSetString("__index", methods)
	return &lua.LUserData{
		Metatable: mt,
		Value:     p,
	}
}
|
package cli
import (
"errors"
"flag"
"os"
"strings"
"github.com/mraraneda/mrlogger"
)
// FlagHandler registers and parses the command-line flags, logging the full
// CLI invocation and reporting an error for every missing required flag.
func FlagHandler(configfile *string) {
	// Typo fix in the usage text: "aplicacoión" -> "aplicación".
	flag.StringVar(configfile, "config", "", "archivo de configuración de la aplicación")
	flag.Parse()

	// strings.Join replaces the manual Builder loop (and drops the stray
	// trailing space the loop produced).
	mrlogger.Debug("CLI call:", strings.Join(os.Args, " "))

	// Names of the mandatory flags; extend this slice as needed.
	required := []string{"config"}

	// A flag counts as provided when it carries a non-empty value.
	seen := make(map[string]bool)
	flag.VisitAll(func(f *flag.Flag) {
		if f.Value.String() != "" {
			seen[f.Name] = true
		}
	})
	for _, req := range required {
		if !seen[req] {
			// Build the message once and reuse it for both the error value
			// and the log line (previously duplicated).
			msg := "Falta ingresar el parámetro obligatorio: \"" + req + "\""
			err := errors.New(msg)
			mrlogger.Error(msg)
			mrlogger.Check(err, mrlogger.InThisPoint())
		}
	}
}
|
package main
import "fmt"
// main demonstrates basic map operations: insertion, deletion (including
// deleting an absent key, which is a no-op), length, and literal construction.
func main() {
	m := make(map[string]int, 2) // the size hint is only a hint; maps grow
	m["k1"] = 7
	m["k2"] = 13
	m["k3"] = 15
	m["k4"] = 17
	fmt.Println("m:", m)

	delete(m, "k1")
	fmt.Printf("\nmap:%+v\n\n", m)
	delete(m, "k1") // deleting a key that is no longer present is safe

	fmt.Println("len:", len(m))

	n := map[string]int{"foo": 1, "bar": 2}
	fmt.Println("map:", n)
}
|
package check
import (
"github.com/MintegralTech/juno/index"
)
// unmarshal dispatches a serialized checker to the matching concrete type.
type unmarshal struct {
}

// Unmarshal inspects res for a checker marker key and delegates decoding to
// the corresponding concrete checker, trying the keys in this fixed priority
// order: check, in_check, not_check, or_check, and_check, not_and_check.
// It returns nil when none of the marker keys is present.
func (u *unmarshal) Unmarshal(idx index.Index, res map[string]interface{}) Checker {
	if _, ok := res["check"]; ok {
		var checkImpl = &CheckerImpl{}
		return checkImpl.Unmarshal(idx, res)
	}
	if _, ok := res["in_check"]; ok {
		var inCheck = &InChecker{}
		return inCheck.Unmarshal(idx, res)
	}
	if _, ok := res["not_check"]; ok {
		var notCheck = &NotChecker{}
		return notCheck.Unmarshal(idx, res)
	}
	if _, ok := res["or_check"]; ok {
		var orCheck = &OrChecker{}
		return orCheck.Unmarshal(idx, res)
	}
	if _, ok := res["and_check"]; ok {
		var andCheck = &AndChecker{}
		return andCheck.Unmarshal(idx, res)
	}
	if _, ok := res["not_and_check"]; ok {
		var notAndCheck = &NotAndChecker{}
		return notAndCheck.Unmarshal(idx, res)
	}
	return nil
}
|
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
const (
	// resultLines is the expected RESP (Redis serialization protocol)
	// encoding of the three SET commands listed in fileSourceLines.
	resultLines = "*3\r\n$3\r\nSET\r\n$5\r\nprice\r\n$5\r\n99.99\r\n*3\r\n$3\r\nSET\r\n$5\r\ncolor\r\n$3\r\nred\r\n*3\r\n$3\r\nSET\r\n$4\r\nunit\r\n$7\r\nCelsius\r\n"
)

var (
	// fileSourceLines mirrors the expected contents of sample_source.txt.
	fileSourceLines = []string{
		"SET price 99.99",
		"SET color red",
		"SET unit Celsius",
	}
)
// TestReadLines checks that readLines returns the sample file's lines
// (requires sample_source.txt in the working directory).
func TestReadLines(t *testing.T) {
	lines, err := readLines("sample_source.txt")
	if err != nil {
		t.Error(err)
		return
	}
	assert.Equal(t, fileSourceLines, lines, "They should be equal")
}

// TestExists checks file-existence detection on a present and an absent path.
func TestExists(t *testing.T) {
	assert.True(t, exists("./README.md"), "README.md should exist")
	assert.False(t, exists("./ABCDEFG"), "This file should not exist")
}

// TestGenerateRedisScript checks the RESP encoding of the sample commands.
func TestGenerateRedisScript(t *testing.T) {
	var script = generateRedisScript(fileSourceLines)
	assert.Equal(t, resultLines, script, "They should be equal")
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"reflect"
"testing"
"k8s.io/component-base/metrics"
)
// fakeFilename is the synthetic file name reported in analyzer diagnostics.
const fakeFilename = "testdata/metric.go"
// TestSkipMetrics verifies that the analyzer ignores metrics it must not
// report: ALPHA metrics, metrics without an explicit stability level, and
// calls to look-alike functions imported from paths other than the metrics
// framework (including renamed and dot imports).
func TestSkipMetrics(t *testing.T) {
	for _, test := range []struct {
		testName string
		src      string
	}{
		{
			testName: "Skip alpha metric with local variable",
			src: `
package test
import "k8s.io/component-base/metrics"
var name = "metric"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: name,
StabilityLevel: metrics.ALPHA,
},
)
`},
		{
			testName: "Skip alpha metric created via function call",
			src: `
package test
import "k8s.io/component-base/metrics"
func getName() string {
return "metric"
}
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: getName(),
StabilityLevel: metrics.ALPHA,
},
)
`},
		{
			testName: "Skip metric without stability set",
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: "metric",
},
)
`},
		{
			testName: "Skip functions of similar signature (not imported from framework path) with import rename",
			src: `
package test
import metrics "k8s.io/fake/path"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "Skip functions of similar signature (not imported from framework path)",
			src: `
package test
import "k8s.io/fake/path/metrics"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "Skip . package import of non metric framework",
			src: `
package test
import . "k8s.io/fake/path"
var _ = NewCounter(
&CounterOpts{
StabilityLevel: STABLE,
},
)
`},
	} {
		t.Run(test.testName, func(t *testing.T) {
			// None of these sources declares a stable framework metric, so
			// the analyzer must report neither metrics nor errors.
			metrics, errors := searchFileForStableMetrics(fakeFilename, test.src)
			if len(metrics) != 0 {
				t.Errorf("Didn't expect any stable metrics found, got: %d", len(metrics))
			}
			if len(errors) != 0 {
				t.Errorf("Didn't expect any errors found, got: %s", errors)
			}
		})
	}
}
// TestStableMetric verifies that the analyzer extracts exactly one correct
// metric record for each supported STABLE metric declaration form (direct
// literals, const/var names, renamed imports, and the bucket helpers).
// The Vec cases deliberately declare labels out of order in src: the
// expected metric lists them sorted, so sorting is asserted implicitly.
func TestStableMetric(t *testing.T) {
	for _, test := range []struct {
		testName string
		src      string
		metric   metric
	}{
		{
			testName: "Counter",
			metric: metric{
				Name:              "metric",
				Namespace:         "namespace",
				Subsystem:         "subsystem",
				StabilityLevel:    "STABLE",
				DeprecatedVersion: "1.16",
				Help:              "help",
				Type:              counterMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: "metric",
Subsystem: "subsystem",
Namespace: "namespace",
Help: "help",
DeprecatedVersion: "1.16",
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "CounterVec",
			metric: metric{
				Name:              "metric",
				Namespace:         "namespace",
				Subsystem:         "subsystem",
				Labels:            []string{"label-1"},
				StabilityLevel:    "STABLE",
				DeprecatedVersion: "1.16",
				Help:              "help",
				Type:              counterMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewCounterVec(
&metrics.CounterOpts{
Name: "metric",
Namespace: "namespace",
Subsystem: "subsystem",
Help: "help",
DeprecatedVersion: "1.16",
StabilityLevel: metrics.STABLE,
},
[]string{"label-1"},
)
`},
		{
			testName: "Gauge",
			metric: metric{
				Name:              "gauge",
				Namespace:         "namespace",
				Subsystem:         "subsystem",
				StabilityLevel:    "STABLE",
				DeprecatedVersion: "1.16",
				Help:              "help",
				Type:              gaugeMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewGauge(
&metrics.GaugeOpts{
Name: "gauge",
Namespace: "namespace",
Subsystem: "subsystem",
Help: "help",
DeprecatedVersion: "1.16",
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "GaugeVec",
			metric: metric{
				Name:              "gauge",
				Namespace:         "namespace",
				Subsystem:         "subsystem",
				StabilityLevel:    "STABLE",
				DeprecatedVersion: "1.16",
				Help:              "help",
				Type:              gaugeMetricType,
				Labels:            []string{"label-1", "label-2"},
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Name: "gauge",
Namespace: "namespace",
Subsystem: "subsystem",
Help: "help",
DeprecatedVersion: "1.16",
StabilityLevel: metrics.STABLE,
},
[]string{"label-2", "label-1"},
)
`},
		{
			testName: "Histogram",
			metric: metric{
				Name:              "histogram",
				Namespace:         "namespace",
				Subsystem:         "subsystem",
				DeprecatedVersion: "1.16",
				StabilityLevel:    "STABLE",
				Buckets:           []float64{0.001, 0.01, 0.1, 1, 10, 100},
				Help:              "help",
				Type:              histogramMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewHistogram(
&metrics.HistogramOpts{
Name: "histogram",
Namespace: "namespace",
Subsystem: "subsystem",
StabilityLevel: metrics.STABLE,
Help: "help",
DeprecatedVersion: "1.16",
Buckets: []float64{0.001, 0.01, 0.1, 1, 10, 100},
},
)
`},
		{
			testName: "HistogramVec",
			metric: metric{
				Name:              "histogram",
				Namespace:         "namespace",
				Subsystem:         "subsystem",
				DeprecatedVersion: "1.16",
				StabilityLevel:    "STABLE",
				Buckets:           []float64{0.001, 0.01, 0.1, 1, 10, 100},
				Help:              "help",
				Type:              histogramMetricType,
				Labels:            []string{"label-1", "label-2"},
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Name: "histogram",
Namespace: "namespace",
Subsystem: "subsystem",
StabilityLevel: metrics.STABLE,
Help: "help",
DeprecatedVersion: "1.16",
Buckets: []float64{0.001, 0.01, 0.1, 1, 10, 100},
},
[]string{"label-2", "label-1"},
)
`},
		{
			testName: "Custom import",
			metric: metric{
				Name:           "metric",
				StabilityLevel: "STABLE",
				Type:           counterMetricType,
			},
			src: `
package test
import custom "k8s.io/component-base/metrics"
var _ = custom.NewCounter(
&custom.CounterOpts{
Name: "metric",
StabilityLevel: custom.STABLE,
},
)
`},
		{
			testName: "Const",
			metric: metric{
				Name:           "metric",
				StabilityLevel: "STABLE",
				Type:           counterMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
const name = "metric"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: name,
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "Variable",
			metric: metric{
				Name:           "metric",
				StabilityLevel: "STABLE",
				Type:           counterMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var name = "metric"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: name,
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "Multiple consts in block",
			metric: metric{
				Name:           "metric",
				StabilityLevel: "STABLE",
				Type:           counterMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
const (
unrelated1 = "unrelated1"
name = "metric"
unrelated2 = "unrelated2"
)
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: name,
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "Multiple variables in Block",
			metric: metric{
				Name:           "metric",
				StabilityLevel: "STABLE",
				Type:           counterMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var (
unrelated1 = "unrelated1"
name = "metric"
_ = metrics.NewCounter(
&metrics.CounterOpts{
Name: name,
StabilityLevel: metrics.STABLE,
},
)
)
`},
		{
			testName: "Histogram with linear buckets",
			metric: metric{
				Name:           "histogram",
				StabilityLevel: "STABLE",
				Buckets:        metrics.LinearBuckets(1, 1, 3),
				Type:           histogramMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewHistogram(
&metrics.HistogramOpts{
Name: "histogram",
StabilityLevel: metrics.STABLE,
Buckets: metrics.LinearBuckets(1, 1, 3),
},
)
`},
		{
			testName: "Histogram with exponential buckets",
			metric: metric{
				Name:           "histogram",
				StabilityLevel: "STABLE",
				Buckets:        metrics.ExponentialBuckets(1, 2, 3),
				Type:           histogramMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewHistogram(
&metrics.HistogramOpts{
Name: "histogram",
StabilityLevel: metrics.STABLE,
Buckets: metrics.ExponentialBuckets(1, 2, 3),
},
)
`},
		{
			testName: "Histogram with default buckets",
			metric: metric{
				Name:           "histogram",
				StabilityLevel: "STABLE",
				Buckets:        metrics.DefBuckets,
				Type:           histogramMetricType,
			},
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewHistogram(
&metrics.HistogramOpts{
Name: "histogram",
StabilityLevel: metrics.STABLE,
Buckets: metrics.DefBuckets,
},
)
`},
	} {
		t.Run(test.testName, func(t *testing.T) {
			metrics, errors := searchFileForStableMetrics(fakeFilename, test.src)
			if len(errors) != 0 {
				t.Errorf("Unexpected errors: %s", errors)
			}
			if len(metrics) != 1 {
				t.Fatalf("Unexpected number of metrics: got %d, want 1", len(metrics))
			}
			// The analyzer reports an empty (not nil) label slice; normalize
			// the expectation before the deep comparison.
			if test.metric.Labels == nil {
				test.metric.Labels = []string{}
			}
			if !reflect.DeepEqual(metrics[0], test.metric) {
				t.Errorf("metric:\ngot %v\nwant %v", metrics[0], test.metric)
			}
		})
	}
}
// TestIncorrectStableMetricDeclarations verifies that each unsupported or
// malformed STABLE metric declaration yields exactly one diagnostic, and
// that it matches the expected message and testdata/metric.go position.
func TestIncorrectStableMetricDeclarations(t *testing.T) {
	for _, test := range []struct {
		testName string
		src      string
		err      error
	}{
		{
			testName: "Fail on stable summary metric (Summary is DEPRECATED)",
			err:      fmt.Errorf("testdata/metric.go:4:9: Stable summary metric is not supported"),
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewSummary(
&metrics.SummaryOpts{
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "Fail on stable metric with attribute set to unknown variable",
			err:      fmt.Errorf("testdata/metric.go:6:4: Metric attribute was not correctly set. Please use only global consts in same file"),
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: unknownVariable,
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "Fail on stable metric with attribute set to local function return",
			err:      fmt.Errorf("testdata/metric.go:9:4: Non string attribute it not supported"),
			src: `
package test
import "k8s.io/component-base/metrics"
func getName() string {
return "metric"
}
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: getName(),
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "Fail on stable metric with attribute set to imported function return",
			err:      fmt.Errorf("testdata/metric.go:7:4: Non string attribute it not supported"),
			src: `
package test
import "k8s.io/component-base/metrics"
import "k8s.io/kubernetes/utils"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
Name: utils.getMetricName(),
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "Fail on metric with stability set to function return",
			err:      fmt.Errorf("testdata/metric.go:9:20: StabilityLevel should be passed STABLE, ALPHA or removed"),
			src: `
package test
import "k8s.io/component-base/metrics"
func getMetricStability() metrics.StabilityLevel {
return metrics.STABLE
}
var _ = metrics.NewCounter(
&metrics.CounterOpts{
StabilityLevel: getMetricsStability(),
},
)
`},
		{
			testName: "error for passing stability as string",
			err:      fmt.Errorf("testdata/metric.go:6:20: StabilityLevel should be passed STABLE, ALPHA or removed"),
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
StabilityLevel: "stable",
},
)
`},
		{
			testName: "error for passing stability as unknown const",
			err:      fmt.Errorf("testdata/metric.go:6:20: StabilityLevel should be passed STABLE, ALPHA or removed"),
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
StabilityLevel: metrics.UNKNOWN,
},
)
`},
		{
			testName: "error for passing stability as variable",
			err:      fmt.Errorf("testdata/metric.go:7:20: StabilityLevel should be passed STABLE, ALPHA or removed"),
			src: `
package test
import "k8s.io/component-base/metrics"
var stable = metrics.STABLE
var _ = metrics.NewCounter(
&metrics.CounterOpts{
StabilityLevel: stable,
},
)
`},
		{
			testName: "error for stable metric created via function call",
			err:      fmt.Errorf("testdata/metric.go:6:10: Opts for STABLE metric was not directly passed to new metric function"),
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewCounter(getStableCounterOpts())
func getStableCounterOpts() *metrics.CounterOpts {
return &metrics.CounterOpts{
StabilityLevel: metrics.STABLE,
}
}
`},
		{
			testName: "error . package import of metric framework",
			err:      fmt.Errorf(`testdata/metric.go:3:8: Importing using "." is not supported`),
			src: `
package test
import . "k8s.io/component-base/metrics"
var _ = NewCounter(
&CounterOpts{
StabilityLevel: STABLE,
},
)
`},
		{
			testName: "error stable metric opts passed to local function",
			err:      fmt.Errorf("testdata/metric.go:4:9: Opts for STABLE metric was not directly passed to new metric function"),
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = RegisterMetric(
&metrics.CounterOpts{
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "error stable metric opts passed to imported function",
			err:      fmt.Errorf("testdata/metric.go:4:9: Opts for STABLE metric was not directly passed to new metric function"),
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = test.RegisterMetric(
&metrics.CounterOpts{
StabilityLevel: metrics.STABLE,
},
)
`},
		{
			testName: "error stable metric opts passed to imported function",
			err:      fmt.Errorf("testdata/metric.go:6:4: Positional arguments are not supported"),
			src: `
package test
import "k8s.io/component-base/metrics"
var _ = metrics.NewCounter(
&metrics.CounterOpts{
"counter",
},
)
`},
		{
			testName: "error stable historgram with unknown prometheus bucket variable",
			err:      fmt.Errorf("testdata/metric.go:9:13: Buckets should be set to list of floats, result from function call of prometheus.LinearBuckets or prometheus.ExponentialBuckets"),
			src: `
package test
import "k8s.io/component-base/metrics"
import "github.com/prometheus/client_golang/prometheus"
var _ = metrics.NewHistogram(
&metrics.HistogramOpts{
Name: "histogram",
StabilityLevel: metrics.STABLE,
Buckets: prometheus.FakeBuckets,
},
)
`},
		{
			testName: "error stable historgram with unknown bucket variable",
			err:      fmt.Errorf("testdata/metric.go:9:13: Buckets should be set to list of floats, result from function call of prometheus.LinearBuckets or prometheus.ExponentialBuckets"),
			src: `
package test
import "k8s.io/component-base/metrics"
var buckets = []float64{1, 2, 3}
var _ = metrics.NewHistogram(
&metrics.HistogramOpts{
Name: "histogram",
StabilityLevel: metrics.STABLE,
Buckets: buckets,
},
)
`},
		{
			testName: "error stable historgram with unknown bucket variable from unknown library",
			err:      fmt.Errorf("testdata/metric.go:9:13: Buckets should be set to list of floats, result from function call of prometheus.LinearBuckets or prometheus.ExponentialBuckets"),
			src: `
package test
import "k8s.io/component-base/metrics"
import "github.com/fake_prometheus/prometheus"
var _ = metrics.NewHistogram(
&metrics.HistogramOpts{
Name: "histogram",
StabilityLevel: metrics.STABLE,
Buckets: prometheus.DefBuckets,
},
)
`},
	} {
		t.Run(test.testName, func(t *testing.T) {
			_, errors := searchFileForStableMetrics(fakeFilename, test.src)
			if len(errors) != 1 {
				t.Fatalf("Unexpected number of errors, got %d, want 1", len(errors))
			}
			if !reflect.DeepEqual(errors[0], test.err) {
				t.Errorf("error:\ngot %v\nwant %v", errors[0], test.err)
			}
		})
	}
}
|
package rabbitmq
import (
"fmt"
"log"
"github.com/streadway/amqp"
)
const (
	// Direct is the direct exchange type.
	Direct = "direct"
	// Fanout is the fan-out exchange type.
	Fanout = "fanout"
	// Topic is the topic exchange type.
	Topic = "topic"
	// Headers is the headers exchange type.
	Headers = "headers"
	// AllUser addresses all users.
	AllUser = "all"
)

// Connect bundles an AMQP connection with its channel, the exchanges it has
// declared, and the queues it has created.
type Connect struct {
	conn     *amqp.Connection      // underlying broker connection
	ch       *amqp.Channel         // channel used for all operations
	exchange []Exchange            // exchanges declared via this connection
	myqueue  amqp.Queue            // a created queue (not used in this file — TODO confirm)
	queue    map[string]amqp.Queue // created queues, keyed by name
}

// Exchange describes a declared exchange and its bindings.
type Exchange struct {
	qname []string // names of queues bound to this exchange
	ename string   // exchange name
	key   string   // routing key
	etype string   // exchange type (direct/fanout/topic/headers)
}
// NewConnect dials the local RabbitMQ broker and opens a channel on it.
// It returns nil if either the connection or the channel cannot be created.
func NewConnect() *Connect {
	var c = new(Connect)
	var err error
	c.conn, err = amqp.Dial("amqp://guest:guest@localhost:5672/")
	if err != nil {
		log.Println("创建链接错误", err)
		return nil
	}
	c.ch, err = c.conn.Channel()
	if err != nil {
		log.Println("创建通道错误:", err)
		return nil
	}
	c.queue = make(map[string]amqp.Queue)
	// BUG FIX: was make([]Exchange, 50), which pre-filled the slice with 50
	// zero-valued entries; appends landed after them and every lookup had to
	// walk the empty elements. Allocate length 0 with capacity 50 instead.
	c.exchange = make([]Exchange, 0, 50)
	return c
}
// NewExChange builds an Exchange value with the given name, routing key and
// exchange type, and no bound queues yet.
func NewExChange(ename, key, etype string) Exchange {
	return Exchange{
		// BUG FIX: was make([]string, 10), which created ten empty queue
		// names that later appends would follow. Start empty, capacity 10.
		qname: make([]string, 0, 10),
		ename: ename,
		key:   key,
		etype: etype,
	}
}
// createQueue declares a durable queue with the given name, registers it in
// c.queue, and returns it. Declaration failure terminates the process.
func (c *Connect) createQueue(name string) amqp.Queue {
	queue, err := c.ch.QueueDeclare(
		name,
		true,  // durable
		false, // autoDelete
		false, // exclusive
		false, // noWait
		nil,   // args
	)
	if err != nil {
		log.Fatal("创建队列失败:", err)
	}
	c.queue[name] = queue
	return queue
}
// createExcahnge declares a durable exchange and records it in c.exchange.
// (The name keeps its historical misspelling of "createExchange"; renaming
// would require touching every caller.)
// Declaration failure terminates the process.
func (c *Connect) createExcahnge(name, key, etype string) {
	err := c.ch.ExchangeDeclare(
		name,  // exchange name
		etype, // exchange type
		true,  // durable
		false, // autoDelete
		false, // internal
		false, // noWait
		nil,   // args
	)
	if err != nil {
		log.Fatal("声明交换机失败:", err)
	}
	c.exchange = append(c.exchange, NewExChange(name, key, etype))
}
// bind binds queue qname to exchange ename with the given binding key.
// Binding failure terminates the process.
func (c *Connect) bind(qname, ename, key string) {
	err := c.ch.QueueBind(
		qname, // queue name
		key,   // binding key; one exchange may be bound to several queues
		ename, // exchange name
		false, // noWait
		nil,   // args
	)
	if err != nil {
		log.Fatal("绑定交换机队列失败:", err)
	}
}
// SendMsg publishes msg persistently to exchange ename with routing key key,
// first declaring the exchange (with type etype) if this Connect has not
// seen it before.
func (c *Connect) SendMsg(msg []byte, ename, key, etype string) {
	known := false
	for _, ex := range c.exchange {
		if ex.ename == ename {
			known = true
			break
		}
	}
	if !known {
		c.createExcahnge(ename, key, etype)
	}
	err := c.ch.Publish(
		ename, // exchange name
		key,   // routing key
		false, // mandatory
		false, // immediate
		amqp.Publishing{
			DeliveryMode: amqp.Persistent, // persist the message
			ContentType:  "text/plain",
			Body:         msg,
		},
	)
	if err != nil {
		log.Println("发送消息失败:", err)
		return
	}
	fmt.Println("发送成功!")
}
// Receive consumes a single message from queue qname bound to exchange ename
// with routing key key: it declares the queue on first use, binds it, turns
// on fair dispatch, acks the message, prints it, and returns its body.
func (c *Connect) Receive(qname, ename, key string) []byte {
	mark := 0
	// Create the queue if this Connect has not declared it yet.
	for k := range c.queue {
		if k == qname {
			mark = 1
			break
		}
	}
	if mark == 0 {
		c.createQueue(qname)
	}
	// Bind the queue to the exchange.
	c.bind(qname, ename, key)
	// Fair dispatch: with a prefetch count of 1 the broker only delivers the
	// next message after the consumer has acked the previous one, instead of
	// round-robining messages to idle consumers.
	err := c.ch.Qos(
		1, // prefetch count
		0, // prefetch size
		false, // global
	)
	if err != nil {
		log.Println(err)
	}
	msg, err := c.ch.Consume(
		qname, // queue name
		"", // consumer
		false, // autoAck disabled; the message is acked manually below
		false, // exclusive
		false, // noLocal
		false, // noWait
		nil,   // args
	)
	if err != nil {
		log.Println("接收消息失败:", err)
	}
	b := <-msg
	fmt.Printf("msg:%s\n", b.Body)
	// NOTE(review): the Ack error is ignored here — confirm that is intended.
	b.Ack(false)
	return b.Body
}
|
package modules
import (
"errors"
"fmt"
jwt "github.com/dgrijalva/jwt-go"
"github.com/sirupsen/logrus"
)
// Package-level auth configuration. Zero values mean "unset"; presumably
// these are populated elsewhere at startup — TODO confirm.
var (
	_UserTokenHeaderName = "" // HTTP header that carries the user token
	_SecretKeys          = "" // HMAC secret used to verify JWT signatures
	_SystemToken         = "" // token granting system-level access
)

// UserMeta is the authenticated caller's identity extracted from a JWT.
type UserMeta struct {
	UserID  uint
	GroupID uint
	Token   string // raw token string as received
}

// CircleCustomClaims is the JWT claim set used by this service.
type CircleCustomClaims struct {
	UserID  uint
	GroupID uint
	jwt.StandardClaims
}

// getExistsObjectInUserFunc reports whether the object with the given id exists.
type getExistsObjectInUserFunc func(uint) (bool, error)

// BaseUserController is a CRUD controller that requires an authenticated user.
type BaseUserController struct {
	BaseCrudController
	CurrentUserMeta *UserMeta // resolved from the request token in Prepare
}
// Prepare resolves the current user from the request token before running the
// CRUD controller's own preparation; requests without a valid token are
// aborted with 401.
func (c *BaseUserController) Prepare() {
	logrus.Debug("UserBaseController", "Prepare")
	var err error
	if c.CurrentUserMeta, err = c.GetCurrentUserMeta(); err != nil {
		c.ErrorAbort(401, nil)
	}
	//TODO: user permission map -> abort with 403.
	//TODO: user approval check -> abort with 403.
	c.BaseCrudController.Prepare()
}
// GetCurrentUserMeta extracts the caller's identity from the configured token
// header. The special system token short-circuits to user ID 1 without JWT
// validation; anything else is parsed and verified as a JWT.
func (c *BaseUserController) GetCurrentUserMeta() (*UserMeta, error) {
	tokenString := c.Ctx.Request.Header.Get(_UserTokenHeaderName)
	logrus.Debug("전달 받은 토큰", tokenString)
	if tokenString == _SystemToken {
		logrus.Debug("전달 받은 토큰 : System Token")
		return &UserMeta{
			UserID: 1,
			Token:  tokenString,
		}, nil
	}
	return GetCurrentUserMeta(tokenString)
}
// GetCurrentUserMeta parses and validates tokenString and returns the user
// identity it encodes. An empty token yields ErrUnauthorized; parse or
// signature failures are returned as-is.
func GetCurrentUserMeta(tokenString string) (*UserMeta, error) {
	if tokenString == "" {
		return nil, ErrUnauthorized
	}
	parsed, err := parseToken(tokenString)
	if err != nil {
		logrus.Debug("전달 받은 토큰 에러", err)
		return nil, err
	}
	return getCurrentUserMetaByToken(parsed)
}
// parseToken verifies tokenString against the configured HMAC secret and
// returns the parsed JWT carrying CircleCustomClaims.
func parseToken(tokenString string) (*jwt.Token, error) {
	keyFunc := func(token *jwt.Token) (interface{}, error) {
		// Reject tokens signed with anything but the expected HMAC family.
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
		}
		return []byte(_SecretKeys), nil
	}
	parsed, err := jwt.ParseWithClaims(tokenString, &CircleCustomClaims{}, keyFunc)
	if err != nil {
		return nil, err
	}
	return parsed, nil
}
// checkExistsObject aborts the request with 500 when the existence lookup
// fails, and with 400 (carrying ErrRecordNotFoundMsg) when the object does
// not exist.
func (c *BaseUserController) checkExistsObject(getExistsObjectFunc getExistsObjectInUserFunc, id uint, ErrRecordNotFoundMsg string) {
	found, err := getExistsObjectFunc(id)
	if err != nil {
		c.ErrorAbort(500, err)
	}
	if !found {
		c.ErrorAbort(400, errors.New(ErrRecordNotFoundMsg))
	}
}
// getCurrentUserMetaByToken converts a parsed JWT into a UserMeta, returning
// ErrInvalidToken when the token is invalid or carries the wrong claim type.
func getCurrentUserMetaByToken(token *jwt.Token) (*UserMeta, error) {
	claims, ok := token.Claims.(*CircleCustomClaims)
	if !ok || !token.Valid {
		return nil, ErrInvalidToken
	}
	return &UserMeta{
		Token:   token.Raw,
		UserID:  claims.UserID,
		GroupID: claims.GroupID,
	}, nil
}
|
package test_test
import (
. "static_proxy_server/lib/test"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// numberset is the Add test table: each row expects x + y == result.
// (The {1, 2, 3} row appears twice; the duplicate is redundant but harmless.)
var numberset = []struct {
	x      int
	y      int
	result int
}{
	{1, 2, 3},
	{1, 2, 3},
	{2, 4, 6},
}
// Ginkgo spec: verifies Add against every row of numberset.
var _ = Describe("Test", func() {
	// var book Book
	// BeforeEach(func() {
	// book = NewBookFromJSON(`{
	// "title":"Les Miserables",
	// "author":"Victor Hugo",
	// "pages":1488
	// }`)
	// })
	Describe("#Add", func() {
		It("should eq expect", func() {
			for _, set := range numberset {
				Expect(Add(set.x, set.y)).To(Equal(set.result))
				// Expect(Add(set.x, set.y)).ShouldNot(Equal(set.result))
			}
		})
	})
})
|
// Copyright 2016 The G3N Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package geometry implements several primitive geometry generators.
package geometry
import (
"github.com/hecate-tech/engine/gls"
"github.com/hecate-tech/engine/math32"
"github.com/hecate-tech/engine/util/logger"
"strconv"
)
// Package logger
var log = logger.New("GEOMETRY", logger.Default)

// IGeometry is the interface for all geometries.
type IGeometry interface {
	GetGeometry() *Geometry
	RenderSetup(gs *gls.GLS)
	Dispose()
}

// Geometry encapsulates a three-dimensional vertex-based geometry.
type Geometry struct {
	gs            *gls.GLS          // Reference to OpenGL state (valid after first RenderSetup)
	refcount      int               // Current number of references
	groups        []Group           // Array geometry groups
	vbos          []*gls.VBO        // Array of VBOs
	handleVAO     uint32            // Handle to OpenGL VAO
	indices       math32.ArrayU32   // Buffer with indices
	handleIndices uint32            // Handle to OpenGL buffer for indices
	updateIndices bool              // Flag to indicate that indices must be transferred
	ShaderDefines gls.ShaderDefines // Geometry-specific shader defines
	// Geometric properties (caches of the most recent computations)
	boundingBox    math32.Box3    // Last calculated bounding box
	boundingSphere math32.Sphere  // Last calculated bounding sphere
	area           float32        // Last calculated area
	volume         float32        // Last calculated volume
	rotInertia     math32.Matrix3 // Last calculated rotational inertia matrix
	// Flags indicating whether the cached geometric properties are valid
	boundingBoxValid    bool // Indicates if last calculated bounding box is valid
	boundingSphereValid bool // Indicates if last calculated bounding sphere is valid
	areaValid           bool // Indicates if last calculated area is valid
	volumeValid         bool // Indicates if last calculated volume is valid
	rotInertiaValid     bool // Indicates if last calculated rotational inertia matrix is valid
}

// Group is a geometry group object (one material region for multimaterial).
type Group struct {
	Start    int    // Index of first element of the group
	Count    int    // Number of elements in the group
	Matindex int    // Material index for this group
	Matid    string // Material id used when loading external models
}
// NewGeometry creates and returns a pointer to a new, initialized Geometry.
func NewGeometry() *Geometry {
	geom := &Geometry{}
	geom.Init()
	return geom
}
// Init initializes the geometry to its freshly-constructed state: one
// reference, empty VBO and group lists, no GL handles, and indices flagged
// for transfer. Note the cached geometric properties (bounding box/sphere,
// area, volume, inertia) and their validity flags are NOT reset here.
func (g *Geometry) Init() {
	g.refcount = 1
	g.vbos = make([]*gls.VBO, 0)
	g.groups = make([]Group, 0)
	g.gs = nil
	g.handleVAO = 0
	g.handleIndices = 0
	g.updateIndices = true
	g.ShaderDefines = *gls.NewShaderDefines()
}
// GetGeometry satisfies the IGeometry interface by returning the receiver.
func (g *Geometry) GetGeometry() *Geometry {
	return g
}
// AddGroup appends a geometry group (for multimaterial) and returns a
// pointer to the stored group so callers can set further fields (e.g. Matid).
func (g *Geometry) AddGroup(start, count, matIndex int) *Group {
	group := Group{Start: start, Count: count, Matindex: matIndex}
	g.groups = append(g.groups, group)
	return &g.groups[len(g.groups)-1]
}
// AddGroupList appends the specified list of groups to this geometry.
func (g *Geometry) AddGroupList(groups []Group) {
	// Single variadic append replaces the element-by-element loop.
	g.groups = append(g.groups, groups...)
}
// GroupCount returns the number of geometry groups (for multimaterial).
func (g *Geometry) GroupCount() int {
	return len(g.groups)
}

// GroupAt returns pointer to geometry group at the specified index.
// Out-of-range indices panic (slice bounds).
func (g *Geometry) GroupAt(idx int) *Group {
	return &g.groups[idx]
}

// SetIndices sets the indices array for this geometry, marks the indices for
// (re)transfer to the GPU, and invalidates the cached bounding volumes.
func (g *Geometry) SetIndices(indices math32.ArrayU32) {
	g.indices = indices
	g.updateIndices = true
	g.boundingBoxValid = false
	g.boundingSphereValid = false
}

// Indices returns the indices array for this geometry.
func (g *Geometry) Indices() math32.ArrayU32 {
	return g.indices
}
// AddVBO adds a Vertex Buffer Object to this geometry after verifying that
// none of its attributes clashes, by name or by type, with an attribute of
// an already-added VBO. A conflict is a programming error and panics.
func (g *Geometry) AddVBO(vbo *gls.VBO) {
	for _, prior := range g.vbos {
		for _, attrib := range vbo.Attributes() {
			if prior.AttribName(attrib.Name) != nil {
				panic("Geometry.AddVBO: geometry already has a VBO with attribute name:" + attrib.Name)
			}
			if attrib.Type != gls.Undefined && prior.Attrib(attrib.Type) != nil {
				panic("Geometry.AddVBO: geometry already has a VBO with attribute type:" + strconv.Itoa(int(attrib.Type)))
			}
		}
	}
	g.vbos = append(g.vbos, vbo)
}
// VBO returns a pointer to this geometry's VBO which contains the specified attribute.
// Returns nil if no VBO provides the attribute.
func (g *Geometry) VBO(atype gls.AttribType) *gls.VBO {
	for _, candidate := range g.vbos {
		if candidate.Attrib(atype) == nil {
			continue
		}
		return candidate
	}
	return nil
}
// VBOName returns a pointer to this geometry's VBO which contains the attribute
// with the specified name. Returns nil if no VBO provides it.
func (g *Geometry) VBOName(name string) *gls.VBO {
	for _, candidate := range g.vbos {
		if candidate.AttribName(name) == nil {
			continue
		}
		return candidate
	}
	return nil
}
// VBOs returns all of this geometry's VBOs.
// The returned slice is the internal one, not a copy.
func (g *Geometry) VBOs() []*gls.VBO {
	return g.vbos
}
// Items returns the number of items in the first VBO.
// (The number of items should be same for all VBOs)
// An item is a complete group of attributes in the VBO buffer.
func (g *Geometry) Items() int {
	// No VBOs, or a first VBO without attributes, means no items.
	if len(g.vbos) == 0 || g.vbos[0].AttribCount() == 0 {
		return 0
	}
	first := g.vbos[0]
	return first.Buffer().Bytes() / first.StrideSize()
}
// SetAttributeName sets the name of the VBO attribute associated with the
// provided attribute type. It is a no-op when no VBO holds that attribute.
func (g *Geometry) SetAttributeName(atype gls.AttribType, attribName string) {
	vbo := g.VBO(atype)
	if vbo == nil {
		return
	}
	vbo.Attrib(atype).Name = attribName
}
// AttributeName returns the name of the VBO attribute associated with the provided attribute type.
// NOTE(review): if no VBO holds the attribute, VBO() returns nil and this
// panics with a nil dereference — confirm callers only pass present types.
func (g *Geometry) AttributeName(atype gls.AttribType) string {
	return g.VBO(atype).Attrib(atype).Name
}
// OperateOnVertices iterates over all the vertices and calls
// the specified callback function with a pointer to each vertex.
// The vertex pointers can be modified inside the callback and
// the modifications will be applied to the buffer at each iteration.
// The callback function returns false to continue or true to break.
func (g *Geometry) OperateOnVertices(cb func(vertex *math32.Vector3) bool) {
	posVBO := g.VBO(gls.VertexPosition)
	if posVBO == nil {
		// No position attribute: nothing to iterate.
		return
	}
	posVBO.OperateOnVectors3(gls.VertexPosition, cb)
	// Vertices may have moved, so every cached geometric property is stale.
	g.boundingBoxValid = false
	g.boundingSphereValid = false
	g.areaValid = false
	g.volumeValid = false
	g.rotInertiaValid = false
}
// ReadVertices iterates over all the vertices and calls
// the specified callback function with the value of each vertex.
// The callback function returns false to continue or true to break.
func (g *Geometry) ReadVertices(cb func(vertex math32.Vector3) bool) {
	posVBO := g.VBO(gls.VertexPosition)
	if posVBO == nil {
		// No position attribute: nothing to read.
		return
	}
	posVBO.ReadVectors3(gls.VertexPosition, cb)
}
// OperateOnVertexNormals iterates over all the vertex normals
// and calls the specified callback function with a pointer to each normal.
// The vertex pointers can be modified inside the callback and
// the modifications will be applied to the buffer at each iteration.
// The callback function returns false to continue or true to break.
func (g *Geometry) OperateOnVertexNormals(cb func(normal *math32.Vector3) bool) {
	// Get buffer with vertex normals (the original comment wrongly said positions)
	vbo := g.VBO(gls.VertexNormal)
	if vbo == nil {
		return
	}
	vbo.OperateOnVectors3(gls.VertexNormal, cb)
}
// ReadVertexNormals iterates over all the vertex normals and calls
// the specified callback function with the value of each normal.
// The callback function returns false to continue or true to break.
func (g *Geometry) ReadVertexNormals(cb func(vertex math32.Vector3) bool) {
	// Get buffer with vertex normals (the original comment wrongly said positions)
	vbo := g.VBO(gls.VertexNormal)
	if vbo == nil {
		return
	}
	vbo.ReadVectors3(gls.VertexNormal, cb)
}
// ReadFaces iterates over all the vertices and calls
// the specified callback function with face-forming vertex triples.
// The callback function returns false to continue or true to break.
func (g *Geometry) ReadFaces(cb func(vA, vB, vC math32.Vector3) bool) {
	// Get buffer with position vertices
	vbo := g.VBO(gls.VertexPosition)
	if vbo == nil {
		return
	}
	// If geometry has indexed vertices need to loop over indexes
	if g.Indexed() {
		var vA, vB, vC math32.Vector3
		positions := vbo.Buffer()
		// Indices are consumed three at a time: one triangle per step.
		for i := 0; i < g.indices.Size(); i += 3 {
			// Get face vertices.
			// NOTE(review): the 3*index offset assumes positions are tightly
			// packed (stride of 3 floats) — an interleaved VBO would need the
			// actual stride. Confirm against how position VBOs are built.
			positions.GetVector3(int(3*g.indices[i]), &vA)
			positions.GetVector3(int(3*g.indices[i+1]), &vB)
			positions.GetVector3(int(3*g.indices[i+2]), &vC)
			// Call callback with face vertices
			brk := cb(vA, vB, vC)
			if brk {
				break
			}
		}
	} else {
		// Geometry does NOT have indexed vertices - can read vertices in sequence
		vbo.ReadTripleVectors3(gls.VertexPosition, cb)
	}
}
// TODO Read and Operate on Texcoords, Faces, Edges, FaceNormals, etc...
// Indexed returns whether the geometry is indexed or not
// (i.e. whether a non-empty indices array has been set).
func (g *Geometry) Indexed() bool {
	return g.indices.Size() > 0
}
// BoundingBox computes the bounding box of the geometry if necessary
// and returns its value. An empty geometry yields the zero box.
func (g *Geometry) BoundingBox() math32.Box3 {
	// If valid, return the cached value
	if g.boundingBoxValid {
		return g.boundingBox
	}
	// Reset bounding box
	g.boundingBox.Min.Set(0, 0, 0)
	g.boundingBox.Max.Set(0, 0, 0)
	// Seed the box with the first vertex instead of the origin: starting
	// from (0,0,0) wrongly forced every box to contain the origin, which
	// inflated boxes for meshes located away from it.
	first := true
	g.ReadVertices(func(vertex math32.Vector3) bool {
		if first {
			g.boundingBox.Min = vertex
			g.boundingBox.Max = vertex
			first = false
		} else {
			g.boundingBox.ExpandByPoint(&vertex)
		}
		return false
	})
	g.boundingBoxValid = true
	return g.boundingBox
}
// BoundingSphere computes the bounding sphere of this geometry
// if necessary and returns its value.
func (g *Geometry) BoundingSphere() math32.Sphere {
	if g.boundingSphereValid {
		return g.boundingSphere
	}
	// Center the sphere on the middle of the bounding box.
	g.boundingSphere.Radius = float32(0)
	bbox := g.BoundingBox()
	bbox.Center(&g.boundingSphere.Center)
	// The radius is the distance from that center to the farthest vertex.
	var maxDistSq float32
	g.ReadVertices(func(vertex math32.Vector3) bool {
		if d := g.boundingSphere.Center.DistanceToSquared(&vertex); d > maxDistSq {
			maxDistSq = d
		}
		return false
	})
	g.boundingSphere.Radius = float32(math32.Sqrt(maxDistSq))
	g.boundingSphereValid = true
	return g.boundingSphere
}
// Area returns the surface area.
// NOTE: This only works for triangle-based meshes.
func (g *Geometry) Area() float32 {
	// If valid, return its value
	if g.areaValid {
		return g.area
	}
	// Reset area
	g.area = 0
	// Sum area of all triangles: |(A-C) x (B-C)| / 2 per face.
	// Sub/CrossVectors mutate only the callback's local copies; the
	// underlying vertex buffer is not modified.
	g.ReadFaces(func(vA, vB, vC math32.Vector3) bool {
		vA.Sub(&vC)
		vB.Sub(&vC)
		vC.CrossVectors(&vA, &vB)
		g.area += vC.Length() / 2.0
		return false
	})
	g.areaValid = true
	return g.area
}
// Volume returns the volume.
// NOTE: This only works for closed triangle-based meshes.
func (g *Geometry) Volume() float32 {
	// If valid, return its value
	if g.volumeValid {
		return g.volume
	}
	// Reset volume
	g.volume = 0
	// Sum the signed volumes of the tetrahedrons formed by each face and
	// the origin: (C · ((A-C) x (B-C))) / 6. Signed terms cancel so the
	// total is correct for a closed mesh — presumably with consistent
	// face winding (confirm against the mesh generators).
	g.ReadFaces(func(vA, vB, vC math32.Vector3) bool {
		vA.Sub(&vC)
		vB.Sub(&vC)
		g.volume += vC.Dot(vA.Cross(&vB)) / 6.0
		return false
	})
	g.volumeValid = true
	return g.volume
}
// RotationalInertia returns the rotational inertia tensor, also known as the moment of inertia.
// This assumes constant density of 1 (kg/m^2).
// To adjust for a different constant density simply scale the returning matrix by the density.
// NOTE(review): the cached tensor does not account for a different mass on a
// later call — confirm callers always pass the same mass for a given geometry.
func (g *Geometry) RotationalInertia(mass float32) math32.Matrix3 {
	// If valid, return the cached value
	if g.rotInertiaValid {
		return g.rotInertia
	}
	// Reset rotational inertia
	g.rotInertia.Zero()
	// For now approximate result based on bounding box (solid cuboid formula).
	b := math32.NewVec3()
	box := g.BoundingBox()
	box.Size(b)
	multiplier := mass / 12.0
	x := (b.Y*b.Y + b.Z*b.Z) * multiplier
	y := (b.X*b.X + b.Z*b.Z) * multiplier
	z := (b.Y*b.Y + b.X*b.X) * multiplier
	g.rotInertia.Set(
		x, 0, 0,
		0, y, 0,
		0, 0, z,
	)
	// Mark the cache valid: the original never set this flag, so the tensor
	// was recomputed on every call and the cache (and its invalidation in
	// OperateOnVertices) was dead code.
	g.rotInertiaValid = true
	return g.rotInertia
}
// ProjectOntoAxis projects the geometry onto the specified axis,
// effectively squashing it into a line passing through the local origin.
// Returns the maximum and the minimum values on that line (i.e. signed distances from the local origin).
// An empty geometry returns (0, 0).
func (g *Geometry) ProjectOntoAxis(localAxis *math32.Vector3) (float32, float32) {
	var max, min float32
	// Seed the extremes from the first vertex: starting both at zero
	// silently clamped the interval to always include 0, which is wrong
	// when every projection is strictly positive (or strictly negative).
	first := true
	g.ReadVertices(func(vertex math32.Vector3) bool {
		val := vertex.Dot(localAxis)
		if first {
			max, min = val, val
			first = false
			return false
		}
		if val > max {
			max = val
		}
		if val < min {
			min = val
		}
		return false
	})
	return max, min
}
// TODO:
// https://stackoverflow.com/questions/21640545/how-to-check-for-convexity-of-a-3d-mesh
// func (g *Geometry) IsConvex() bool {
//
// }
// ApplyMatrix multiplies each of the geometry position vertices
// by the specified matrix and applies the corresponding normal
// transform matrix to the geometry normal vectors.
// The geometry's bounding box and sphere are recomputed if needed.
func (g *Geometry) ApplyMatrix(m *math32.Matrix4) {
	// Transform every position vertex in place (this also invalidates the
	// cached geometric properties via OperateOnVertices).
	g.OperateOnVertices(func(v *math32.Vector3) bool {
		v.ApplyMatrix4(m)
		return false
	})
	// Normals transform with the normal (inverse-transpose) matrix and must
	// be re-normalized afterwards.
	var nm math32.Matrix3
	nm.GetNormalMatrix(m)
	g.OperateOnVertexNormals(func(n *math32.Vector3) bool {
		n.ApplyMatrix3(&nm).Normalize()
		return false
	})
}
// Incref increments the reference count for this geometry
// and returns a pointer to the geometry.
// It should be used when this geometry is shared by another
// Graphic object.
// The plain increment is not safe for concurrent use; guard externally
// if geometries are shared across goroutines.
func (g *Geometry) Incref() *Geometry {
	g.refcount++
	return g
}
// Dispose decrements this geometry reference count and
// if possible releases OpenGL resources, C memory
// and VBOs associated with this geometry.
func (g *Geometry) Dispose() {
	// Still shared by another Graphic: just drop one reference.
	if g.refcount > 1 {
		g.refcount--
		return
	}
	// Last reference: delete the VAO and indices buffer if they were created.
	if g.gs != nil {
		g.gs.DeleteVertexArrays(g.handleVAO)
		g.gs.DeleteBuffers(g.handleIndices)
	}
	// Release every VBO, then reset the geometry to its initial state.
	for _, vbo := range g.vbos {
		vbo.Dispose()
	}
	g.Init()
}
// RenderSetup is called by the renderer before drawing the geometry.
// On the first call it lazily creates the VAO and index buffer; on every
// call it binds the VAO, transfers pending VBO data and re-uploads the
// indices when they have changed.
func (g *Geometry) RenderSetup(gs *gls.GLS) {
	// First time initialization
	if g.gs == nil {
		// Generate VAO
		g.handleVAO = gs.GenVertexArray()
		// Generate buffer for indices
		g.handleIndices = gs.GenBuffer()
		// Save pointer to gs indicating initialization was done
		g.gs = gs
	}
	// Update VBOs (the VAO must be bound first so attribute state is recorded)
	gs.BindVertexArray(g.handleVAO)
	for _, vbo := range g.vbos {
		vbo.Transfer(gs)
	}
	// Update Indices buffer if necessary
	if g.indices.Size() > 0 && g.updateIndices {
		gs.BindBuffer(gls.ELEMENT_ARRAY_BUFFER, g.handleIndices)
		gs.BufferData(gls.ELEMENT_ARRAY_BUFFER, g.indices.Bytes(), g.indices.ToUint32(), gls.STATIC_DRAW)
		g.updateIndices = false
	}
}
|
package common
import (
"github.com/streadway/amqp"
)
var conn *amqp.Connection
var ch *amqp.Channel
var q amqp.Queue
var config Config
// Closeq closes the AMQP channel and then the underlying connection.
// NOTE(review): both Close errors are silently discarded — consider at
// least logging them during shutdown.
func Closeq() {
	ch.Close()
	conn.Close()
}
// Connq loads the configuration, dials the AMQP broker, opens a channel
// with a prefetch of one message, and declares the "testing" queue.
// Any failure aborts the process via FailOnError.
func Connq() {
	var err error
	config = LoadConfig()
	conn, err = amqp.Dial(config.Queue)
	FailOnError(err, "failed to connect to queue")
	ch, err = conn.Channel()
	FailOnError(err, "failed to connect to channel")
	// Limit unacknowledged deliveries to one so work is spread fairly
	// across consumers.
	err = ch.Qos(
		1,     // prefetch count
		0,     // prefetch size
		false, // global
	)
	FailOnError(err, "failed to set qos")
	delcareq("testing")
}
// delcareq declares a non-durable, non-exclusive, auto-delete-off queue
// with the given name and stores the result in the package-level q.
// NOTE(review): the name is a typo of "declareq"; renaming is safe only
// after checking the rest of the package for callers.
func delcareq(name string) {
	var err error
	q, err = ch.QueueDeclare(
		name,  // names
		false, // durable
		false, // delete when unused
		false, // exclusive
		false, // no-wait
		nil,   // arguments
	)
	FailOnError(err, "failed to declare queue")
}
// Commitq publishes the given message body to the declared queue using
// the default exchange, marked persistent and typed as JSON.
// A publish failure aborts via FailOnError.
func Commitq(msg []byte) {
	err := ch.Publish(
		"",     // exchange
		q.Name, // routing key
		false,  // mandatory
		false,  // immediate
		amqp.Publishing{
			DeliveryMode: amqp.Persistent,
			ContentType:  "application/json",
			Body:         msg,
		})
	FailOnError(err, "failed to send message to queue")
}
// Readq will start reading messages from the q.
// It is currently an unimplemented stub and does nothing.
func Readq() {
}
// Getq returns the current queue as declared by delcareq.
func Getq() amqp.Queue {
	return q
}
// GetCh returns the current AMQP channel opened by Connq.
func GetCh() *amqp.Channel {
	return ch
}
// main is an empty placeholder.
// NOTE(review): a main function inside package common is unusual and is
// never run as an entry point — confirm it can be removed.
func main() {
}
|
package datastoresql_test
import (
"context"
"fmt"
"math/rand"
"testing"
"time"
"github.com/direktiv/direktiv/pkg/refactor/database"
"github.com/direktiv/direktiv/pkg/refactor/datastore/datastoresql"
"github.com/direktiv/direktiv/pkg/refactor/logengine"
"github.com/google/uuid"
)
// Test_LogStoreAddGet appends a random batch of Info messages for a
// source and verifies they can be queried back by level and source.
func Test_LogStoreAddGet(t *testing.T) {
	db, err := database.NewMockGorm()
	if err != nil {
		// Fixed typo in the failure message: was "unepxected".
		t.Fatalf("unexpected NewMockGorm() error = %v", err)
	}
	ds := datastoresql.NewSQLStore(db, "some_secret_key_")
	logstore := ds.Logs()
	id := uuid.New()
	addRandomMsgs(t, logstore, "source", id, logengine.Info)
	// Query by level and source; -1/-1 disables limit and offset.
	q := make(map[string]interface{}, 0)
	q["level"] = logengine.Info
	q["source"] = id
	got, _, err := logstore.Get(context.Background(), q, -1, -1)
	if err != nil {
		t.Error(err)
	}
	if len(got) < 1 {
		t.Error("got no results")
	}
}
// Test_Callpath verifies that querying by log_instance_call_path returns
// entries whose call path matches the queried prefix, including entries
// appended under a child path.
func Test_Callpath(t *testing.T) {
	db, err := database.NewMockGorm()
	if err != nil {
		// Fixed typo in the failure message: was "unepxected".
		t.Fatalf("unexpected NewMockGorm() error = %v", err)
	}
	ds := datastoresql.NewSQLStore(db, "some_secret_key_")
	logstore := ds.Logs()
	id := uuid.New()
	tags := make(map[string]interface{})
	tags["log_instance_call_path"] = "/" + id.String()
	err = logstore.Append(context.Background(), time.Now().UTC(), logengine.Debug, "testing callpath", tags)
	if err != nil {
		t.Error(err)
	}
	q := make(map[string]interface{}, 0)
	q["log_instance_call_path"] = "/" + id.String()
	got, _, err := logstore.Get(context.Background(), q, -1, -1)
	if err != nil {
		t.Error(err)
	}
	if len(got) < 1 {
		t.Error("got no results")
	}
	// Append under a child call path and query again with the parent
	// prefix; the child entry should still be matched.
	tags["log_instance_call_path"] = "/" + id.String() + "/" + uuid.NewString()
	err = logstore.Append(context.Background(), time.Now().UTC(), logengine.Debug, "testing callpath", tags)
	if err != nil {
		t.Error(err)
	}
	q = make(map[string]interface{}, 0)
	q["log_instance_call_path"] = "/" + id.String()
	got, _, err = logstore.Get(context.Background(), q, -1, -1)
	if err != nil {
		t.Error(err)
	}
	if len(got) < 1 {
		t.Error("got no results")
	}
}
// addRandomMsgs appends 1-20 random test messages tagged with col=id at
// the given level, then queries them back and checks the count, the
// message bodies, and that each stored entry carries the expected
// human-readable level field.
func addRandomMsgs(t *testing.T, logstore logengine.LogStore, col string, id uuid.UUID, level logengine.LogLevel) {
	t.Helper()
	want := []string{}
	c := rand.Intn(20) + 1 //nolint:gosec
	for i := 0; i < c; i++ {
		want = append(want, fmt.Sprintf("test msg %d", rand.Intn(100)+1)) //nolint:gosec
	}
	in := map[string]interface{}{}
	in[col] = id
	for _, v := range want {
		err := logstore.Append(context.Background(), time.Now().UTC(), level, v, in)
		if err != nil {
			t.Error(err)
		}
	}
	// Read everything back filtered by the same tag; -1/-1 disables
	// limit and offset.
	q := map[string]interface{}{}
	q[col] = id
	got, count, err := logstore.Get(context.Background(), q, -1, -1)
	if err != nil {
		t.Error(err)
	}
	if count != c {
		t.Errorf("got wrong total count Want %v got %v", c, count)
	}
	if len(got) != len(want) {
		t.Error("got wrong number of results.")
	}
	// Every stored entry must match one of the generated messages
	// (duplicates are possible, so membership is all we can check).
	for _, le := range got {
		ok := false
		for _, v := range want {
			ok = ok || v == le.Msg
		}
		if !ok {
			t.Errorf("log entry is not found %s", le.Msg)
		}
		res, ok := le.Fields["level"]
		if !ok {
			t.Error("missing level value")
		}
		// The store renders levels as strings indexed by LogLevel.
		levels := []string{"debug", "info", "error"}
		wantLevelValue := levels[level]
		gotLevelValue := fmt.Sprintf("%v", res)
		if wantLevelValue != gotLevelValue {
			t.Errorf("wanted level %s got %s", wantLevelValue, res)
		}
	}
}
|
package main
import (
"fmt"
"github.com/raypereda/fibonacci/fib"
)
func main() {
fmt.Println("Hi")
for i := 0; i < 10; i++ {
fmt.Println(i, "fibonacci number is", fib.Fib1(i))
}
} |
package util
import (
crand "crypto/rand"
"encoding/base64"
"encoding/json"
"time"
)
// Timeout waits for a value on ch for at most the given duration.
// It returns the received value with isTimeout == false, or a nil value
// with isTimeout == true when the duration elapses first.
// (Translated from the original Chinese comment: "timeout utility".)
func Timeout(ch chan interface{}, timeout time.Duration) (val interface{}, isTimeout bool) {
	select {
	case val := <-ch:
		return val, false
	case <-time.After(timeout):
		// Election timeout: the caller switches state and starts a new
		// election round (translated from the original comment).
		// time.After replaces the original hand-rolled sleeper goroutine
		// and its extra channel bookkeeping.
		return nil, true
	}
}
// GetMillDuration converts i milliseconds into a time.Duration.
func GetMillDuration(i int64) time.Duration {
	return time.Duration(i) * time.Millisecond
}
// CloseChan closes ch and drains any values still buffered in it.
func CloseChan(ch chan interface{}) {
	close(ch)
	// Ranging over a closed channel consumes the remaining buffered
	// values and terminates once the channel is empty.
	for range ch {
	}
}
// Min returns the smaller of x and y.
func Min(x, y int) int {
	if x < y {
		return x
	}
	return y
}
// Max returns the larger of x and y.
func Max(x, y int) int {
	if x < y {
		return y
	}
	return x
}
// String renders v as its JSON encoding.
// Marshal errors are deliberately discarded (best-effort); on failure
// the result is the empty string.
func String(v interface{}) string {
	data, _ := json.Marshal(v)
	return string(data)
}
// Randstring returns a random string of length n drawn from the URL-safe
// base64 alphabet, using crypto/rand as the entropy source.
func Randstring(n int) string {
	// 2*n raw bytes always base64-encode to at least n characters.
	b := make([]byte, 2*n)
	if _, err := crand.Read(b); err != nil {
		// The original ignored this error. crypto/rand failing means the
		// platform RNG is broken; there is no meaningful fallback for a
		// random token, so fail loudly instead of returning weak output.
		panic(err)
	}
	return base64.URLEncoding.EncodeToString(b)[:n]
}
package mredis
import (
"sync"
"github.com/garyburd/redigo/redis"
"time"
"strings"
"github.com/2liang/mcache/modules/utils/setting"
)
// RedisOption holds the connection settings for the master/slave pools.
// NOTE(review): the Timeout fields are time.Duration but InitRedis
// multiplies them by time.Second — they are effectively "seconds as a
// Duration-typed integer"; confirm what the config loader supplies.
type RedisOption struct {
	Timeout      time.Duration // dial timeout (in seconds, see note)
	ReadTimeout  time.Duration // read timeout (in seconds, see note)
	WriteTimeout time.Duration // write timeout (in seconds, see note)
	Db           int           // redis database index selected after dial
	MHosts       string        // master host:port
	SHosts       string        // comma-separated slave host:port list
}
// BaseRedis bundles one master connection pool and a list of slave pools.
type BaseRedis struct {
	Mutex  *sync.Mutex   // external lock; not used by the methods below
	mredis *redis.Pool   // master pool (writes)
	sredis []*redis.Pool // slave pools (reads)
}
//type CommandType struct {
// Get string
// Del string
//}
//
//type RedisCommand struct {
// String CommandType
// Hash CommandType
// List CommandType
// Set CommandType
// Zset CommandType
//}
//
//func (rc *RedisCommand) InitRedisCommand() {
// rc.String.Get = "Get"
// rc.String.Del = "Del"
//}
//
//var RedisCommand = new(RedisCommand)
// InitRedis builds the master pool from option.MHosts and one slave pool
// per comma-separated entry in option.SHosts. Each pool selects option.Db
// after dialing and pings idle connections older than a minute on borrow.
func (b *BaseRedis) InitRedis(option *RedisOption) {
	b.mredis = &redis.Pool{
		MaxIdle:     5,
		MaxActive:   500,
		IdleTimeout: 30 * time.Second,
		Dial: func() (redis.Conn, error) {
			// NOTE(review): option.Timeout * time.Second is Duration*Duration;
			// this only makes sense if the config stores plain second counts
			// in the Duration fields — confirm against the config loader.
			c, err := redis.DialTimeout("tcp", option.MHosts, option.Timeout * time.Second, option.ReadTimeout * time.Second, option.WriteTimeout * time.Second)
			if err != nil {
				return nil, err
			}
			if _, err := c.Do("SELECT", option.Db); err != nil {
				c.Close()
				return nil, err
			}
			return c, nil
		},
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			// Only ping connections that have been idle for a minute or more.
			if time.Since(t) < time.Minute {
				return nil
			}
			_, err := c.Do("PING")
			return err
		},
	}
	shosts := strings.Split(option.SHosts, ",")
	if len(shosts) < 1 {
		setting.Logger.Panic("The redis slave init error")
	}
	b.sredis = make([]*redis.Pool, 0)
	for i := 0; i < len(shosts); i++ {
		// NOTE(review): the immediately-invoked func(i) wrapper predates
		// Go 1.22 loop-variable semantics; it only exists to capture i for
		// the Dial closure below.
		func(i int) {
			sredis_pool := &redis.Pool{
				MaxIdle:     5,
				MaxActive:   500,
				IdleTimeout: 30 * time.Second,
				Dial: func() (redis.Conn, error) {
					c, err := redis.DialTimeout("tcp", shosts[i], option.Timeout * time.Second, option.ReadTimeout * time.Second, option.WriteTimeout * time.Second)
					if err != nil {
						return nil, err
					}
					if _, err := c.Do("SELECT", option.Db); err != nil {
						c.Close()
						return nil, err
					}
					return c, nil
				},
				TestOnBorrow: func(c redis.Conn, t time.Time) error {
					if time.Since(t) < time.Minute {
						return nil
					}
					_, err := c.Do("PING")
					return err
				},
			}
			b.sredis = append(b.sredis, sredis_pool)
		}(i)
	}
}
// getMredis borrows a connection from the master pool.
// The caller must Close it to return it to the pool.
func (b *BaseRedis) getMredis() redis.Conn {
	return b.mredis.Get()
}
// getSredis borrows a connection from a slave pool.
// NOTE(review): it always uses sredis[0]; the other slave pools built by
// InitRedis are never used — confirm whether load balancing was intended.
func (b *BaseRedis) getSredis() redis.Conn {
	return b.sredis[0].Get()
}
// Get fetches the value stored at key from a slave connection.
func (b *BaseRedis) Get(key string) (r interface{}, err error) {
	conn := b.getSredis()
	// Close returns the connection to the pool; without it every call
	// leaked a pooled connection until MaxActive was exhausted.
	defer conn.Close()
	return conn.Do("GET", key)
}
// Set stores a value via SET on a master connection.
func (b *BaseRedis) Set(v ...interface{}) (r interface{}, err error) {
	conn := b.getMredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	return conn.Do("SET", v...)
}
// Del deletes key on a master connection.
func (b *BaseRedis) Del(key string) (r interface{}, err error) {
	conn := b.getMredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	return conn.Do("DEL", key)
}
// Exists reports whether k exists, using a slave connection.
func (b *BaseRedis) Exists(k string) (r interface{}, err error) {
	conn := b.getSredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	return conn.Do("EXISTS", k)
}
// Expire sets a key TTL via EXPIRE on a master connection.
func (b *BaseRedis) Expire(v ...interface{}) (r interface{}, err error) {
	conn := b.getMredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	return conn.Do("EXPIRE", v...)
}
// Incrby increments a counter via INCRBY on a master connection.
func (b *BaseRedis) Incrby(v ...interface{}) (r interface{}, err error) {
	conn := b.getMredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	return conn.Do("INCRBY", v...)
}
// Decrby decrements a counter via DECRBY on a master connection.
func (b *BaseRedis) Decrby(v ...interface{}) (r interface{}, err error) {
	conn := b.getMredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	return conn.Do("DECRBY", v...)
}
// RPush appends values to a list via RPUSH on a master connection.
func (b *BaseRedis) RPush(v ...interface{}) (r interface{}, err error) {
	conn := b.getMredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	return conn.Do("RPUSH", v...)
}
// LPop pops the head of list k via LPOP on a master connection.
func (b *BaseRedis) LPop(k string) (r interface{}, err error) {
	conn := b.getMredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	return conn.Do("LPOP", k)
}
// LLen returns the length of list k, using a slave connection.
func (b *BaseRedis) LLen(k string) (r interface{}, err error) {
	conn := b.getSredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	return conn.Do("LLEN", k)
}
// LRem removes list elements via LREM on a master connection.
func (b *BaseRedis) LRem(v ...interface{}) (r interface{}, err error) {
	conn := b.getMredis()
	// Return the borrowed connection to the pool (previously leaked).
	defer conn.Close()
	// Uppercased "LRem" -> "LREM" for consistency with the other commands
	// (Redis command names are case-insensitive, so behavior is unchanged).
	return conn.Do("LREM", v...)
}
//func (b *BaseRedis) Do (command string, v ...interface{}) (r interface{}, err error) {
//
//} |
// Package bcd provides functions for converting integers to BCD byte array and vice versa.
package bcd
// pow100 returns 100 raised to the given power.
func pow100(power byte) uint64 {
	result := uint64(1)
	for ; power > 0; power-- {
		result *= 100
	}
	return result
}
// FromUint encodes value as a big-endian BCD byte array of the given size.
// Digits that do not fit in size bytes are silently dropped, so only the
// least-significant 2*size decimal digits are kept.
func FromUint(value uint64, size int) []byte {
	buf := make([]byte, size)
	// Fill from the least-significant byte backwards, two digits at a time.
	for pos, rem := size-1, value; pos >= 0 && rem > 0; pos, rem = pos-1, rem/100 {
		pair := byte(rem % 100)
		buf[pos] = (pair/10)<<4 | pair%10
	}
	return buf
}
// FromUint8 returns the uint8 value in BCD format.
//
// If value > 99, function returns value for last two digits of source value
// (Example: uint8(123) = uint8(0x23)).
func FromUint8(value uint8) byte {
	return FromUint(uint64(value), 1)[0]
}
// FromUint16 returns a two-byte array with the uint16 value in BCD format.
//
// If value > 9999, function returns value for last four digits of source value
// (Example: uint16(12345) = []byte{0x23, 0x45}).
func FromUint16(value uint16) []byte {
	return FromUint(uint64(value), 2)
}
// FromUint32 returns a four-byte array with the uint32 value in BCD format.
//
// If value > 99999999, function returns value for last eight digits of source value
// (Example: uint32(1234567890) = []byte{0x23, 0x45, 0x67, 0x89}).
func FromUint32(value uint32) []byte {
	return FromUint(uint64(value), 4)
}
// FromUint64 returns an eight-byte array with the uint64 value in BCD format.
//
// If value > 9999999999999999, function returns value for last sixteen digits of source value
// (Example: uint64(12233445566778899) = []byte{0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99}).
func FromUint64(value uint64) []byte {
	return FromUint(value, 8)
}
// toUint converts at most the last size bytes of a BCD array to uint64.
// It returns 0 if any nibble in the used part is not a decimal digit
// (i.e. the byte is not valid BCD).
func toUint(value []byte, size int) uint64 {
	// Keep only the trailing size bytes.
	if len(value) > size {
		value = value[len(value)-size:]
	}
	res := uint64(0)
	for _, b := range value {
		hi, lo := b>>4, b&0x0f
		if hi > 9 || lo > 9 {
			return 0
		}
		// Accumulate left-to-right. The original picked a power of 100 from
		// the PRE-truncation length (vlen), so any input longer than size
		// produced a wildly wrong result; this form has no such dependency.
		res = res*100 + uint64(hi*10+lo)
	}
	return res
}
// ToUint8 returns the uint8 value converted from a BCD byte.
//
// If byte is not BCD (e.g. 0x1A), function returns zero.
func ToUint8(value byte) uint8 {
	return uint8(toUint([]byte{value}, 1))
}
// ToUint16 returns the uint16 value converted from at most the last two
// bytes of a BCD byte array.
//
// If any byte of used array part is not BCD (e.g 0x1A), function returns zero.
func ToUint16(value []byte) uint16 {
	return uint16(toUint(value, 2))
}
// ToUint32 returns the uint32 value converted from at most the last four
// bytes of a BCD byte array.
//
// If any byte of used array part is not BCD (e.g 0x1A), function returns zero.
func ToUint32(value []byte) uint32 {
	return uint32(toUint(value, 4))
}
// ToUint64 returns the uint64 value converted from at most the last eight
// bytes of a BCD byte array.
//
// If any byte of used array part is not BCD (e.g 0x1A), function returns zero.
func ToUint64(value []byte) uint64 {
	return toUint(value, 8)
}
|
package model
// Supplier models a goods supplier record.
//
// Field notes (translated from the original Chinese comments):
//   Id             - primary key
//   SupplierCode   - supplier code
//   SupplierName   - supplier name
//   SupplierId     - foreign key(supplierId) references Order(id) on delete cascade
//   Contact        - contact person
//   ContactNumber  - contact phone number
//   ContactAddress - contact address
//   Fax            - fax number
//   Describe       - description
//
// NOTE(review): phone numbers usually need a string type (leading zeros,
// "+" prefixes) — confirm whether ContactNumber should stay an int.
type Supplier struct {
	Id int
	SupplierCode string
	SupplierName string
	SupplierId int
	Contact string
	ContactNumber int
	ContactAddress string
	Fax string
	Describe string
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helm_test
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/golang/mock/gomock"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/repo"
"github.com/kubernetes-sigs/minibroker/pkg/helm"
"github.com/kubernetes-sigs/minibroker/pkg/helm/mocks"
"github.com/kubernetes-sigs/minibroker/pkg/log"
)
// Ginkgo suite for the helm.Client wrapper: each Describe drives the
// client against a fresh gomock RepositoryInitializeDownloadLoader.
var _ = Describe("Helm", func() {
	Context("Client", func() {
		var ctrl *gomock.Controller
		BeforeEach(func() {
			ctrl = gomock.NewController(GinkgoT())
		})
		AfterEach(func() {
			// Finish verifies that every EXPECT() was satisfied.
			ctrl.Finish()
		})
		Describe("NewDefaultClient", func() {
			It("should create a new Client", func() {
				client := helm.NewDefaultClient()
				Expect(client).NotTo(BeNil())
			})
		})
		// Initialize is exercised against each failure point in the
		// initialize -> download-index -> load pipeline, then the happy path.
		Describe("Initialize", func() {
			It("should fail when repoInitializer.Initialize fails", func() {
				repoClient := mocks.NewMockRepositoryInitializeDownloadLoader(ctrl)
				repoClient.EXPECT().
					Initialize(gomock.Any(), gomock.Any()).
					Return(nil, fmt.Errorf("amazing repoInitializer failure")).
					Times(1)
				client := helm.NewClient(
					log.NewNoop(),
					repoClient,
					nil,
				)
				err := client.Initialize("")
				Expect(err).To(Equal(fmt.Errorf("failed to initialize helm client: amazing repoInitializer failure")))
			})
			It("should fail when repoDownloader.DownloadIndex fails", func() {
				repoClient := mocks.NewMockRepositoryInitializeDownloadLoader(ctrl)
				chartRepo := &repo.ChartRepository{}
				repoClient.EXPECT().
					Initialize(gomock.Any(), gomock.Any()).
					Return(chartRepo, nil).
					Times(1)
				repoClient.EXPECT().
					DownloadIndex(chartRepo).
					Return("", fmt.Errorf("awesome repoDownloader error")).
					Times(1)
				client := helm.NewClient(
					log.NewNoop(),
					repoClient,
					nil,
				)
				err := client.Initialize("")
				Expect(err).To(Equal(fmt.Errorf("failed to initialize helm client: awesome repoDownloader error")))
			})
			It("should fail when repoLoader.Load fails", func() {
				repoClient := mocks.NewMockRepositoryInitializeDownloadLoader(ctrl)
				chartRepo := &repo.ChartRepository{}
				repoClient.EXPECT().
					Initialize(gomock.Any(), gomock.Any()).
					Return(chartRepo, nil).
					Times(1)
				indexPath := "some_path.yaml"
				repoClient.EXPECT().
					DownloadIndex(chartRepo).
					Return(indexPath, nil).
					Times(1)
				repoClient.EXPECT().
					Load(indexPath).
					Return(nil, fmt.Errorf("marvelous repoLoader fault")).
					Times(1)
				client := helm.NewClient(
					log.NewNoop(),
					repoClient,
					nil,
				)
				err := client.Initialize("")
				Expect(err).To(Equal(fmt.Errorf("failed to initialize helm client: marvelous repoLoader fault")))
			})
			It("should succeed", func() {
				repoClient := mocks.NewMockRepositoryInitializeDownloadLoader(ctrl)
				chartRepo := &repo.ChartRepository{}
				repoClient.EXPECT().
					Initialize(gomock.Any(), gomock.Any()).
					Return(chartRepo, nil).
					Times(1)
				indexPath := "some_path.yaml"
				repoClient.EXPECT().
					DownloadIndex(chartRepo).
					Return(indexPath, nil).
					Times(1)
				repoClient.EXPECT().
					Load(indexPath).
					Return(&repo.IndexFile{}, nil).
					Times(1)
				client := helm.NewClient(
					log.NewNoop(),
					repoClient,
					nil,
				)
				err := client.Initialize("")
				Expect(err).NotTo(HaveOccurred())
			})
		})
		Describe("ListCharts", func() {
			It("should return the chart entries", func() {
				expectedCharts := map[string]repo.ChartVersions{
					"foo": make(repo.ChartVersions, 0),
					"bar": make(repo.ChartVersions, 0),
				}
				repoClient := newRepoClient(ctrl, expectedCharts)
				client := helm.NewClient(log.NewNoop(), repoClient, nil)
				err := client.Initialize("")
				Expect(err).NotTo(HaveOccurred())
				charts := client.ListCharts()
				Expect(charts).To(Equal(expectedCharts))
			})
		})
		Describe("GetChart", func() {
			It("should fail when the chart doesn't exist", func() {
				charts := map[string]repo.ChartVersions{"foo": make(repo.ChartVersions, 0)}
				repoClient := newRepoClient(ctrl, charts)
				client := helm.NewClient(log.NewNoop(), repoClient, nil)
				err := client.Initialize("")
				Expect(err).NotTo(HaveOccurred())
				chart, err := client.GetChart("bar", "")
				Expect(err).To(Equal(fmt.Errorf("failed to get chart: chart not found: bar")))
				Expect(chart).To(BeNil())
			})
			It("should fail when the chart version doesn't exist", func() {
				charts := map[string]repo.ChartVersions{"bar": make(repo.ChartVersions, 0)}
				repoClient := newRepoClient(ctrl, charts)
				client := helm.NewClient(log.NewNoop(), repoClient, nil)
				err := client.Initialize("")
				Expect(err).NotTo(HaveOccurred())
				chart, err := client.GetChart("bar", "1.2.3")
				Expect(err).To(Equal(fmt.Errorf("failed to get chart: chart app version not found for \"bar\": 1.2.3")))
				Expect(chart).To(BeNil())
			})
			It("should succeed returning the requested chart", func() {
				chartMetadata := &chart.Metadata{AppVersion: "1.2.3"}
				expectedChart := &repo.ChartVersion{Metadata: chartMetadata}
				versions := repo.ChartVersions{expectedChart}
				charts := map[string]repo.ChartVersions{"bar": versions}
				repoClient := newRepoClient(ctrl, charts)
				client := helm.NewClient(log.NewNoop(), repoClient, nil)
				err := client.Initialize("")
				Expect(err).NotTo(HaveOccurred())
				chart, err := client.GetChart("bar", "1.2.3")
				Expect(err).NotTo(HaveOccurred())
				Expect(chart).To(Equal(expectedChart))
			})
		})
		Describe("ChartClient", func() {
			It("should return the expected chart client", func() {
				chartClient := helm.NewDefaultChartClient()
				client := helm.NewClient(nil, nil, chartClient)
				Expect(client.ChartClient()).To(Equal(chartClient))
			})
		})
	})
})
// newRepoClient builds a mock repository loader pre-programmed for one
// successful Initialize -> DownloadIndex -> Load round trip whose index
// contains exactly the given charts.
func newRepoClient(ctrl *gomock.Controller, charts map[string]repo.ChartVersions) helm.RepositoryInitializeDownloadLoader {
	repoClient := mocks.NewMockRepositoryInitializeDownloadLoader(ctrl)
	chartRepo := &repo.ChartRepository{Config: &repo.Entry{URL: "https://repository"}}
	indexPath := "some_path.yaml"
	indexFile := &repo.IndexFile{Entries: charts}
	repoClient.EXPECT().
		Initialize(gomock.Any(), gomock.Any()).
		Return(chartRepo, nil).
		Times(1)
	repoClient.EXPECT().
		DownloadIndex(chartRepo).
		Return(indexPath, nil).
		Times(1)
	repoClient.EXPECT().
		Load(indexPath).
		Return(indexFile, nil).
		Times(1)
	return repoClient
}
|
package xml
import (
"bufio"
"io"
)
// Reader represents a XML reader.
type Reader struct {
	r   *bufio.Reader // buffered source
	err error         // last error encountered by Next
	e   Element       // last element produced by Next
	n   *string       // when set, the next text element is assigned here instead of emitted
}
// NewReader returns an initialized reader wrapping r with an 8 KiB buffer.
func NewReader(r io.Reader) *Reader {
	return &Reader{
		r: bufio.NewReaderSize(r, 2<<12),
	}
}
// Element returns the last read element.
// It is only valid until the following call to Next, which may recycle it.
func (r *Reader) Element() Element {
	return r.e
}
// Error returns the last error.
func (r *Reader) Error() error {
	return r.err
}
// release returns the current element to its sync.Pool (start/end
// elements only) and clears it so Next can produce the following one.
func (r *Reader) release() {
	switch e := r.e.(type) {
	case nil:
		return
	case *StartElement:
		releaseStart(e)
	case *EndElement:
		releaseEnd(e)
	}
	r.e = nil
}
// Next iterates until the next XML element.
// It returns true when an element is available via Element, and false on
// error (including EOF) — check Error afterwards.
func (r *Reader) Next() bool {
	// Recycle the previous element before producing a new one.
	r.release()
	var c byte
	for r.e == nil && r.err == nil {
		c, r.err = skipWS(r.r)
		if r.err == nil {
			switch c { // get next token
			case '<': // new element
				r.next()
			default: // text string
				r.r.UnreadByte()
				t, err := r.r.ReadString('<') // read until a new element starts (or EOF is reached)
				if err != nil {
					// NOTE(review): on EOF any trailing text in t is dropped —
					// confirm documents always end with a closing tag.
					r.err = err
				} else {
					// Strip the trailing '<' that ReadString consumed.
					t = t[:len(t)-1]
					if r.n != nil {
						// A pending AssignNext target consumes the text without
						// emitting a TextElement; the loop keeps going.
						*r.n, r.n = t, nil
					} else {
						tt := TextElement(t)
						r.e = &tt
					}
					// Push the '<' back so the next iteration dispatches on it.
					r.r.UnreadByte()
				}
			}
		}
	}
	return r.e != nil && r.err == nil
}
// AssignNext will assign the next TextElement to ptr.
// The text is consumed silently: Next does not emit it as an element.
func (r *Reader) AssignNext(ptr *string) {
	r.n = ptr
}
// skip reads until the next end tag '>' and discards everything read.
func (r *Reader) skip() error {
	_, err := r.r.ReadBytes('>')
	return err
}
// next will read the next byte after finding '<' and dispatch on it:
// '/' begins an end tag, '!' and '?' sections (comments, doctype,
// processing instructions) are skipped, anything else is a start tag.
// Start/end elements come from pools and are parsed in place.
func (r *Reader) next() {
	var c byte
	c, r.err = skipWS(r.r)
	if r.err == nil {
		switch c {
		case '/':
			r.e = endPool.Get().(*EndElement)
		case '!':
			// NOTE(review): skipping to the first '>' truncates comments that
			// themselves contain '>' (e.g. <!-- a > b -->) — confirm inputs.
			r.err = r.skip()
		case '?':
			r.err = r.skip()
		default:
			r.e = startPool.Get().(*StartElement)
			r.r.UnreadByte()
		}
		if r.err == nil && r.e != nil {
			r.err = r.e.parse(r.r)
			if r.err != nil {
				// Parse failure: drop the element so Next reports the error.
				r.e = nil
			}
		}
	}
}
|
package bot
// Update bundles the incoming events of one bot update cycle:
// plain messages, button clicks and inline queries.
type Update struct {
	Messages []*Message
	Buttons  []*Button
	Inlines  []*Inline
}
// Button describes one keyboard button: its caption, the handler invoked
// on click, and optional URL / inline-query behavior.
type Button struct {
	Text              string
	Handler           Handler
	URL               string
	SwitchInlineQuery string
	callbackData      string // internal payload used to route the click back to Handler
}
// Message is a plain text message received from a chat.
type Message struct {
	Text string
}
// Inline is an incoming inline query with its id and query text.
type Inline struct {
	Id    string
	Query string
}
// Response is an outgoing message: its text, an optional button keyboard
// (rows of buttons), the button that triggered it, and a handler for the
// user's reply.
type Response struct {
	Text          string
	Buttons       [][]*Button
	ClickedButton *Button
	ReplyHandler  Handler
	messageId     int // id of the sent message, used internally for edits
}
// InlineAnswer is one result offered for an inline query.
type InlineAnswer struct {
	InlineId    string
	Title       string
	MessageText string
	Description string
	Button      *Button
}
// AddButtonString appends a new single-button row built from the given
// caption and click handler.
func (r *Response) AddButtonString(text string, handler Handler) {
	r.AddButtonRow(&Button{Text: text, Handler: handler})
}
// AddButton appends a row containing only the given button.
func (r *Response) AddButton(b *Button) {
	r.AddButtonRow(b)
}
// AddButtonRow appends the given buttons as one keyboard row.
func (r *Response) AddButtonRow(b ...*Button) {
	r.Buttons = append(r.Buttons, b)
}
// ClearButtons removes all button rows, leaving an empty (non-nil) keyboard.
func (r *Response) ClearButtons() {
	r.Buttons = make([][]*Button, 0)
}
|
package main
import "fmt"
// main reads the scores of five students in each of three classes from
// standard input, then prints each class total and average followed by
// the overall total and average.
// (Translated from the original Chinese comment.)
func main() {
	var scores [3][5]float64
	for i := 0; i < len(scores); i++ {
		for j := 0; j < len(scores[i]); j++ {
			fmt.Printf("请输入第%d班,第%d号学生成绩:\n", i+1, j+1)
			fmt.Scanln(&scores[i][j])
		}
	}
	totalSum := 0.0 // fixed typo: was totlaSum
	studentCount := 0
	for i := 0; i < len(scores); i++ {
		sum := 0.0
		for j := 0; j < len(scores[i]); j++ {
			sum += scores[i][j]
		}
		totalSum += sum
		studentCount += len(scores[i])
		fmt.Printf("第%v班总分:%v,平均分:%v\n", i+1, sum, sum/float64(len(scores[i])))
	}
	// Derive the divisor from the array dimensions instead of the
	// hard-coded 15 so a size change only needs one edit.
	fmt.Printf("所有班级总分:%v,平均分:%v\n", totalSum, totalSum/float64(studentCount))
}
|
package console
// Color selects one of the basic terminal foreground colors.
type Color int

const (
	// No change of color
	COLOR_DEFAULT Color = iota
	COLOR_BLACK
	COLOR_RED
	COLOR_GREEN
	COLOR_YELLOW
	COLOR_BLUE
	COLOR_MAGENTA
	COLOR_CYAN
	COLOR_WHITE
)

// Base attributes.
// NOTE(review): Attribute is declared elsewhere in this package; the names
// appear to follow the ANSI SGR attribute sequence (0..9) — confirm before
// relying on the numeric values.
const (
	Reset Attribute = iota
	Bold
	Faint
	Italic
	Underline
	BlinkSlow
	BlinkRapid
	ReverseVideo
	Concealed
	CrossedOut
)
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"hash"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tipb/go-tipb"
"github.com/twmb/murmur3"
)
// FMSketch is used to count the number of distinct elements in a set.
type FMSketch struct {
	hashFunc hash.Hash64     // murmur3 64-bit hash applied to encoded values
	hashset  map[uint64]bool // retained hash values; all satisfy (hash & mask) == 0
	mask     uint64          // sampling mask; grows as mask*2+1 when hashset exceeds maxSize
	maxSize  int             // maximum number of retained hash values
}
// NewFMSketch returns a new FM sketch that keeps at most maxSize sampled
// hash values.
func NewFMSketch(maxSize int) *FMSketch {
	s := &FMSketch{}
	s.hashFunc = murmur3.New64()
	s.hashset = make(map[uint64]bool)
	s.maxSize = maxSize
	return s
}
// Copy makes a deep copy of the current FMSketch; a nil receiver copies to
// nil. The hash function is re-created rather than shared.
func (s *FMSketch) Copy() *FMSketch {
	if s == nil {
		return nil
	}
	dup := &FMSketch{
		hashFunc: murmur3.New64(),
		hashset:  make(map[uint64]bool, len(s.hashset)),
		mask:     s.mask,
		maxSize:  s.maxSize,
	}
	for k, v := range s.hashset {
		dup.hashset[k] = v
	}
	return dup
}
// NDV returns the ndv (number of distinct values) estimate of the sketch:
// each retained hash value represents mask+1 distinct values on average.
// A nil sketch reports zero.
func (s *FMSketch) NDV() int64 {
	if s == nil {
		return 0
	}
	return int64(s.mask+1) * int64(len(s.hashset))
}
// insertHashValue records hashVal if it survives the current sampling mask
// ((hashVal & mask) == 0). When the set outgrows maxSize, the mask doubles
// (mask*2+1) and existing entries that no longer match are evicted, roughly
// halving the retained set.
func (s *FMSketch) insertHashValue(hashVal uint64) {
	if (hashVal & s.mask) != 0 {
		return
	}
	s.hashset[hashVal] = true
	if len(s.hashset) > s.maxSize {
		s.mask = s.mask*2 + 1
		for key := range s.hashset {
			if (key & s.mask) != 0 {
				delete(s.hashset, key)
			}
		}
	}
}
// InsertValue inserts a value into the FM sketch.
// The datum is encoded to its comparable byte form, hashed with the sketch's
// hash function, and sampled via insertHashValue.
func (s *FMSketch) InsertValue(sc *stmtctx.StatementContext, value types.Datum) error {
	bytes, err := codec.EncodeValue(sc, nil, value)
	if err != nil {
		return errors.Trace(err)
	}
	s.hashFunc.Reset()
	_, err = s.hashFunc.Write(bytes)
	if err != nil {
		return errors.Trace(err)
	}
	s.insertHashValue(s.hashFunc.Sum64())
	return nil
}
// InsertRowValue inserts multi-column values to the sketch.
// Every encoded column is fed into a single hash so the whole row maps to
// one hash value.
func (s *FMSketch) InsertRowValue(sc *stmtctx.StatementContext, values []types.Datum) error {
	b := make([]byte, 0, 8)
	s.hashFunc.Reset()
	for _, v := range values {
		var err error
		// FIXED: this previously used `b, err := ...`, which shadowed the
		// outer buffer inside the loop scope, so any capacity grown by
		// EncodeValue was discarded each iteration.
		b, err = codec.EncodeValue(sc, b[:0], v)
		if err != nil {
			return err
		}
		if _, err = s.hashFunc.Write(b); err != nil {
			return err
		}
	}
	s.insertHashValue(s.hashFunc.Sum64())
	return nil
}
// MergeFMSketch merges two FM Sketch.
// The coarser (larger) mask wins: s's entries are re-filtered against it,
// then rs's entries are inserted through the normal sampling path.
func (s *FMSketch) MergeFMSketch(rs *FMSketch) {
	if s == nil || rs == nil {
		return
	}
	if s.mask < rs.mask {
		s.mask = rs.mask
		for key := range s.hashset {
			if (key & s.mask) != 0 {
				delete(s.hashset, key)
			}
		}
	}
	for key := range rs.hashset {
		s.insertHashValue(key)
	}
}
// FMSketchToProto converts FMSketch to its protobuf representation.
// Note that maxSize is not carried in the proto message; a nil sketch yields
// an empty message.
func FMSketchToProto(s *FMSketch) *tipb.FMSketch {
	protoSketch := new(tipb.FMSketch)
	if s != nil {
		protoSketch.Mask = s.mask
		for val := range s.hashset {
			protoSketch.Hashset = append(protoSketch.Hashset, val)
		}
	}
	return protoSketch
}
// FMSketchFromProto converts FMSketch from its protobuf representation.
// maxSize is left at zero here; DecodeFMSketch restores it with a fixed
// value after calling this.
func FMSketchFromProto(protoSketch *tipb.FMSketch) *FMSketch {
	if protoSketch == nil {
		return nil
	}
	sketch := &FMSketch{
		hashset: make(map[uint64]bool, len(protoSketch.Hashset)),
		mask:    protoSketch.Mask,
	}
	for _, val := range protoSketch.Hashset {
		sketch.hashset[val] = true
	}
	return sketch
}
// EncodeFMSketch encodes the given FMSketch to byte slice; a nil sketch
// encodes to a nil slice with no error.
func EncodeFMSketch(c *FMSketch) ([]byte, error) {
	if c == nil {
		return nil, nil
	}
	return FMSketchToProto(c).Marshal()
}
// DecodeFMSketch decode a FMSketch from the given byte slice.
// nil input yields a nil sketch; maxSize is restored with a fixed constant
// because it is not persisted in the protobuf message.
func DecodeFMSketch(data []byte) (*FMSketch, error) {
	if data == nil {
		return nil, nil
	}
	p := &tipb.FMSketch{}
	err := p.Unmarshal(data)
	if err != nil {
		return nil, errors.Trace(err)
	}
	fm := FMSketchFromProto(p)
	fm.maxSize = 10000 // TODO: add this attribute to PB and persist it instead of using a fixed number(executor.maxSketchSize)
	return fm, nil
}
// MemoryUsage returns the total memory usage of a FMSketch.
// A nil receiver reports zero, consistent with Copy and NDV (previously it
// panicked on len(s.hashset)).
func (s *FMSketch) MemoryUsage() (sum int64) {
	if s == nil {
		return 0
	}
	// In FMSketch, we will ignore the memory usage of `hashFunc`.
	// As for the variables mask(uint64) and maxSize(int) each will consume 8 bytes. This is the origin of the constant 16.
	// And for the variables hashset(map[uint64]bool), each element in map will consume 9 bytes(8[uint64] + 1[bool]).
	sum = int64(16 + 9*len(s.hashset))
	return
}
|
package wordsearch2
// findFirstLetterIndices returns the coordinates of every board cell whose
// value equals firstLetter, as {row, col} pairs. The board is assumed
// rectangular: every row is scanned to the width of row 0.
func findFirstLetterIndices(board [][]byte, firstLetter byte) [][]int {
	indices := make([][]int, 0)
	for row, line := range board {
		for col := range board[0] {
			if line[col] == firstLetter {
				indices = append(indices, []int{row, col})
			}
		}
	}
	return indices
}
// backTrack performs a depth-first search from (currVertIndex, currHorIndex)
// trying to extend currWord toward targetWord over orthogonally adjacent
// cells. A completed match is appended to results and the whole search
// short-circuits once any result exists. usedIndices marks the cells on the
// current path so each cell is used at most once.
func backTrack(
	results *[]string,
	targetWord string,
	currWord string,
	board [][]byte,
	currVertIndex int,
	currHorIndex int,
	currTargetWordIndex int,
	usedIndices map[[2]int]int,
) {
	if currWord == targetWord {
		*results = append(*results, currWord)
		return
	}
	// a match was already found elsewhere — prune this branch
	if len(*results) > 0 {
		return
	}
	currTargetLetter := targetWord[currTargetWordIndex]
	// the four orthogonal neighbours of the current cell
	candidatesIndices := [][]int{
		{currVertIndex, currHorIndex + 1},
		{currVertIndex, currHorIndex - 1},
		{currVertIndex + 1, currHorIndex},
		{currVertIndex - 1, currHorIndex},
	}
	for _, indicesCandidates := range candidatesIndices {
		firstInd, secondInd := indicesCandidates[0], indicesCandidates[1]
		_, indicesAlreadyUsed := usedIndices[[2]int{firstInd, secondInd}]
		// bounds check uses the width of row 0 — assumes a rectangular board
		if firstInd < len(board) && secondInd < len(board[0]) && firstInd >= 0 && secondInd >= 0 && !indicesAlreadyUsed {
			if board[firstInd][secondInd] == currTargetLetter {
				usedIndices[[2]int{firstInd, secondInd}] = 1
				backTrack(results, targetWord, currWord+string(currTargetLetter), board, firstInd, secondInd, currTargetWordIndex+1, usedIndices)
				delete(usedIndices, [2]int{firstInd, secondInd})
			}
		}
	}
}
// WordExist reports whether word can be traced through horizontally or
// vertically adjacent board cells, using each cell at most once.
func WordExist(board [][]byte, word string) bool {
	starts := findFirstLetterIndices(board, word[0])
	if len(starts) == 0 {
		return false
	}
	if len(word) == 1 {
		return true
	}
	results := make([]string, 0)
	for _, start := range starts {
		row, col := start[0], start[1]
		used := map[[2]int]int{{row, col}: 1}
		backTrack(&results, word, string(word[0]), board, row, col, 1, used)
	}
	return len(results) > 0
}
// findWords filters words down to those that can be traced on the board.
func findWords(board [][]byte, words []string) []string {
	found := make([]string, 0)
	for _, candidate := range words {
		if WordExist(board, candidate) {
			found = append(found, candidate)
		}
	}
	return found
}
|
package poc
import (
"database/sql"
"log"
"time"
_ "github.com/go-sql-driver/mysql"
)
// BlockingPoc trims a table synchronously in fixed-size chunks keyed by a
// numeric primary key.
type BlockingPoc struct {
	dbDriverName     string // database/sql driver name
	dbDataSourceName string // DSN passed to sql.Open
	ChunkSize        int    // number of rows deleted per chunk
}
// NewBlockingPoc builds a BlockingPoc for the given driver, DSN and chunk size.
func NewBlockingPoc(dbDriverName string, dbDataSourceName string, chunkSize int) *BlockingPoc {
	return &BlockingPoc{
		dbDriverName:     dbDriverName,
		dbDataSourceName: dbDataSourceName,
		ChunkSize:        chunkSize,
	}
}
// Execute - blocking trimming of a table by chunks, iterating through a
// numeric primary key with optimal chunk interval resolution (ID gaps prune).
func (p *BlockingPoc) Execute() {
	// Open up our database connection.
	db, err := sql.Open(p.dbDriverName, p.dbDataSourceName)
	// if there is an error opening the connection, handle it
	if err != nil {
		log.Print(err.Error())
	}
	defer db.Close()
	tableSize := p.getTableSize(db)
	log.Printf("Blocking PoC")
	log.Printf("Chunk Size: %d", p.ChunkSize)
	log.Printf("Table Size: %d", tableSize)
	executionStart := time.Now()
	startIntervalID := p.getStartIntervalID(db)
	endIntervalID := p.getEndIntervalID(db, startIntervalID, p.ChunkSize)
	for endIntervalID != 0 {
		p.deleteChunk(db, startIntervalID, endIntervalID)
		// FIXED: this was `startIntervalID := endIntervalID`, which declared
		// a new variable scoped to one loop iteration. The outer
		// startIntervalID never advanced, so every deleteChunk re-deleted
		// from the original start ID and deleteLastChunk swept the whole
		// range again.
		startIntervalID = endIntervalID
		endIntervalID = p.getEndIntervalID(db, startIntervalID, p.ChunkSize)
	}
	p.deleteLastChunk(db, startIntervalID)
	executionElapsed := time.Since(executionStart)
	log.Printf("Table Trimming took - %s", executionElapsed)
}
// getTableSize returns the current row count of catalog_product_entity.
func (p *BlockingPoc) getTableSize(db *sql.DB) int {
	var count int
	err := db.QueryRow("SELECT COUNT(entity_id) FROM catalog_product_entity").Scan(&count)
	if err != nil {
		panic(err.Error()) // proper error handling instead of panic in your app
	}
	return count
}
// getStartIntervalID returns the smallest entity_id in the table.
func (p *BlockingPoc) getStartIntervalID(db *sql.DB) int {
	var startIntervalID int
	err := db.QueryRow("SELECT MIN(entity_id) FROM catalog_product_entity").Scan(&startIntervalID)
	if err != nil {
		panic(err.Error()) // proper error handling instead of panic in your app
	}
	return startIntervalID
}
// getEndIntervalID returns the entity_id that is chunkSize rows past
// startIntervalID (skipping ID gaps), or 0 when fewer than chunkSize rows
// remain.
func (p *BlockingPoc) getEndIntervalID(db *sql.DB, startIntervalID int, chunkSize int) int {
	var endIntervalID int
	err := db.QueryRow("SELECT entity_id FROM catalog_product_entity WHERE entity_id >= ? ORDER BY entity_id LIMIT ?,1", startIntervalID, chunkSize).Scan(&endIntervalID)
	if err == sql.ErrNoRows {
		return 0
	}
	if err != nil {
		panic(err.Error()) // proper error handling instead of panic in your app
	}
	return endIntervalID
}
// deleteChunk deletes rows in the half-open ID range
// [startIntervalID, endIntervalID).
func (p *BlockingPoc) deleteChunk(db *sql.DB, startIntervalID int, endIntervalID int) {
	_, err := db.Exec("DELETE FROM catalog_product_entity WHERE entity_id >= ? AND entity_id < ?", startIntervalID, endIntervalID)
	if err != nil {
		panic(err.Error()) // proper error handling instead of panic in your app
	}
}
// deleteLastChunk deletes every remaining row at or above startIntervalID.
func (p *BlockingPoc) deleteLastChunk(db *sql.DB, startIntervalID int) {
	_, err := db.Exec("DELETE FROM catalog_product_entity WHERE entity_id >= ?", startIntervalID)
	if err != nil {
		panic(err.Error()) // proper error handling instead of panic in your app
	}
}
|
package bot
import (
"net/http"
"testing"
"github.com/google/go-cmp/cmp"
)
// TestConfig checks that each functional Option sets exactly the config
// field it is responsible for, and that WithLocations accumulates locations
// across multiple calls.
func TestConfig(t *testing.T) {
	testLogger := noopLogger{}
	testHTTPClient := &http.Client{}
	var tests = []struct {
		name string
		opts []Option
		want *config
	}{
		{
			name: "logger",
			opts: []Option{WithLogger(testLogger)},
			want: &config{Log: testLogger},
		},
		{
			name: "allowed_user",
			opts: []Option{WithAllowedUser("user1")},
			want: &config{AllowedUser: "user1"},
		},
		{
			name: "http_client",
			opts: []Option{WithHTTPClient(testHTTPClient)},
			want: &config{HTTPClient: testHTTPClient},
		},
		{
			name: "set_commands",
			opts: []Option{WithSetCommands()},
			want: &config{SetCommands: true},
		},
		{
			// two WithLocations calls must append, not replace
			name: "locations",
			opts: []Option{
				WithLocations(Location{Name: "loc1", Path: "/download/loc1"}),
				WithLocations(
					Location{Name: "loc2", Path: "/download/loc2"},
					Location{Name: "loc3", Path: "/download/loc3"},
				),
			},
			want: &config{
				Locations: []Location{
					{Name: "loc1", Path: "/download/loc1"},
					{Name: "loc2", Path: "/download/loc2"},
					{Name: "loc3", Path: "/download/loc3"},
				},
			},
		},
	}
	for _, tc := range tests {
		tc := tc // pin the loop variable captured by the subtest closure (pre-Go 1.22 idiom)
		t.Run(tc.name, func(t *testing.T) {
			// apply every option onto a fresh zero-valued config
			cfg := new(config)
			for _, opt := range tc.opts {
				opt.apply(cfg)
			}
			if !cmp.Equal(tc.want, cfg) {
				t.Errorf("got unexpected result, diff = \n%s", cmp.Diff(tc.want, cfg))
			}
		})
	}
}
|
package main
import (
"strings"
"fmt"
)
// func getUserListSQL(username, email string) string {
// sql := "selsct from user"
// where := []string{}
// if username != ""{
// where = append(where, fmt.Sprintf("username = '%s",username))
// }
// if email != ""{
// where = append(where, fmt.Sprintf("email = '%s",email))
// }
// return sql + "where" + strings.Join(where, " or ")
// }
// searchOpt bundles the optional filters for a user-search query.
type searchOpt struct {
	username string
	email    string
}

// getUserListoptsSQL builds a user-search SQL statement from the options.
// Non-empty filters are OR-combined in the WHERE clause; with no filters the
// bare SELECT is returned.
//
// SECURITY NOTE: values are interpolated directly into the SQL text. With
// untrusted input this is an injection risk — prefer parameterized queries.
func getUserListoptsSQL(opts searchOpt) string {
	// FIXED: "selsct" typo, missing closing quote after each value, and
	// missing spaces around "where" (the old output was e.g.
	// "selsct from userwhere username = 'x").
	sql := "select * from user"
	where := []string{}
	if opts.username != "" {
		where = append(where, fmt.Sprintf("username = '%s'", opts.username))
	}
	if opts.email != "" {
		where = append(where, fmt.Sprintf("email = '%s'", opts.email))
	}
	if len(where) == 0 {
		return sql
	}
	return sql + " where " + strings.Join(where, " or ")
}
func main() {
// fmt.Println(getUserListSQL("MeatTaro", ""))
// fmt.Println(getUserListSQL("MeatTaro", "test@gmail,.com"))
fmt.Println(getUserListoptsSQL(searchOpt{
username: "MeatTaro",
email: "test@gmail",
}))
} |
package pov
import (
"errors"
"fmt"
"time"
"github.com/qlcchain/go-qlc/common"
"github.com/qlcchain/go-qlc/common/merkle"
"github.com/qlcchain/go-qlc/common/types"
"github.com/qlcchain/go-qlc/ledger"
"github.com/qlcchain/go-qlc/ledger/process"
"github.com/qlcchain/go-qlc/log"
"github.com/qlcchain/go-qlc/trie"
"go.uber.org/zap"
)
// PovVerifier validates PoV blocks against the ledger store, chain reader
// and consensus engine.
type PovVerifier struct {
	store  ledger.Store
	chain  PovVerifierChainReader
	cs     ConsensusPov
	logger *zap.SugaredLogger
}
// PovVerifyStat accumulates the outcome of one verification run and caches
// headers/tries so individual verify steps do not re-fetch them.
type PovVerifyStat struct {
	Result        process.ProcessResult                // overall verdict
	ErrMsg        string                               // message of the error that set Result, if any
	TxResults     map[types.Hash]process.ProcessResult // per-transaction verdicts (e.g. GapTransaction)
	CurHeader     *types.PovHeader                     // lazily-built header of the block under check
	PrevHeader    *types.PovHeader                     // lazily-fetched parent header
	PrevStateTrie *trie.Trie                           // lazily-fetched parent state trie
	StateTrie     *trie.Trie                           // state trie produced by verifyState
	TxBlocks      map[types.Hash]*types.StateBlock     // resolved transaction state blocks
}
// NewPovVerifyStat returns a stat with its result maps initialized.
func NewPovVerifyStat() *PovVerifyStat {
	return &PovVerifyStat{
		TxResults: make(map[types.Hash]process.ProcessResult),
		TxBlocks:  make(map[types.Hash]*types.StateBlock),
	}
}
// setResult stores the verdict and, when err is non-nil, its message.
func (pvs *PovVerifyStat) setResult(result process.ProcessResult, err error) {
	pvs.Result = result
	if err != nil {
		pvs.ErrMsg = err.Error()
	}
}
// getCurHeader lazily builds and caches the header of the block under check.
func (pvs *PovVerifyStat) getCurHeader(pv *PovVerifier, block *types.PovBlock) *types.PovHeader {
	if pvs.CurHeader == nil {
		pvs.CurHeader = block.GetHeader()
	}
	return pvs.CurHeader
}
// getPrevHeader lazily fetches and caches the parent header by hash; stays
// nil when the chain does not know the hash.
func (pvs *PovVerifyStat) getPrevHeader(pv *PovVerifier, prevHash types.Hash) *types.PovHeader {
	if pvs.PrevHeader == nil {
		pvs.PrevHeader = pv.chain.GetHeaderByHash(prevHash)
	}
	return pvs.PrevHeader
}
// getPrevStateTrie lazily loads and caches the state trie referenced by the
// parent header; stays nil when the parent header or its trie is missing.
func (pvs *PovVerifyStat) getPrevStateTrie(pv *PovVerifier, prevHash types.Hash) *trie.Trie {
	if pvs.PrevStateTrie == nil {
		prevHeader := pvs.getPrevHeader(pv, prevHash)
		if prevHeader != nil {
			prevStateHash := prevHeader.GetStateHash()
			pvs.PrevStateTrie = pv.chain.GetStateTrie(&prevStateHash)
		}
	}
	return pvs.PrevStateTrie
}
// PovVerifierChainReader is the chain access the verifier depends on.
type PovVerifierChainReader interface {
	GetHeaderByHash(hash types.Hash) *types.PovHeader
	CalcPastMedianTime(prevHeader *types.PovHeader) int64
	GenStateTrie(prevStateHash types.Hash, txs []*types.PovTransaction) (*trie.Trie, error)
	GetStateTrie(stateHash *types.Hash) *trie.Trie
	GetAccountState(trie *trie.Trie, address types.Address) *types.PovAccountState
}
// NewPovVerifier wires a verifier to its ledger store, chain reader and
// consensus engine.
func NewPovVerifier(store ledger.Store, chain PovVerifierChainReader, cs ConsensusPov) *PovVerifier {
	return &PovVerifier{store: store, chain: chain, cs: cs, logger: log.NewLogger("pov_verifier")}
}
// Process satisfies the generic block-verifier interface; it is a stub that
// always reports process.Other for PoV blocks.
func (pv *PovVerifier) Process(block types.Block) (process.ProcessResult, error) {
	return process.Other, nil
}
// BlockCheck satisfies the generic block-verifier interface; it is a stub
// that always reports process.Other for PoV blocks.
func (pv *PovVerifier) BlockCheck(block types.Block) (process.ProcessResult, error) {
	return process.Other, nil
}
// VerifyNet performs the cheap network-level checks on a block: data
// integrity (hash/signature) and timestamp sanity. The heavier checks run
// later in VerifyFull.
func (pv *PovVerifier) VerifyNet(block *types.PovBlock) *PovVerifyStat {
	stat := NewPovVerifyStat()
	result, err := pv.verifyDataIntegrity(block, stat)
	// CONSISTENCY FIX: use the same "err or non-Progress -> setResult"
	// pattern as VerifyFull. Previously a non-Progress result without an
	// error would have fallen through here.
	if err != nil || result != process.Progress {
		stat.setResult(result, err)
		return stat
	}
	result, err = pv.verifyTimestamp(block, stat)
	if err != nil || result != process.Progress {
		stat.setResult(result, err)
		return stat
	}
	stat.Result = process.Progress
	return stat
}
// VerifyFull runs the complete verification pipeline in order: data
// integrity, timestamp, parent linkage, consensus, transactions, and state.
// The first failing (non-Progress or erroring) step is recorded on the stat
// and returned immediately.
func (pv *PovVerifier) VerifyFull(block *types.PovBlock) *PovVerifyStat {
	stat := NewPovVerifyStat()
	result, err := pv.verifyDataIntegrity(block, stat)
	if err != nil || result != process.Progress {
		stat.setResult(result, err)
		return stat
	}
	result, err = pv.verifyTimestamp(block, stat)
	if err != nil || result != process.Progress {
		stat.setResult(result, err)
		return stat
	}
	result, err = pv.verifyReferred(block, stat)
	if err != nil || result != process.Progress {
		stat.setResult(result, err)
		return stat
	}
	result, err = pv.verifyConsensus(block, stat)
	if err != nil || result != process.Progress {
		stat.setResult(result, err)
		return stat
	}
	result, err = pv.verifyTransactions(block, stat)
	if err != nil || result != process.Progress {
		stat.setResult(result, err)
		return stat
	}
	result, err = pv.verifyState(block, stat)
	if err != nil || result != process.Progress {
		stat.setResult(result, err)
		return stat
	}
	stat.Result = process.Progress
	return stat
}
// verifyDataIntegrity checks that the block hash is present and matches the
// recomputed hash, that coinbase and signature are set, and that the
// coinbase signature over the hash verifies. A block at the genesis height
// must additionally be the hard-coded genesis block.
func (pv *PovVerifier) verifyDataIntegrity(block *types.PovBlock, stat *PovVerifyStat) (process.ProcessResult, error) {
	if common.PovChainGenesisBlockHeight == block.GetHeight() {
		if !common.IsGenesisPovBlock(block) {
			return process.BadHash, fmt.Errorf("bad genesis block hash %s", block.Hash)
		}
	}
	computedHash := block.ComputeHash()
	if block.Hash.IsZero() || computedHash != block.Hash {
		return process.BadHash, fmt.Errorf("bad hash, %s != %s", computedHash, block.Hash)
	}
	if block.Coinbase.IsZero() {
		return process.BadSignature, errors.New("coinbase is zero")
	}
	if block.Signature.IsZero() {
		return process.BadSignature, errors.New("signature is zero")
	}
	isVerified := block.Coinbase.Verify(block.GetHash().Bytes(), block.GetSignature().Bytes())
	if !isVerified {
		return process.BadSignature, errors.New("bad signature")
	}
	return process.Progress, nil
}
// verifyTimestamp rejects non-positive timestamps and blocks dated more than
// PovMaxAllowedFutureTimeSec ahead of local wall-clock time.
func (pv *PovVerifier) verifyTimestamp(block *types.PovBlock, stat *PovVerifyStat) (process.ProcessResult, error) {
	if block.Timestamp <= 0 {
		return process.InvalidTime, errors.New("timestamp is zero")
	}
	if block.GetTimestamp() > (time.Now().Unix() + int64(common.PovMaxAllowedFutureTimeSec)) {
		return process.InvalidTime, fmt.Errorf("timestamp %d too far from future", block.GetTimestamp())
	}
	return process.Progress, nil
}
// verifyReferred checks linkage to the parent: the parent header must be
// known (otherwise GapPrevious), the height must be parent+1, and the
// timestamp must be at least the past-median time of the parent chain.
func (pv *PovVerifier) verifyReferred(block *types.PovBlock, stat *PovVerifyStat) (process.ProcessResult, error) {
	prevHeader := stat.getPrevHeader(pv, block.GetPrevious())
	if prevHeader == nil {
		return process.GapPrevious, nil
	}
	if block.GetHeight() != prevHeader.GetHeight()+1 {
		return process.InvalidHeight, fmt.Errorf("height %d not continue with previous %d", block.GetHeight(), prevHeader.GetHeight())
	}
	medianTime := pv.chain.CalcPastMedianTime(prevHeader)
	if block.GetTimestamp() < medianTime {
		return process.InvalidTime, fmt.Errorf("timestamp %d not greater than median time %d", block.GetTimestamp(), medianTime)
	}
	return process.Progress, nil
}
// verifyTransactions validates the block's transaction section: the declared
// count, the merkle root, the availability of every transaction in the
// ledger, and the per-(address, token) chaining order against the parent
// state trie.
func (pv *PovVerifier) verifyTransactions(block *types.PovBlock, stat *PovVerifyStat) (process.ProcessResult, error) {
	if block.TxNum != uint32(len(block.Transactions)) {
		return process.InvalidTxNum, nil
	}
	// empty block: the merkle root must be zero
	if len(block.Transactions) <= 0 {
		if !block.MerkleRoot.IsZero() {
			return process.BadMerkleRoot, fmt.Errorf("bad merkle root not zero when txs empty")
		}
		return process.Progress, nil
	}
	var txHashList []*types.Hash
	for _, tx := range block.Transactions {
		txHash := tx.Hash
		txHashList = append(txHashList, &txHash)
	}
	merkleRoot := merkle.CalcMerkleTreeRootHash(txHashList)
	if merkleRoot.IsZero() {
		return process.BadMerkleRoot, fmt.Errorf("bad merkle root is zero when txs exist")
	}
	if merkleRoot != block.MerkleRoot {
		return process.BadMerkleRoot, fmt.Errorf("bad merkle root not equals %s != %s", merkleRoot, block.MerkleRoot)
	}
	// resolve each transaction's state block from the ledger; missing ones
	// are recorded as gaps
	for _, tx := range block.Transactions {
		txBlock, _ := pv.store.GetStateBlock(tx.Hash)
		if txBlock == nil {
			stat.TxResults[tx.Hash] = process.GapTransaction
		} else {
			tx.Block = txBlock
			stat.TxBlocks[tx.Hash] = txBlock
		}
	}
	if len(stat.TxResults) > 0 {
		return process.GapTransaction, fmt.Errorf("total %d txs in pending", len(stat.TxResults))
	}
	prevTrie := stat.getPrevStateTrie(pv, block.GetPrevious())
	if prevTrie == nil {
		return process.BadStateHash, errors.New("failed to get prev state tire")
	}
	// for each (address, token), verify that every tx chains onto the latest
	// known hash, seeded from the parent state trie
	addrTokenPrevHashes := make(map[types.AddressToken]types.Hash)
	for txIdx := 0; txIdx < len(block.Transactions); txIdx++ {
		tx := block.Transactions[txIdx]
		isCA := types.IsContractAddress(tx.Block.GetAddress())
		addrToken := types.AddressToken{Address: tx.Block.GetAddress(), Token: tx.Block.GetToken()}
		prevHashWant, ok := addrTokenPrevHashes[addrToken]
		if !ok {
			// contract address's blocks are all independent, no previous
			if isCA {
				prevHashWant = types.ZeroHash
			} else {
				as := pv.chain.GetAccountState(prevTrie, tx.Block.GetAddress())
				if as != nil {
					ts := as.GetTokenState(tx.Block.GetToken())
					if ts != nil {
						prevHashWant = ts.Hash
					} else {
						prevHashWant = types.ZeroHash
					}
				} else {
					prevHashWant = types.ZeroHash
				}
			}
		}
		//pv.logger.Debugf("address %s token %s block %s", tx.Block.GetAddress(), tx.Block.GetToken(), tx.GetHash())
		//pv.logger.Debugf("prevHashWant %s txPrevHash %s", prevHashWant, tx.Block.GetPrevious())
		if prevHashWant != tx.Block.GetPrevious() {
			return process.InvalidTxOrder, errors.New("tx is not in order")
		}
		// contract address's blocks are all independent, no previous
		if !isCA {
			addrTokenPrevHashes[addrToken] = tx.Block.GetHash()
		}
	}
	return process.Progress, nil
}
// verifyState regenerates the state trie from the parent state plus this
// block's transactions and compares its root with the declared StateHash.
// The generated trie is cached on the stat for later use.
func (pv *PovVerifier) verifyState(block *types.PovBlock, stat *PovVerifyStat) (process.ProcessResult, error) {
	prevHeader := stat.getPrevHeader(pv, block.GetPrevious())
	if prevHeader == nil {
		return process.GapPrevious, fmt.Errorf("prev block %s pending", block.GetPrevious())
	}
	stateTrie, err := pv.chain.GenStateTrie(prevHeader.StateHash, block.Transactions)
	if err != nil {
		return process.BadStateHash, err
	}
	// a nil trie is treated as the zero state hash
	stateHash := types.Hash{}
	if stateTrie != nil {
		stateHash = *stateTrie.Hash()
	}
	if stateHash != block.StateHash {
		return process.BadStateHash, fmt.Errorf("state hash is not equals %s != %s", stateHash, block.StateHash)
	}
	stat.StateTrie = stateTrie
	return process.Progress, nil
}
// verifyConsensus delegates header validation to the consensus engine; any
// error maps to BadConsensus.
func (pv *PovVerifier) verifyConsensus(block *types.PovBlock, stat *PovVerifyStat) (process.ProcessResult, error) {
	header := stat.getCurHeader(pv, block)
	err := pv.cs.VerifyHeader(header)
	if err != nil {
		return process.BadConsensus, err
	}
	return process.Progress, nil
}
|
// Name of the package is not the directory name
// Package name is what we use in the package declaration
// Directory name is used to find the package location
package display
import "fmt"
// Exported functions as the name is capitalized
// ConsoleLogString writes str to standard output followed by a newline.
func ConsoleLogString(str string) {
	fmt.Printf("%s\n", str)
}
// ConsoleLogInt writes num to standard output followed by a newline.
func ConsoleLogInt(num int) {
	fmt.Printf("%d\n", num)
}
|
package main
import (
"fmt"
"github.com/mitchellh/mapstructure"
)
// addChannel decodes the incoming payload into a Channel, stamps it with a
// fixed ID and pushes a "channel add" message onto the client's send queue.
func addChannel(client *Client, data interface{}) {
	var channel Channel
	var message Message
	// NOTE(review): Decode's error is ignored; a malformed payload leaves
	// channel at its zero value — consider handling the error.
	mapstructure.Decode(data, &channel)
	fmt.Printf("%#v\n", channel)
	channel.ID = "1" // hard-coded ID — presumably a placeholder; confirm
	message.Name = "channel add"
	message.Data = channel
	client.send <- message
}
|
package command
import (
"fmt"
"os"
"strings"
"text/tabwriter"
pb "github.com/ernestoalejo/tfg-fn/protos"
)
var (
	// client is the shared RPC client configured via SetClient.
	client pb.FnClient
	// writer aligns tab-separated command output on stdout.
	writer = tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
)
// SetClient installs the RPC client used by the commands in this package.
func SetClient(c pb.FnClient) {
	client = c
}
// FlushOutput flushes buffered tabular output to stdout.
// NOTE(review): Flush's error is ignored — presumably acceptable for CLI
// stdout; confirm.
func FlushOutput() {
	writer.Flush()
}
// tabPrint writes fields to the shared tabwriter as one tab-separated line.
func tabPrint(fields []string) {
	// FIXED: the joined fields were previously passed to Fprintf as the
	// FORMAT string, so any '%' inside a field was interpreted as a verb
	// and garbled the output. Fprintln writes the data verbatim (and adds
	// the trailing newline the old code concatenated by hand).
	fmt.Fprintln(writer, strings.Join(fields, "\t"))
}
|
package utils
import (
"fmt"
"github.com/sendgrid/sendgrid-go"
"github.com/sendgrid/sendgrid-go/helpers/mail"
)
// EmailData encapsulates email sending data used to fill the dynamic
// transactional template in ProcessEmail.
type EmailData struct {
	To             []*mail.Email // recipient list
	PageTitle      string
	Preheader      string
	Subject        string
	BodyTitle      string
	FirstBodyText  string
	SecondBodyText string
	// Button holds the call-to-action link rendered by the template.
	Button struct {
		URL  string
		Text string
	}
}
// SendEmail - sends email to customers through the SendGrid v3 mail/send
// endpoint. The outcome is only printed to stdout; errors are not returned
// to the caller.
func SendEmail(emailBody []byte, apiKey string) {
	request := sendgrid.GetRequest(apiKey, "/v3/mail/send", "https://api.sendgrid.com")
	request.Method = "POST"
	request.Body = emailBody
	response, err := sendgrid.API(request)
	if err != nil {
		fmt.Println(err)
	} else {
		fmt.Println(response.StatusCode)
		fmt.Println(response.Body)
		fmt.Println(response.Headers)
	}
}
// ProcessEmail builds a SendGrid request body for the dynamic transactional
// template, filled from emailData.
// Note: you must customize subject line of the dynamic template itself.
// Note: you may not use substitutions with dynamic templates.
func ProcessEmail(emailData EmailData) []byte {
	m := mail.NewV3Mail()
	m.SetFrom(mail.NewEmail("Kimberly Ryan", "noreply@kimberly-ryan.com"))
	m.Subject = emailData.Subject
	m.SetTemplateID("d-5cad13d382184d6c913caa58ea0944b9")

	p := mail.NewPersonalization()
	p.AddTos(emailData.To...)
	p.Subject = emailData.Subject

	// template placeholders
	p.SetDynamicTemplateData("page_title", emailData.PageTitle)
	p.SetDynamicTemplateData("subject", emailData.Subject)
	p.SetDynamicTemplateData("preheader", emailData.Preheader)
	p.SetDynamicTemplateData("title", emailData.BodyTitle)
	p.SetDynamicTemplateData("first_body", emailData.FirstBodyText)
	p.SetDynamicTemplateData("second_body", emailData.SecondBodyText)
	p.SetDynamicTemplateData("button", map[string]string{
		"text": emailData.Button.Text,
		"url":  emailData.Button.URL,
	})

	m.AddPersonalizations(p)
	return mail.GetRequestBody(m)
}
|
package main
/*
- scan reads user input
- takes a pointer as an argument
- typed data is written to pointer
- retuns number of scanned items (and the error or nil)
*/
import (
"fmt"
)
// main reads one floating point number from stdin and prints its truncated
// integer value; on a scan failure it prints the item count and the error.
func main() {
	var n float64
	fmt.Printf("Please enter a floating point number and press ENTER.\n")
	num, err := fmt.Scan(&n)
	if err != nil {
		fmt.Printf("num: %d", num)
		fmt.Printf("error: %s", err)
		return
	}
	fmt.Printf("truncated number: %d", int(n))
}
|
package services
import (
"context"
"github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/tppgit/we_service/pkg/errors"
"testing"
"github.com/tppgit/we_service/core"
"github.com/tppgit/we_service/entity/user"
"github.com/tppgit/we_service/pkg/auth"
)
// mockUserService is a testify mock of user.UserService; the embedded real
// interface satisfies any method not explicitly mocked below.
type mockUserService struct {
	mock.Mock
	user.UserService
}
// GetUserByEmail returns the canned user/error registered via .On().
func (m *mockUserService) GetUserByEmail(email string) (*user.User, error) {
	arg := m.Called(email)
	return arg.Get(0).(*user.User), arg.Error(1)
}
// UpdateUserToken returns the canned error registered via .On().
func (m *mockUserService) UpdateUserToken(id uuid.UUID, token string) error {
	arg := m.Called(id, token)
	return arg.Error(0)
}
// CreateUser returns the canned error registered via .On().
func (m *mockUserService) CreateUser(u *user.User) error {
	arg := m.Called(u)
	return arg.Error(0)
}
// mockWumService is a testify mock of the Wumbo user lookup service.
type mockWumService struct {
	mock.Mock
}
// GetUser returns the canned WumboUser/error registered via .On().
// NOTE(review): the type assertion panics if a test registers an untyped
// nil as return value 0 and the method is actually invoked.
func (m *mockWumService) GetUser(email string) (auth.WumboUser, error) {
	arg := m.Called(email)
	return arg.Get(0).(auth.WumboUser), arg.Error(1)
}
// TestUserGrpcImpl_SyncToken covers the happy path: the user already exists
// in the local store, so SyncToken must update the stored token via
// UpdateUserToken with the token from the request.
func TestUserGrpcImpl_SyncToken(t *testing.T) {
	ctx := context.Background()
	defer ctx.Done()
	type fields struct {
		UserService user.UserService
		Wumbo       auth.WumboServiceDefinition
	}
	type args struct {
		info *core.UserInfoPostMessage
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name: "Create token successfully",
			args: args{
				info: &core.UserInfoPostMessage{
					Email: "paul@tpp.com",
					Token: "new-token",
				},
			},
			fields: fields{
				UserService: func() *mockUserService {
					mUser := new(mockUserService)
					// existing user found -> token-update path
					mUser.On("GetUserByEmail", mock.Anything).Return(&user.User{Token: "old-token", Email: "paul@tpp.com"}, nil)
					mUser.On("UpdateUserToken", mock.Anything, mock.Anything).Return(nil)
					return mUser
				}(),
				Wumbo: func() *mockWumService {
					mWumbo := new(mockWumService)
					// NOTE(review): an untyped-nil return value would panic in
					// the mock's type assertion if GetUser were invoked —
					// presumably it is not on this path; confirm.
					mWumbo.On("GetUser", mock.Anything).Return(nil, nil)
					return mWumbo
				}(),
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m := &UserGrpcImpl{
				UserService: tt.fields.UserService,
				Wumbo:       tt.fields.Wumbo,
			}
			_, err := m.SyncToken(ctx, tt.args.info)
			if (err != nil) != tt.wantErr {
				t.Errorf("UserGrpcImpl.SyncToken() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// the stored token must be the one carried by the request
			mUser, _ := tt.fields.UserService.(*mockUserService)
			mUser.AssertCalled(t, "UpdateUserToken", mock.Anything, mock.MatchedBy(func(token string) bool {
				assert.Equal(t, token, tt.args.info.Token)
				return true
			}))
		})
	}
}
// TestUserGrpcImpl_SyncToken_FromWumbo covers the fallback path: the user is
// missing locally (GetUserByEmail -> NotFound), so SyncToken must fetch the
// profile from Wumbo and create a local user with the request token.
func TestUserGrpcImpl_SyncToken_FromWumbo(t *testing.T) {
	ctx := context.Background()
	defer ctx.Done()
	type fields struct {
		UserService user.UserService
		Wumbo       auth.WumboServiceDefinition
	}
	type args struct {
		info *core.UserInfoPostMessage
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name: "Create token successfully",
			args: args{
				info: &core.UserInfoPostMessage{
					Email: "paul@tpp.com",
					Token: "new-token",
				},
			},
			fields: fields{
				UserService: func() *mockUserService {
					mUser := new(mockUserService)
					// local lookup misses -> creation path
					mUser.On("GetUserByEmail", mock.Anything).Return(&user.User{}, errors.NotFound)
					mUser.On("CreateUser", mock.Anything).Return(nil)
					return mUser
				}(),
				Wumbo: func() *mockWumService {
					mWumbo := new(mockWumService)
					mWumbo.On("GetUser", mock.Anything).Return(auth.WumboUser{Email: "paul@tpp.com", Name: "paulaan"}, nil)
					return mWumbo
				}(),
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m := &UserGrpcImpl{
				UserService: tt.fields.UserService,
				Wumbo:       tt.fields.Wumbo,
			}
			_, err := m.SyncToken(ctx, tt.args.info)
			if (err != nil) != tt.wantErr {
				t.Errorf("UserGrpcImpl.SyncToken() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// the created user must combine request token/email with the
			// Wumbo profile name
			mUser, _ := tt.fields.UserService.(*mockUserService)
			mUser.AssertCalled(t, "CreateUser", mock.MatchedBy(func(u *user.User) bool {
				assert.Equal(t, u.Token, tt.args.info.Token)
				assert.Equal(t, u.Email, tt.args.info.Email)
				assert.Equal(t, u.Name, "paulaan")
				return true
			}))
		})
	}
}
// TestUserGrpcImpl_SyncToken_Errors checks that an empty request message
// yields an error from SyncToken.
func TestUserGrpcImpl_SyncToken_Errors(t *testing.T) {
	ctx := context.Background()
	defer ctx.Done()
	type fields struct {
		UserService user.UserService
		Wumbo       auth.WumboServiceDefinition
	}
	type args struct {
		info *core.UserInfoPostMessage
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name: "Create token successfully",
			args: args{
				info: &core.UserInfoPostMessage{},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m := &UserGrpcImpl{
				UserService: tt.fields.UserService,
				Wumbo:       tt.fields.Wumbo,
			}
			_, err := m.SyncToken(ctx, tt.args.info)
			if (err != nil) != tt.wantErr {
				t.Errorf("UserGrpcImpl.SyncToken() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
		})
	}
}
|
package Problem0373
import "container/heap"
type pair struct {
i int
j int
sum int
}
type priorityQueue []*pair
func (pq priorityQueue) Len() int { return len(pq) }
func (pq priorityQueue) Less(i, j int) bool {
return pq[i].sum < pq[j].sum
}
func (pq priorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
}
func (pq *priorityQueue) Push(x interface{}) {
p := x.(*pair)
*pq = append(*pq, p)
}
func (pq *priorityQueue) Pop() interface{} {
old := *pq
n := len(old)
p := old[n-1]
*pq = old[0 : n-1]
return p
}
// kSmallestPairs returns the k pairs (a[i], b[j]) with the smallest sums,
// given ascending slices a and b (LeetCode 373).
func kSmallestPairs(a, b []int, k int) [][]int {
	var res [][]int
	if len(a) == 0 || len(b) == 0 {
		return res
	}
	// Imagine a len(a) x len(b) matrix mat with mat[i][j] == a[i]+b[j];
	// res collects the value pairs of its k smallest entries. Since a and b
	// are ascending, every row and column of mat is ascending too.
	pqLen := min(k, len(a))
	// Seed the heap with the first column, mat[:][0].
	pq := make(priorityQueue, pqLen)
	for l := 0; l < pqLen; l++ {
		pq[l] = &pair{i: l, j: 0, sum: a[l] + b[0]}
	}
	heap.Init(&pq)
	for ; k > 0 && len(pq) > 0; k-- {
		// FIXED IDIOM: this variable used to be named `min`, shadowing the
		// package-level min helper inside the loop body.
		top := heap.Pop(&pq).(*pair)
		res = append(res, []int{a[top.i], b[top.j]})
		// mat[i][j] was popped; push its row successor mat[i][j+1] so every
		// row keeps its smallest remaining entry in the heap, guaranteeing
		// the heap top is always the globally smallest remaining sum.
		if top.j+1 < len(b) {
			heap.Push(&pq, &pair{i: top.i, j: top.j + 1, sum: a[top.i] + b[top.j+1]})
		}
	}
	return res
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"log"
"sync"
)
type (
	// PoolCnt fans messages from the buffered Bus channel out to registered
	// PoolUnits keyed by id. The embedded mutex guards the m map.
	PoolCnt struct {
		Bus chan []byte
		sync.Mutex
		m map[string]*PoolUnit
	}
)
const (
	// BUS_LEN is the buffered capacity of PoolCnt.Bus.
	BUS_LEN = 2000000
)
// NewPoolCnt builds a pool with a buffered bus and starts the background
// dispatch goroutine that drains it.
func NewPoolCnt() *PoolCnt {
	bean := &PoolCnt{
		Bus: make(chan []byte, BUS_LEN),
		m:   make(map[string]*PoolUnit),
	}
	go bean.loop()
	return bean
}
// loop drains the Bus channel, dispatching each message to its destination
// unit. A panic escapes only as far as the deferred recover, which logs it
// (and ends the goroutine). The loop returns when Bus is closed; previously
// a closed channel made the single-case select spin hot forever, because a
// receive from a closed channel returns immediately with ok == false.
func (r *PoolCnt) loop() {
	defer func() {
		if err := recover(); err != nil {
			log.Println(err)
		}
	}()
	for {
		fmt.Println(" .......loop bus wait.....len(Bus)..", len(r.Bus))
		data, ok := <-r.Bus
		if !ok {
			// Bus was closed: stop instead of busy-looping.
			return
		}
		fmt.Println(" BUS read", string(data), " len:", len(r.Bus))
		if err := r.dispatch(data); err != nil {
			fmt.Println(err)
		} else {
			fmt.Println("message has leave bus-------")
		}
	}
}
// GetCnt returns the unit registered under key, or nil when absent.
func (r *PoolCnt) GetCnt(key string) *PoolUnit {
	r.Lock()
	defer r.Unlock()
	// A missing key yields the zero value nil, which is exactly the
	// "not found" result the caller expects.
	return r.m[key]
}
// Push registers conn under key, overwriting any previous entry.
func (r *PoolCnt) Push(key string, conn *PoolUnit) {
	fmt.Println("Push,id:", key)
	r.Lock()
	defer r.Unlock()
	r.m[key] = conn
}
// Exist reports whether a unit is registered under key.
func (r *PoolCnt) Exist(key string) bool {
	r.Lock()
	defer r.Unlock()
	if _, ok := r.m[key]; ok {
		return true
	}
	return false
}
// dispatch decodes buffer as a MsgData envelope and forwards the re-encoded
// message to the destination unit, if one is registered. A nil error is
// returned when no unit matches the destination.
//
// A panic inside the method is recovered and reported through the named
// return value; previously the recover swallowed the panic and the caller
// saw a nil error for a message that was never delivered.
func (r *PoolCnt) dispatch(buffer []byte) (err error) {
	fmt.Println("PoolCnt-->dispatch")
	defer func() {
		if rec := recover(); rec != nil {
			fmt.Println("dispatch err:", rec)
			err = fmt.Errorf("dispatch panic: %v", rec)
		}
	}()
	if len(buffer) == 0 {
		return errors.New("no data to send")
	}
	bean := new(MsgData)
	if err := json.Unmarshal(buffer, bean); err != nil {
		return err
	}
	fmt.Println("PoolCnt-->dispatch,before 11111111")
	// Only the destination matters for routing; the source is ignored here.
	_, dst := r.getSrcDst(bean)
	if obj := r.GetUnit(dst); obj != nil {
		fmt.Println("PoolCnt-->dispatch,before 22222")
		s, err := json.Marshal(bean)
		if err != nil {
			return err
		}
		fmt.Println("PoolCnt-->dispatch,before 33333")
		return obj.Dispatch(s)
	}
	return nil
}
// getSrcDst extracts the source and destination ids from the message.
func (r *PoolCnt) getSrcDst(bean *MsgData) (src, dst string) {
	src, dst = bean.Src, bean.Dst
	return
}
// GetUnit returns the unit registered under key, or nil when absent or
// when a nil unit was stored.
//
// Fix: r.m is written under the mutex by Push, but this method previously
// read it without locking — a data race that -race would flag. The lookup
// is now performed under the same mutex.
func (r *PoolCnt) GetUnit(key string) *PoolUnit {
	r.Lock()
	defer r.Unlock()
	obj, ok := r.m[key]
	if !ok || obj == nil {
		return nil
	}
	return obj
}
|
package main // import "github.com/kidoda/quikalarm"
import (
	// Blank-imported until flag parsing is implemented: a plain unused
	// import is a compile error in Go.
	_ "github.com/spf13/pflag"
)
// main is the quikalarm entry point. It currently only constructs the clock
// and buzzer and prints the usage text; flag parsing is not wired up yet.
func main() {
	var Usage = `Usage: quikalarm [options...] [arguments]
Simple alarm clock with few options.
Mandatory arguments to long options are mandatory for short options too.
Options:
-z, --snooze-time set snooze duration; The snooze prompt will appear once the alert buzzer starts;
it defaults to 'YES', so just pressing 'ENTER' will snooze for this amount of time.
-s, --set set the alarm wake-up time; argument in 12h or 24h format,
in the form [hour:minute AM/PM]. (e.g., 9:00AM, 22:00, or 08:30PM)
-t, --timer timer mode; in this mode the arguments given to -s, --set will be treated
as a duration instead of an absolute time.`
	// Construct the clock and buzzer for their initialization side effects.
	// The blank uses below fix the "declared and not used" compile error the
	// original had, without dropping the constructor calls; remove them once
	// clock and buzz are wired into the alarm logic.
	var (
		clock = NewClock()
		buzz  = loadBuzzer()
	)
	_, _ = clock, buzz
	print(Usage)
}
|
package main
import (
"bytes"
"io"
"net/http"
"testing"
)
const webSite = "http://example.com"
// TestWget fetches webSite twice — once through wget and once through a
// plain http.Get — and requires the two response bodies to be identical.
// NOTE(review): this test needs live network access to webSite.
func TestWget(t *testing.T) {
	var got, want bytes.Buffer
	if err := wget(webSite, &got); err != nil {
		t.Fatalf("%v", err)
	}
	resp, err := http.Get(webSite)
	if err != nil {
		t.Fatalf("%v", err)
	}
	defer resp.Body.Close()
	if _, err = io.Copy(&want, resp.Body); err != nil {
		t.Fatalf("%v", err)
	}
	// bytes.Equal is the idiomatic equality check (staticcheck S1004),
	// replacing bytes.Compare(...) != 0.
	if !bytes.Equal(got.Bytes(), want.Bytes()) {
		t.Fatalf("Fetching %v: want %v got %v", webSite, want, got)
	}
}
|
package stylei
import (
"database/sql"
"github.com/dropbox/godropbox/memcache"
"github.com/wgyuuu/storage"
)
// NewTesStorage wires up a complex storage proxy for "tes" records: a MySQL
// layer as the source of truth with a memcache layer in front of it, both
// sharing the same TesEncoding.
func NewTesStorage(db *sql.DB, mc memcache.Client, prefereExpireTime int) storage.ComplexStorageProxy {
	enc := TesEncoding{}
	mysqlLayer := storage.NewComplexMysqlStorage(db, enc)
	cacheLayer := storage.NewMemcStorage(mc, "tes", prefereExpireTime, enc)
	return storage.NewComplexStorageProxy(cacheLayer, mysqlLayer)
}
|
package chrono
import "time"
// Zone names an IANA time zone, as accepted by time.LoadLocation.
type Zone string

// Format is a reference-time layout string for time.Format / time.Parse.
type Format string

const (
	Zone_UTC     Zone = "UTC"
	Zone_Bangkok Zone = "Asia/Bangkok"
)

const (
	// Format_ISO8601 is an ISO-8601 layout with milliseconds and numeric offset.
	Format_ISO8601 Format = "2006-01-02T15:04:05.000-0700"
	// Format_TopValue is a plain "date time" layout without zone information.
	Format_TopValue Format = "2006-01-02 15:04:05"
)
// defaultZone and defaultLocation hold the package-wide default time zone,
// set via SetDefaultZone.
var defaultZone Zone
var defaultLocation *time.Location

// SetDefaultZone resolves zone with time.LoadLocation and installs it as the
// package default, returning any resolution error.
//
// Fix: the LoadLocation error was previously discarded, so an unknown zone
// silently left defaultLocation nil. Now the previous defaults are kept on
// failure and the error is surfaced. Existing call statements remain valid
// (Go allows discarding a function's return value in a statement).
func SetDefaultZone(zone Zone) error {
	loc, err := time.LoadLocation(string(zone))
	if err != nil {
		return err
	}
	defaultZone = zone
	defaultLocation = loc
	return nil
}
|
package models
import (
"hotpler.com/v1/lib/common"
)
// Post data model
// Post data model, mapped by gorm to the posts table.
type Post struct {
	// Id is a 26-char primary key — presumably a ULID; confirm with the generator.
	Id string `gorm:"column:id;type:varchar(26);primary_key"`
	// CreateAt/UpdateAt/DeleteAt are int64 timestamps — assumes epoch-based
	// values (seconds or millis); TODO confirm against the writer.
	CreateAt int64 `gorm:"column:create_at;type:bigint(20)"`
	UpdateAt int64 `gorm:"column:update_at;type:bigint(20)"`
	DeleteAt int64 `gorm:"column:delete_at;type:bigint(20)"`
	Title    string `gorm:"column:title;type:varchar(128)"`
	Text     string `gorm:"column:text;type:Text"`
	// User is the owning user, joined through user_id.
	User   User   `gorm:"foreignkey:user_id"`
	UserId string `gorm:"column:user_id;type:varchar(26)"`
}
// Serialize serializes post data
func (p Post) Serialize() common.JSON {
return common.JSON{
"id" : p.Id,
"create_at" : p.CreateAt,
"update_at" : p.UpdateAt,
"title" : p.Title,
"text" : p.Text,
"user" : p.User.Serialize(),
}
}
|
package main
import (
"context"
"database/sql"
"fmt"
irc "github.com/fluffle/goirc/client"
_ "github.com/lib/pq"
"github.com/saegewerk/GoTwitchRouter/pkg/config"
DB "github.com/saegewerk/GoTwitchRouter/pkg/db"
"github.com/saegewerk/GoTwitchRouter/pkg/twitchchat"
"github.com/saegewerk/GoTwitchRouter/pkg/twitchrouter"
"google.golang.org/grpc"
"io"
"log"
"net"
"os"
"time"
)
var (
	// twitchConfig holds the IRC nickname/OAuth/channel; set by initConfiguration.
	twitchConfig *twitchchat.Configuration
	// twitch is the live chat connection; assigned in main before the gRPC
	// server starts serving.
	twitch *twitchchat.Chat
)
// initConfiguration builds the package-level twitchConfig from the given
// nickname and channel, reading the OAuth token from the TWITCH env var.
func initConfiguration(nickname, channel string) {
	token := "oauth:" + os.Getenv("TWITCH")
	twitchConfig = twitchchat.NewConfiguration(nickname, token, channel)
}
// server implements the TwitchRouter gRPC service, tracking registered
// client apps and the queue through which new apps announce themselves.
type server struct {
	twitchrouter.UnimplementedTwitchRouterServer
	// apps holds the currently registered apps plus the registration queue
	// drained by the chat callback in runWithCallbacks.
	apps twitchrouter.AppsQueued
	// messages appears unused in this file — TODO confirm before removing.
	messages chan string
}
// Message is the bidirectional streaming RPC for client apps. The app's
// first request registers it (command, help text, access level); afterwards
// the server forwards matching chat commands to the app through Queue, and
// relays any Msg payloads the app sends back into Twitch chat.
func (s *server) Message(stream twitchrouter.TwitchRouter_MessageServer) error {
	ctx := stream.Context()
	// Queue carries chat commands destined for this app; runWithCallbacks
	// sends into it through the registered App's Queue pointer.
	Queue := make(chan twitchrouter.CmdMessage)
	// Closed tells the callback loop this app's stream died so it gets pruned.
	// NOTE(review): Closed (and err below) are shared between this loop and
	// the forwarder goroutine without synchronization — a data race under
	// -race; consider atomic.Bool and a goroutine-local err.
	Closed := false
	Registered := false
	for {
		// Bail out promptly when the stream's context is done.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		// receive data from stream
		req, err := stream.Recv()
		if err == io.EOF {
			// return will close stream from server side
			log.Println("exit")
			return nil
		}
		if err != nil {
			Closed = true
			log.Printf("receive error %v", err)
			continue
		}
		// A request carrying Msg is text the app wants posted to chat.
		if req.Msg != nil {
			twitch.SendMessage(*req.Msg)
		}
		if !(Registered) {
			// First request: start the forwarder that drains Queue into the
			// gRPC stream, then hand the app to the router for registration.
			go func() {
				for {
					r := <-Queue
					uuid := r.Uuid.String()
					err = stream.Send(&twitchrouter.MessageResponse{
						Msg:     &r.Msg,
						Command: &r.Cmd,
						Uuid:    &uuid,
					})
					if err != nil {
						println(err.Error())
						Closed = true
						break
					}
				}
			}()
			// Blocks until the chat callback drains the registration queue.
			s.apps.Queue <- twitchrouter.App{
				CmdRegister: twitchrouter.CmdRegister{
					Cmd:         *req.Command,
					Help:        *req.Help,
					AccessLevel: *req.AccessLevel,
				},
				Queue:  &Queue,
				Closed: &Closed,
			}
			Registered = true
		}
	}
	// Unreachable: the for loop above only exits via return.
	return nil
}
// main wires everything together: loads the YAML config, opens Postgres,
// registers the TwitchRouter gRPC service on localhost:8765, connects to
// Twitch chat, and serves until the gRPC listener fails.
func main() {
	c, err := config.YAML()
	if err != nil {
		println(err.Error())
		return
	}
	// Must run before twitchchat.NewChat below: it populates twitchConfig.
	initConfiguration(c.Twitch.Nickname, c.Twitch.Channel)
	db, err := sql.Open("postgres", c.SprintfDBConfig())
	if err != nil {
		println(err.Error())
		return
	}
	queries := DB.New(db)
	defer func() {
		err = db.Close()
		if err != nil {
			log.Fatal(err)
		}
	}()
	lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", 8765))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	var opts []grpc.ServerOption
	grpcServer := grpc.NewServer(opts...)
	// The registration queue is unbuffered: app registrations rendezvous
	// with the chat callback in runWithCallbacks.
	server := &server{apps: twitchrouter.AppsQueued{
		Queue: make(chan twitchrouter.App),
		Apps:  make([]twitchrouter.App, 0),
	}}
	twitchrouter.RegisterTwitchRouterServer(grpcServer, server)
	twitch = twitchchat.NewChat(twitchConfig)
	// Run the IRC callback loop concurrently with the gRPC server.
	go server.runWithCallbacks(twitch, queries)
	if err = grpcServer.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
// runWithCallbacks connects to Twitch IRC and handles every chat line:
// it persists users and messages to the database, prunes apps whose streams
// closed, accepts newly registering apps, answers a bare "!" with a help
// listing, and routes "!"-prefixed commands (plus join/part/usernotice
// events) to matching app queues. It blocks until the disconnect callback
// fires.
func (s *server) runWithCallbacks(twitch *twitchchat.Chat, queries *DB.Queries) {
	stop := make(chan struct{})
	defer close(stop)
	err := twitch.ConnectWithCallbacks(
		func() {
			fmt.Println("Connected to Twitch IRC")
		},
		func() {
			fmt.Println("Disconnected from Twitch IRC")
			// Unblock the <-stop wait below so this function can return.
			stop <- struct{}{}
		},
		func(message *irc.Line, event string) {
			ctx := context.Background()
			// Look up the sender, inserting the user row on first sight.
			userUUID, err := queries.GetUserUUIDByNick(ctx, message.Nick)
			if err != nil {
				if err == sql.ErrNoRows {
					userUUID, err = queries.InsertUser(ctx, DB.InsertUserParams{
						Nick:        message.Nick,
						Name:        message.Args[0][1:],
						Accesslevel: 0,
						CreatedAt:   time.Now(),
						UpdatedAt:   time.Now(),
					})
					if err != nil {
						println(err.Error())
					}
				} else {
					println(err.Error())
				}
			}
			// USERNOTICE lines carry a msg-id tag identifying the notice kind.
			msgID := ""
			if event == "USERNOTICE" {
				msgID = message.Tags["msg-id"]
			}
			msgUUID, err := queries.InsertMessage(ctx, DB.InsertMessageParams{
				Msg:    message.Args[1],
				MsgID:  msgID,
				Event:  event,
				FkUser: userUUID,
			})
			// Prune apps whose streams have been marked closed.
			for i := 0; i < len(s.apps.Apps); {
				if *s.apps.Apps[i].Closed {
					s.apps.Apps = append(s.apps.Apps[:i], s.apps.Apps[i+1:]...)
				} else {
					i++
				}
			}
			//check app queue (non-blocking) for a newly registering app
			select {
			case app, ok := <-s.apps.Queue:
				if ok {
					s.apps.Apps = append(s.apps.Apps, app)
				} else {
					fmt.Println("Channel closed!")
				}
			default:
			}
			log.Printf("Event: %s, Nick: %s, MsgId: %s, Msg: %s\n", event, message.Nick, msgID, message.Args[1])
			if message.Args[1][0] == '!' {
				// A bare "!" lists every registered command with its help text,
				// flushed in chunks to respect the chat message length limit.
				if len(message.Args[1]) == 1 {
					res := ""
					for _, app := range s.apps.Apps {
						tmp := res + "!" + app.CmdRegister.Cmd + ": " + res + app.CmdRegister.Help + "; "
						//check if msg is too long
						if len(tmp)+len(res) >= 500 {
							twitch.SendMessage(res)
							res = tmp
						} else {
							res = res + tmp
						}
					}
					twitch.SendMessage(res)
				}
				// Route the command to every app whose registered Cmd matches.
				// Sends are non-blocking: a slow or full app misses the message.
				for _, app := range s.apps.Apps {
					// NOTE(review): this slice assumes the line is at least
					// len(Cmd)+1 bytes — a shorter line panics; confirm inputs.
					msg := message.Args[1][len(app.CmdRegister.Cmd)+1:]
					if message.Args[1][1:len(app.CmdRegister.Cmd)+1] == app.CmdRegister.Cmd &&
						"join" != app.CmdRegister.Cmd && "part" != app.CmdRegister.Cmd && "usernotice" != app.CmdRegister.Cmd {
						select {
						case *app.Queue <- twitchrouter.CmdMessage{
							Cmd:   app.CmdRegister.Cmd,
							Msg:   msg,
							MsgId: msgID,
							Uuid:  msgUUID,
						}:
						default:
						}
						//twitch.SendMessage(*res.Msg)
					} else if "join" == app.CmdRegister.Cmd && event == "join" {
						select {
						case *app.Queue <- twitchrouter.CmdMessage{
							Cmd:   app.CmdRegister.Cmd,
							Msg:   msg,
							MsgId: msgID,
							Uuid:  msgUUID,
						}:
						default:
						}
					} else if "part" == app.CmdRegister.Cmd && event == "part" {
						select {
						case *app.Queue <- twitchrouter.CmdMessage{
							Cmd:   app.CmdRegister.Cmd,
							Msg:   msg,
							MsgId: msgID,
							Uuid:  msgUUID,
						}:
						default:
						}
					} else if "usernotice" == app.CmdRegister.Cmd && event == "usernotice" {
						select {
						case *app.Queue <- twitchrouter.CmdMessage{
							Cmd:   app.CmdRegister.Cmd,
							Msg:   msg,
							MsgId: msgID,
							Uuid:  msgUUID,
						}:
						default:
						}
					}
				}
			}
		},
	)
	if err != nil {
		return
	}
	<-stop
}
|
package sort_util
import "github.com/Lxy417165709/LeetCode-Golang/新刷题/util/struct_util"
// QuickSort sorts nums in place using the quicksort algorithm.
func QuickSort(nums []int) {
	// Slices shorter than two elements are already sorted.
	if len(nums) < 2 {
		return
	}
	// Partition around nums[0], then recursively sort each side.
	p := Partition(nums)
	QuickSort(nums[:p])
	QuickSort(nums[p+1:])
}

// Partition rearranges nums around the pivot nums[0] so that every element
// left of the returned index is <= pivot and every element right of it is
// >= pivot, then moves the pivot into that slot and returns its index.
func Partition(nums []int) int {
	pivot := nums[0]
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		// Advance lo to the first element greater than the pivot.
		for lo <= hi && nums[lo] <= pivot {
			lo++
		}
		// Retreat hi to the first element smaller than the pivot.
		for lo <= hi && nums[hi] >= pivot {
			hi--
		}
		// Both scans stopped in range: swap the out-of-place pair.
		// (If either ran out, lo == hi+1 and the loop terminates.)
		if lo <= hi {
			nums[lo], nums[hi] = nums[hi], nums[lo]
		}
	}
	// hi ends on the last element <= pivot, which is exactly the pivot's
	// final sorted position; move the pivot there.
	nums[0], nums[hi] = nums[hi], nums[0]
	return hi
}
// HeapSort returns the elements of nums in ascending order using the
// project's MyHeap; nums itself is not modified.
func HeapSort(nums []int) []int {
	// Build a min-heap over ints.
	h := struct_util.NewMyHeap(len(nums), func(a, b interface{}) bool {
		return a.(int) < b.(int)
	})
	// Feed every element into the heap.
	for _, v := range nums {
		h.Push(v)
	}
	// Drain the heap's sorted view back into a plain []int.
	sorted := make([]int, 0)
	for _, item := range h.Sort() {
		sorted = append(sorted, item.(int))
	}
	return sorted
}
|
package pkg
// NameData is a fixed 8-byte buffer. NOTE(review): nothing in this file
// shows how NameData and Name relate (possibly a linkname/unsafe aliasing
// demo) — confirm against the rest of the package before documenting more.
var NameData [8]byte

// Name is the package-level name string.
var Name string
package release
import (
"io/ioutil"
"os"
"github.com/ExploratoryEngineering/reto/pkg/toolbox"
)
const (
	// initialVersion is written to the VERSION file when the tool is initialized.
	initialVersion = "0.0.0"
	// archiveDir holds generated release archives (git-ignored via the
	// .gitignore written by InitTool).
	archiveDir = "release/archives"
	// releaseDir is the root release directory.
	releaseDir = "release/releases"
	// templateDir holds the release templates.
	templateDir = "release/templates"
)
// InitTool initializes the directory structure for the tool. Errors are printed
// to stderr.
func InitTool() error {
// Make sure the release directory doesn't exist
err := os.MkdirAll(releaseDir, toolbox.DefaultDirPerm)
if err != nil {
toolbox.PrintError("Error creating the release directory: %v", err)
return err
}
if err := os.MkdirAll(templateDir, toolbox.DefaultDirPerm); err != nil {
toolbox.PrintError("Could not create the template directory: %v", err)
return err
}
if err := os.MkdirAll(archiveDir, toolbox.DefaultDirPerm); err != nil {
toolbox.PrintError("Could not create the archive directory: %v", err)
return err
}
f, err := os.Create(VersionFile)
if os.IsExist(err) {
toolbox.PrintError("The VERSION file already exists in the release directory")
return err
}
if err != nil {
toolbox.PrintError("Error creating the %s file: %v", VersionFile, err)
return err
}
defer f.Close()
_, err = f.Write([]byte(initialVersion))
if os.IsPermission(err) {
toolbox.PrintError("Permission denied on the %s file. Can't write initial version", VersionFile)
return err
}
if err != nil {
toolbox.PrintError("Error writing initial version to the %s file: %v", VersionFile, err)
return err
}
if err := initTemplates(); err != nil {
return err
}
if err := writeSampleConfig(); err != nil {
return err
}
if err := ioutil.WriteFile("release/.gitignore", []byte("archives\n"), toolbox.DefaultFilePerm); err != nil {
toolbox.PrintError("Could not create .gitignore file in release directory: %v", err)
return err
}
return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.