text stringlengths 11 4.05M |
|---|
package initial_cluster
import (
util "github.com/verlandz/clustering-phone/utility"
)
// Initial-cluster strategies accepted by Get via its chs argument.
const (
N_DIVIDER = "N-Divider"
RANDOMIZED = "Randomized"
)
// req bundles the inputs shared by the initial-cluster builders.
type req struct {
N_Data int // number of data points
N_Feature int // number of features per data point
N_Cluster int // number of clusters to seed
data []util.Data // the data set itself
distance string // distance metric name forwarded to the builders
}
// Get builds the initial cluster centroids for the given data set.
// chs selects the strategy and must be one of the exported constants
// (N_DIVIDER or RANDOMIZED); any other value panics, since callers are
// expected to pass a known constant.
func Get(N_Data, N_Feature, N_Cluster int, data []util.Data, chs, distance string) [][]float64 {
	r := req{
		N_Data:    N_Data,
		N_Feature: N_Feature,
		N_Cluster: N_Cluster,
		data:      data,
		distance:  distance,
	}
	switch chs {
	case N_DIVIDER:
		return r.Ndivider()
	case RANDOMIZED:
		return r.Randomized()
	default:
		// Fixed typos in the panic message ("unknow inital") and included
		// the offending value to aid debugging.
		panic("unknown initial cluster: " + chs)
	}
}
|
/*
* @lc app=leetcode.cn id=204 lang=golang
*
* [204] 计数质数
*
* https://leetcode-cn.com/problems/count-primes/description/
*
* algorithms
* Easy (30.50%)
* Likes: 235
* Dislikes: 0
* Total Accepted: 35K
* Total Submissions: 112.2K
* Testcase Example: '10'
*
* 统计所有小于非负整数 n 的质数的数量。
*
* 示例:
*
* 输入: 10
* 输出: 4
* 解释: 小于 10 的质数一共有 4 个, 它们是 2, 3, 5, 7 。
*
*
*/
// @lc code=start
// countPrimes returns how many prime numbers are strictly less than n,
// using a sieve of Eratosthenes.
func countPrimes(n int) int {
	if n == 0 || n == 1 {
		return 0
	}
	// composite[v] is true once v has been marked as a multiple of a prime.
	composite := make([]bool, n)
	for p := 2; p*p < n; p++ {
		if composite[p] {
			continue
		}
		for multiple := p * p; multiple < n; multiple += p {
			composite[multiple] = true
		}
	}
	// Count unmarked values from 2 upward; 0 and 1 are never primes.
	total := 0
	for v := 2; v < n; v++ {
		if !composite[v] {
			total++
		}
	}
	return total
}
// @lc code=end
|
package main
import (
"fmt"
"math"
)
// main demonstrates nextPermutation on two sample slices, printing each
// slice after it has been advanced in place.
func main() {
	sample := []int{2, 3, 1, 3, 3}
	nextPermutation(sample)
	fmt.Println(sample)
	sample = []int{1, 3, 2}
	nextPermutation(sample)
	fmt.Println(sample)
}
// nextPermutation rearranges nums in place into the lexicographically next
// permutation; a fully descending input wraps around to the sorted order
// (LeetCode 31 semantics).
func nextPermutation(nums []int) {
	// Scan from the right for the first ascent: nums[pivot-1] < nums[pivot].
	pivot := len(nums) - 1
	best := math.MaxInt32
	for ; pivot > 0; pivot-- {
		if nums[pivot] <= nums[pivot-1] {
			continue
		}
		// Among the suffix, pick the rightmost smallest value that is still
		// greater than nums[pivot-1], then swap it into place.
		swapIdx := pivot
		for j := pivot; j < len(nums); j++ {
			if nums[pivot-1] < nums[j] && nums[j] <= best {
				swapIdx = j
				best = nums[j]
			}
		}
		nums[swapIdx], nums[pivot-1] = nums[pivot-1], nums[swapIdx]
		break
	}
	// Reverse the suffix starting at pivot (the whole slice when no ascent
	// was found) to obtain the minimal ordering.
	for l, r := pivot, len(nums)-1; l < r; l, r = l+1, r-1 {
		nums[l], nums[r] = nums[r], nums[l]
	}
}
|
package global
import (
"io/ioutil"
"os"
"strings"
"github.com/jinzhu/configor"
)
// ConfigClass wraps the loaded configuration so it can be shared globally.
type ConfigClass struct {
Conf *Config
}
var (
// GlobalConfig is the process-wide configuration singleton, populated by
// InitConfig / LoadConfig.
GlobalConfig = ConfigClass{}
)
// InitConfig loads configuration into GlobalConfig. Nil pointers fall back
// to the default ./config/ and ./secret/ directories.
func InitConfig(configFilePtr *string, secretFilePtr *string) {
GlobalConfig.LoadConfig(configFilePtr, secretFilePtr)
}
// ConfigDB groups MySQL connection settings per logical database.
type ConfigDB struct {
User ConfigMysql `yaml:"user"`
Engine ConfigMysql `yaml:"engine"`
VIP ConfigMysql `yaml:"vip"`
}
// ConfigMysql holds connection and pool settings for one MySQL database.
type ConfigMysql struct {
Host string `yaml:"host"`
Username string `yaml:"username"`
Password string `yaml:"password"`
Database string `yaml:"database"`
Port uint32 `yaml:"port"`
IsAutoMigrate bool `yaml:"is_auto_migrate"`
LogMode bool `yaml:"log_mode"`
MaxIdleConns int `yaml:"max_idle_conns"`
MaxOpenConns int `yaml:"max_open_conns"`
}
// ConfigRedis holds Redis connection settings.
type ConfigRedis struct {
Host string `yaml:"host"`
Port int `yaml:"port"`
DB int `yaml:"db"`
}
// ConfigApp holds application-level settings (environment name, listen port).
type ConfigApp struct {
ENV string `yaml:"env"`
Port string `yaml:"port"`
}
// UrlParams holds external URL/path settings.
type UrlParams struct {
ConfigCurrency string `yaml:"config"`
Balance string `yaml:"balance"`
Group string `yaml:"group"`
PathLocal string `yaml:"pathLocal"`
PathDebug string `yaml:"pathDebug"`
PathProd string `yaml:"pathProd"`
}
// ConfigParams holds tunable business parameters.
// NOTE(review): not referenced by Config below — confirm where it is loaded.
type ConfigParams struct {
PreDeliveryHours float64 `yaml:"pre_delivery_hours"`
}
// Config is the root structure mapped from the YAML configuration files.
type Config struct {
DB ConfigDB `yaml:"mysql"`
JwtPubKey string `yaml:"jwtPubKey"`
App ConfigApp `yaml:"app"`
Redis ConfigRedis `yaml:"redis"`
Urls UrlParams `yaml:"urls"`
}
// init eagerly loads configuration from the default directories at import
// time. NOTE(review): this is a package-level side effect that can panic if
// config files are missing; consider requiring an explicit InitConfig call.
func init() {
InitConfig(nil, nil)
}
// LoadConfig populates c.Conf from the YAML files found in the given config
// and secret directories (defaulting to ./config/ and ./secret/ when the
// corresponding pointer is nil). It panics when loading fails, since the
// process cannot run without configuration.
func (c *ConfigClass) LoadConfig(configFilePtr *string, secretFilePtr *string) {
	configPath := "./config/"
	if configFilePtr != nil {
		configPath = *configFilePtr
	}
	secretPath := "./secret/"
	if secretFilePtr != nil {
		secretPath = *secretFilePtr
	}
	c.Conf = new(Config)
	// When configuration comes from etcd there is nothing to read from disk;
	// return before scanning the directories (previously the directories were
	// scanned even in this case, only to be discarded).
	if os.Getenv("CONFIG_ETCD") != "" {
		return
	}
	// Load and merge every YAML file found in the two directories.
	configfiles := GetConfigFiles(configPath, secretPath)
	err := configor.Load(c.Conf, configfiles...)
	if err != nil {
		msg := "Failed to load config file !!! " + err.Error()
		panic(msg)
	}
}
// GetConfigFiles collects the YAML config files found directly inside each of
// the given directories, in order. Unreadable directories are skipped.
func GetConfigFiles(dirs ...string) []string {
	// Bug fix: the slice was created with make([]string, 10), seeding it with
	// ten empty strings that then had to be stripped by deleteEmpty.
	// Allocate length 0 (capacity 10) instead.
	configfiles := make([]string, 0, 10)
	for _, dir := range dirs {
		configfiles = walkDir(configfiles, dir)
	}
	// Kept for backward compatibility; with the fix above there is normally
	// nothing left to delete.
	return deleteEmpty(configfiles)
}
// walkDir appends the path of every YAML file directly inside dirname
// (non-recursive) to configfiles and returns the extended slice. Unreadable
// directories are silently skipped. Note: dirname is expected to end with a
// path separator, since the file name is concatenated directly onto it.
func walkDir(configfiles []string, dirname string) []string {
	files, err := ioutil.ReadDir(dirname)
	if err != nil {
		return configfiles
	}
	for _, f := range files {
		// Bug fix: use HasSuffix instead of Contains so that names such as
		// "app.yaml.bak" are no longer picked up as config files.
		if strings.HasSuffix(f.Name(), ".yaml") {
			configfiles = append(configfiles, dirname+f.Name())
		}
	}
	return configfiles
}
// deleteEmpty returns a new slice containing the non-empty strings of
// configfiles, in their original order; the input is left untouched.
func deleteEmpty(configfiles []string) []string {
	var kept []string
	for i := range configfiles {
		if len(configfiles[i]) > 0 {
			kept = append(kept, configfiles[i])
		}
	}
	return kept
}
|
package blocker
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// DefaultBlockerTestSuite is the unit-test suite for DefaultBlocker.
type DefaultBlockerTestSuite struct {
suite.Suite
blockerPool *BlockerPool // pool under test, created once in SetupSuite
}
// Test fixtures: suite configuration and GeoIP database used by the blockers.
const (
blockerConfigPath = "./testdata/config_doc.yml"
blockerDBFile = "./data/GeoLite2-Country.mmdb.gz"
)
// SetupSuite builds the shared BlockerPool fixture from the test config and
// GeoIP database; it panics (failing the suite) when either cannot be loaded.
func (suite *DefaultBlockerTestSuite) SetupSuite() {
configDoc, err := LoadConfig(blockerConfigPath)
if err != nil {
panic(err)
}
suite.blockerPool, err = NewBlockerPool(configDoc, blockerDBFile)
if err != nil {
panic(err)
}
}
// TestIsMacBlocked verifies that a whitelisted zone/MAC pair is not blocked.
func (suite *DefaultBlockerTestSuite) TestIsMacBlocked() {
	t := suite.T()
	const (
		clientID = "jm-10002"
		mac      = "30451143D99A" // alternative sample: "30451143FAEE"
		zone     = "CN"
	)
	blocked := suite.blockerPool.GetBlocker(clientID).IsMacBlocked(mac, zone)
	assert.False(t, blocked)
}
// TestIsIPBlocked verifies IP-based blocking: a CN address passes while a US
// address is blocked.
func (suite *DefaultBlockerTestSuite) TestIsIPBlocked() {
	t := suite.T()
	blocker := suite.blockerPool.GetBlocker("jm-10002")
	assert.False(t, blocker.IsIPBlocked("114.236.8.103")) // CN
	assert.True(t, blocker.IsIPBlocked("8.8.8.8"))        // US
}
// TestIgnoreIPCheck verifies that a MAC on the IP-check exemption list skips
// IP filtering.
func (suite *DefaultBlockerTestSuite) TestIgnoreIPCheck() {
	t := suite.T()
	blocker := suite.blockerPool.GetBlocker("jm-10004")
	assert.True(t, blocker.IgnoreIPCheck("BBC123456789"))
}
// TestDefaultBlockerTestSuite wires the suite into the standard go test runner.
func TestDefaultBlockerTestSuite(t *testing.T) {
suite.Run(t, new(DefaultBlockerTestSuite))
}
|
package clickhousespanstore
import (
"database/sql"
"database/sql/driver"
"encoding/json"
"fmt"
"math/rand"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
"github.com/hashicorp/go-hclog"
"github.com/DATA-DOG/go-sqlmock"
"github.com/gogo/protobuf/proto"
"github.com/jaegertracing/jaeger/model"
"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore/mocks"
)
// Sizes and table names shared by the span-writer tests below.
const (
testTagCount = 10 // tags per generated span/process
testLogCount = 5 // logs per generated span
testLogFieldCount = 5 // fields per generated log
testIndexTable = "test_index_table"
testSpansTable = "test_spans_table"
)
// expectation describes one prepared SQL statement the mock DB should see,
// together with the argument sets expected for its Exec calls.
type expectation struct {
preparation string
execArgs [][]driver.Value
}
// Shared fixtures for the span-writer tests.
// NOTE(review): testStartTime is declared elsewhere in this package.
var (
errorMock = fmt.Errorf("error mock")
process = model.NewProcess("test_service", []model.KeyValue{model.String("test_process_key", "test_process_value")})
// testSpan is the canonical span written by most tests below.
testSpan = model.Span{
TraceID: model.NewTraceID(1, 2),
SpanID: model.NewSpanID(3),
OperationName: "GET /unit_test",
StartTime: testStartTime,
Process: process,
Tags: []model.KeyValue{model.String("test_string_key", "test_string_value"), model.Int64("test_int64_key", 4)},
Logs: []model.Log{{Timestamp: testStartTime, Fields: []model.KeyValue{model.String("test_log_key", "test_log_value")}}},
Duration: time.Minute,
}
testSpans = []*model.Span{&testSpan}
keys, values = uniqueTagsForSpan(&testSpan)
// indexWriteExpectation is the prepared statement plus args the mock DB
// should see when testSpan is written to the index table.
indexWriteExpectation = expectation{
preparation: fmt.Sprintf("INSERT INTO %s (timestamp, traceID, service, operation, durationUs, tags.key, tags.value) VALUES (?, ?, ?, ?, ?, ?, ?)", testIndexTable),
execArgs: [][]driver.Value{{
testSpan.StartTime,
testSpan.TraceID.String(),
testSpan.Process.GetServiceName(),
testSpan.OperationName,
testSpan.Duration.Microseconds(),
keys,
values,
}}}
// writeBatchLogs is the debug log every writeBatch call is expected to emit.
writeBatchLogs = []mocks.LogMock{{Msg: "Writing spans", Args: []interface{}{"size", len(testSpans)}}}
)
// TestSpanWriter_TagString checks tagString's "key=value" formatting for each
// KeyValue kind.
func TestSpanWriter_TagString(t *testing.T) {
	cases := map[string]struct {
		kv       model.KeyValue
		expected string
	}{
		"string value":       {kv: model.String("tag_key", "tag_string_value"), expected: "tag_key=tag_string_value"},
		"true value":         {kv: model.Bool("tag_key", true), expected: "tag_key=true"},
		"false value":        {kv: model.Bool("tag_key", false), expected: "tag_key=false"},
		"positive int value": {kv: model.Int64("tag_key", 1203912), expected: "tag_key=1203912"},
		"negative int value": {kv: model.Int64("tag_key", -1203912), expected: "tag_key=-1203912"},
		"float value":        {kv: model.Float64("tag_key", 0.005009), expected: "tag_key=0.005009"},
	}
	for name, tc := range cases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, tc.expected, tagString(&tc.kv), "Incorrect tag string")
		})
	}
}
// TestSpanWriter_UniqueTagsForSpan checks that uniqueTagsForSpan merges span
// tags, process tags and log fields into sorted key/value slices: identical
// key/value pairs collapse to one entry, while the same key with different
// values keeps one entry per value.
func TestSpanWriter_UniqueTagsForSpan(t *testing.T) {
tests := map[string]struct {
tags []model.KeyValue
processTags []model.KeyValue
logs []model.Log
expectedKeys []string
expectedValues []string
}{
"default": {
tags: []model.KeyValue{model.String("key2", "value")},
processTags: []model.KeyValue{model.Int64("key3", 412)},
logs: []model.Log{{Fields: []model.KeyValue{model.Float64("key1", .5)}}},
expectedKeys: []string{"key1", "key2", "key3"},
expectedValues: []string{"0.5", "value", "412"},
},
"repeating tags": {
tags: []model.KeyValue{model.String("key2", "value"), model.String("key2", "value")},
processTags: []model.KeyValue{model.Int64("key3", 412)},
logs: []model.Log{{Fields: []model.KeyValue{model.Float64("key1", .5)}}},
expectedKeys: []string{"key1", "key2", "key3"},
expectedValues: []string{"0.5", "value", "412"},
},
"repeating keys": {
tags: []model.KeyValue{model.String("key2", "value_a"), model.String("key2", "value_b")},
processTags: []model.KeyValue{model.Int64("key3", 412)},
logs: []model.Log{{Fields: []model.KeyValue{model.Float64("key1", .5)}}},
expectedKeys: []string{"key1", "key2", "key2", "key3"},
expectedValues: []string{"0.5", "value_a", "value_b", "412"},
},
"repeating values": {
tags: []model.KeyValue{model.String("key2", "value"), model.Int64("key4", 412)},
processTags: []model.KeyValue{model.Int64("key3", 412)},
logs: []model.Log{{Fields: []model.KeyValue{model.Float64("key1", .5)}}},
expectedKeys: []string{"key1", "key2", "key3", "key4"},
expectedValues: []string{"0.5", "value", "412", "412"},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
process := model.Process{Tags: test.processTags}
span := model.Span{Tags: test.tags, Process: &process, Logs: test.logs}
actualKeys, actualValues := uniqueTagsForSpan(&span)
assert.Equal(t, test.expectedKeys, actualKeys)
assert.Equal(t, test.expectedValues, actualValues)
})
}
}
// TestSpanWriter_General drives writeIndexBatch / writeModelBatch / writeBatch
// against a sqlmock DB and verifies the exact prepared statements, exec
// arguments and debug logs for each encoding / index-table combination.
// An empty indexTable means writeBatch skips the index write entirely.
func TestSpanWriter_General(t *testing.T) {
spanJSON, err := json.Marshal(&testSpan)
require.NoError(t, err)
modelWriteExpectationJSON := getModelWriteExpectation(spanJSON)
spanProto, err := proto.Marshal(&testSpan)
require.NoError(t, err)
modelWriteExpectationProto := getModelWriteExpectation(spanProto)
tests := map[string]struct {
encoding Encoding
indexTable TableName
spans []*model.Span
expectations []expectation
action func(writeWorker *WriteWorker, spans []*model.Span) error
expectedLogs []mocks.LogMock
}{
"write index batch": {
encoding: EncodingJSON,
indexTable: testIndexTable,
spans: testSpans,
expectations: []expectation{indexWriteExpectation},
action: func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeIndexBatch(spans) },
},
"write model batch JSON": {
encoding: EncodingJSON,
indexTable: testIndexTable,
spans: testSpans,
expectations: []expectation{modelWriteExpectationJSON},
action: func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
},
"write model bach Proto": {
encoding: EncodingProto,
indexTable: testIndexTable,
spans: testSpans,
expectations: []expectation{modelWriteExpectationProto},
action: func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
},
"write batch no index JSON": {
encoding: EncodingJSON,
indexTable: "",
spans: testSpans,
expectations: []expectation{modelWriteExpectationJSON},
action: func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
expectedLogs: writeBatchLogs,
},
"write batch no index Proto": {
encoding: EncodingProto,
indexTable: "",
spans: testSpans,
expectations: []expectation{modelWriteExpectationProto},
action: func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
expectedLogs: writeBatchLogs,
},
"write batch JSON": {
encoding: EncodingJSON,
indexTable: testIndexTable,
spans: testSpans,
expectations: []expectation{modelWriteExpectationJSON, indexWriteExpectation},
action: func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
expectedLogs: writeBatchLogs,
},
"write batch Proto": {
encoding: EncodingProto,
indexTable: testIndexTable,
spans: testSpans,
expectations: []expectation{modelWriteExpectationProto, indexWriteExpectation},
action: func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
expectedLogs: writeBatchLogs,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
db, mock, err := mocks.GetDbMock()
require.NoError(t, err, "an error was not expected when opening a stub database connection")
defer db.Close()
spyLogger := mocks.NewSpyLogger()
worker := getWriteWorker(spyLogger, db, test.encoding, test.indexTable)
// Each expected statement runs in its own begin/prepare/exec/commit cycle.
for _, expectation := range test.expectations {
mock.ExpectBegin()
prep := mock.ExpectPrepare(expectation.preparation)
for _, args := range expectation.execArgs {
prep.ExpectExec().WithArgs(args...).WillReturnResult(sqlmock.NewResult(1, 1))
}
mock.ExpectCommit()
}
assert.NoError(t, test.action(&worker, test.spans))
assert.NoError(t, mock.ExpectationsWereMet())
spyLogger.AssertLogsOfLevelEqual(t, hclog.Debug, test.expectedLogs)
})
}
}
// TestSpanWriter_BeginError verifies that every write path propagates an
// error returned when the transaction is started (Begin) and still emits its
// expected debug logs.
func TestSpanWriter_BeginError(t *testing.T) {
tests := map[string]struct {
action func(writeWorker *WriteWorker) error
expectedLogs []mocks.LogMock
}{
"write model batch": {action: func(writeWorker *WriteWorker) error { return writeWorker.writeModelBatch(testSpans) }},
"write index batch": {action: func(writeWorker *WriteWorker) error { return writeWorker.writeIndexBatch(testSpans) }},
"write batch": {
action: func(writeWorker *WriteWorker) error { return writeWorker.writeBatch(testSpans) },
expectedLogs: writeBatchLogs,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
db, mock, err := mocks.GetDbMock()
require.NoError(t, err, "an error was not expected when opening a stub database connection")
defer db.Close()
spyLogger := mocks.NewSpyLogger()
writeWorker := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable)
// The very first Begin fails; the worker must surface errorMock.
mock.ExpectBegin().WillReturnError(errorMock)
assert.ErrorIs(t, test.action(&writeWorker), errorMock)
assert.NoError(t, mock.ExpectationsWereMet())
spyLogger.AssertLogsOfLevelEqual(t, hclog.Debug, test.expectedLogs)
})
}
}
// TestSpanWriter_PrepareError verifies that a failing statement preparation
// rolls the transaction back and propagates the error on every write path.
func TestSpanWriter_PrepareError(t *testing.T) {
spanJSON, err := json.Marshal(&testSpan)
require.NoError(t, err)
modelWriteExpectation := getModelWriteExpectation(spanJSON)
tests := map[string]struct {
action func(writeWorker *WriteWorker) error
expectation expectation
expectedLogs []mocks.LogMock
}{
"write model batch": {
action: func(writeWorker *WriteWorker) error { return writeWorker.writeModelBatch(testSpans) },
expectation: modelWriteExpectation,
},
"write index batch": {
action: func(writeWorker *WriteWorker) error { return writeWorker.writeIndexBatch(testSpans) },
expectation: indexWriteExpectation,
},
"write batch": {
action: func(writeWorker *WriteWorker) error { return writeWorker.writeBatch(testSpans) },
expectation: modelWriteExpectation,
expectedLogs: writeBatchLogs,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
db, mock, err := mocks.GetDbMock()
require.NoError(t, err, "an error was not expected when opening a stub database connection")
defer db.Close()
spyLogger := mocks.NewSpyLogger()
spanWriter := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable)
// Prepare fails after Begin; a rollback must follow.
mock.ExpectBegin()
mock.ExpectPrepare(test.expectation.preparation).WillReturnError(errorMock)
mock.ExpectRollback()
assert.ErrorIs(t, test.action(&spanWriter), errorMock)
assert.NoError(t, mock.ExpectationsWereMet())
spyLogger.AssertLogsOfLevelEqual(t, hclog.Debug, test.expectedLogs)
})
}
}
// TestSpanWriter_ExecError verifies that a failing Exec aborts the batch:
// every expected statement before the last succeeds and commits, the last
// one fails and is rolled back, and the error is propagated to the caller.
func TestSpanWriter_ExecError(t *testing.T) {
	spanJSON, err := json.Marshal(&testSpan)
	require.NoError(t, err)
	modelWriteExpectation := getModelWriteExpectation(spanJSON)
	tests := map[string]struct {
		indexTable   TableName
		expectations []expectation
		action       func(writer *WriteWorker) error
		expectedLogs []mocks.LogMock
	}{
		"write model batch": {
			indexTable:   testIndexTable,
			expectations: []expectation{modelWriteExpectation},
			action:       func(writer *WriteWorker) error { return writer.writeModelBatch(testSpans) },
		},
		"write index batch": {
			indexTable:   testIndexTable,
			expectations: []expectation{indexWriteExpectation},
			action:       func(writer *WriteWorker) error { return writer.writeIndexBatch(testSpans) },
		},
		"write batch no index": {
			indexTable:   "",
			expectations: []expectation{modelWriteExpectation},
			action:       func(writer *WriteWorker) error { return writer.writeBatch(testSpans) },
			expectedLogs: writeBatchLogs,
		},
		"write batch": {
			indexTable:   testIndexTable,
			expectations: []expectation{modelWriteExpectation, indexWriteExpectation},
			action:       func(writer *WriteWorker) error { return writer.writeBatch(testSpans) },
			expectedLogs: writeBatchLogs,
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			db, mock, err := mocks.GetDbMock()
			require.NoError(t, err, "an error was not expected when opening a stub database connection")
			defer db.Close()
			spyLogger := mocks.NewSpyLogger()
			// Bug fix: the worker was previously always built with
			// testIndexTable, ignoring each case's indexTable field, so the
			// "write batch no index" case never actually exercised the
			// no-index configuration.
			writeWorker := getWriteWorker(spyLogger, db, EncodingJSON, test.indexTable)
			for i, expectation := range test.expectations {
				mock.ExpectBegin()
				prep := mock.ExpectPrepare(expectation.preparation)
				if i < len(test.expectations)-1 {
					// Statements before the last succeed and commit.
					for _, args := range expectation.execArgs {
						prep.ExpectExec().WithArgs(args...).WillReturnResult(sqlmock.NewResult(1, 1))
					}
					mock.ExpectCommit()
				} else {
					// The final statement fails and must be rolled back.
					prep.ExpectExec().WithArgs(expectation.execArgs[0]...).WillReturnError(errorMock)
					mock.ExpectRollback()
				}
			}
			assert.ErrorIs(t, test.action(&writeWorker), errorMock)
			assert.NoError(t, mock.ExpectationsWereMet())
			spyLogger.AssertLogsOfLevelEqual(t, hclog.Debug, test.expectedLogs)
		})
	}
}
// getWriteWorker builds a WriteWorker wired to the given mock DB and spy
// logger, writing to the test tables with the requested encoding and index
// table (empty indexTable disables index writes).
func getWriteWorker(spyLogger mocks.SpyLogger, db *sql.DB, encoding Encoding, indexTable TableName) WriteWorker {
return WriteWorker{
params: &WriteParams{
logger: spyLogger,
db: db,
spansTable: testSpansTable,
indexTable: indexTable,
encoding: encoding,
},
workerDone: make(chan *WriteWorker),
}
}
// generateRandomSpans produces count independently generated random spans.
func generateRandomSpans(count int) []*model.Span {
	spans := make([]*model.Span, 0, count)
	for i := 0; i < count; i++ {
		s := generateRandomSpan()
		spans = append(spans, &s)
	}
	return spans
}
// generateRandomSpan builds one span with random IDs, names, start time, tags
// and logs; collection sizes come from the testTagCount/testLogCount consts.
func generateRandomSpan() model.Span {
processTags := generateRandomKeyValues(testTagCount)
process := model.Process{
ServiceName: "service" + strconv.FormatUint(rand.Uint64(), 10),
Tags: processTags,
}
span := model.Span{
TraceID: model.NewTraceID(rand.Uint64(), rand.Uint64()),
SpanID: model.NewSpanID(rand.Uint64()),
OperationName: "operation" + strconv.FormatUint(rand.Uint64(), 10),
StartTime: getRandomTime(),
Process: &process,
Tags: generateRandomKeyValues(testTagCount),
Logs: generateRandomLogs(),
// Random non-negative duration of up to 2^32 seconds.
Duration: time.Unix(rand.Int63n(1<<32), 0).Sub(time.Unix(0, 0)),
}
return span
}
// generateRandomLogs produces testLogCount random span logs, each carrying
// testLogFieldCount random key/value fields.
func generateRandomLogs() []model.Log {
	logs := make([]model.Log, 0, testLogCount)
	for i := 0; i < testLogCount; i++ {
		logs = append(logs, model.Log{
			Timestamp: getRandomTime(),
			Fields:    generateRandomKeyValues(testLogFieldCount),
		})
	}
	return logs
}
// getRandomTime returns a uniformly random instant between the Unix epoch and
// now, at second granularity.
func getRandomTime() time.Time {
	seconds := rand.Int63n(time.Now().Unix())
	return time.Unix(seconds, 0)
}
// generateRandomKeyValues produces count random string key/value pairs.
func generateRandomKeyValues(count int) []model.KeyValue {
	tags := make([]model.KeyValue, 0, count)
	for i := 0; i < count; i++ {
		key := "key" + strconv.FormatUint(rand.Uint64(), 16)
		// Fixed copy-paste bug: values were previously also prefixed with
		// "key", which made generated test data confusing to read.
		value := "value" + strconv.FormatUint(rand.Uint64(), 16)
		kv := model.KeyValue{Key: key, VType: model.ValueType_STRING, VStr: value}
		tags = append(tags, kv)
	}
	return tags
}
// getModelWriteExpectation returns the prepared statement plus args the mock
// DB should see when testSpan (serialized as spanJSON) is written to the
// spans (model) table.
func getModelWriteExpectation(spanJSON []byte) expectation {
return expectation{
preparation: fmt.Sprintf("INSERT INTO %s (timestamp, traceID, model) VALUES (?, ?, ?)", testSpansTable),
execArgs: [][]driver.Value{{
testSpan.StartTime,
testSpan.TraceID.String(),
spanJSON,
}},
}
}
|
package refcount
import (
"reflect"
"sync"
"sync/atomic"
)
// ReferenceCountable is the interface implemented by reference-countable,
// poolable objects. An embeddable implementation (ReferenceCounter) is
// provided below; the interface exists purely as an extensibility point.
type ReferenceCountable interface {
// SetInstance records the pooled instance being tracked.
SetInstance(i interface{})
// IncrementReferenceCount adds one reference.
IncrementReferenceCount()
// DecrementReferenceCount removes one reference; the implementation is
// expected to recycle the instance once the count reaches zero.
DecrementReferenceCount()
}
// resetObjFunc restores a pooled object to its zero state before it is put
// back into the pool.
type resetObjFunc func(interface{}) error

// ReferenceCounter is an embeddable reference count for pooled objects.
// Incrementing and decrementing references correctly is critical, especially
// when instances are shared across goroutines.
type ReferenceCounter struct {
	count       *uint32    // current number of live references
	destination *sync.Pool // pool the instance is returned to on release
	released    *uint32    // total number of releases, for accounting
	Instance    interface{}
	reset       resetObjFunc
	id          uint32
}

// IncrementReferenceCount adds one reference.
func (r *ReferenceCounter) IncrementReferenceCount() {
	atomic.AddUint32(r.count, 1)
}

// DecrementReferenceCount removes one reference. When the count reaches zero
// the instance is reset, returned to its pool, and no longer tracked.
//
// Bug fix: this method (and IncrementReferenceCount) previously used a value
// receiver, so the final r.Instance = nil only cleared a copy and the caller
// silently kept a reference to the pooled (and possibly reused) instance.
// Pointer receivers make the clear effective and match SetInstance.
func (r *ReferenceCounter) DecrementReferenceCount() {
	if atomic.LoadUint32(r.count) == 0 {
		panic("this should not happen =>" + reflect.TypeOf(r.Instance).String())
	}
	// Adding ^uint32(0) is the documented atomic way to subtract 1.
	if atomic.AddUint32(r.count, ^uint32(0)) == 0 {
		// Record the release before recycling the object.
		atomic.AddUint32(r.released, 1)
		// Reset the object to its zero values before pooling it.
		if err := r.reset(r.Instance); err != nil {
			panic("error while resetting an instance => " + err.Error())
		}
		r.destination.Put(r.Instance)
		// Stop tracking the instance so stale use is detectable.
		r.Instance = nil
	}
}

// SetInstance records the pooled instance being tracked.
func (r *ReferenceCounter) SetInstance(i interface{}) {
	r.Instance = i
}
|
package propertypricehistorycom_test
import (
. "github.com/DennisDenuto/property-price-collector/site/propertypricehistorycom"
"fmt"
"github.com/DennisDenuto/property-price-collector/data"
"github.com/DennisDenuto/property-price-collector/site"
"github.com/DennisDenuto/property-price-collector/site/propertypricehistorycom/propertypricehistorycomfakes"
"github.com/PuerkitoBio/fetchbot"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/ghttp"
"net/url"
"time"
)
// Ginkgo spec for the PropertyPriceHistoryCom scraper: a fake HTTP server
// serves canned listing pages and the spec asserts the scraped properties.
var _ = Describe("HistoricalPropertyScraper", func() {
var scraper PropertyPriceHistoryCom
var testMux *fetchbot.Mux
var fetcher *fetchbot.Fetcher
var server *ghttp.Server
BeforeEach(func() {
// Point the scraper at a local ghttp server and stub the postcode lookup
// to a single suburb (Kellyville Ridge, NSW 2155).
server = ghttp.NewServer()
testMux = fetchbot.NewMux()
urlParsed, err := url.Parse(server.URL())
Expect(err).ToNot(HaveOccurred())
lookup := &propertypricehistorycomfakes.FakePostcodeSuburbLookup{}
lookup.GetSuburbReturns([]site.Suburb{
{Name: "Kellyville Ridge", State: "NSW"},
}, true)
scraper = NewPropertyPriceHistoryCom(fmt.Sprintf("localhost:%s", urlParsed.Port()), 2155, 2155, lookup)
scraper.SetupMux(testMux)
// A fast, self-terminating fetcher keeps the spec quick.
fetcher = fetchbot.New(testMux)
fetcher.CrawlDelay = 0
fetcher.AutoClose = true
fetcher.WorkerIdleTTL = 1 * time.Second
})
AfterEach(func() {
server.Close()
})
Context("property list", func() {
BeforeEach(func() {
// Serve robots.txt plus two canned listing pages (first and last page).
server.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/robots.txt"),
ghttp.RespondWith(200, `
User-agent: *
Disallow: /deny`,
),
),
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/sold/list/NSW/2155/Kellyville+Ridge"),
ghttp.RespondWith(200, PropertyPriceHistory_list_nsw_2155),
),
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/sold/list/NSW/2155/Kellyville+Ridge/2/"),
ghttp.RespondWith(200, PropertyPriceHistory_list_nsw_2155_last_page),
),
)
})
It("should map to properties", func() {
queue := fetcher.Start()
for _, seed := range scraper.SeedUrls {
queueCount, err := queue.SendStringGet(seed)
Expect(err).ToNot(HaveOccurred())
Expect(queueCount).To(Equal(1))
}
// The first scraped property should match the canned fixture exactly.
receivedProperty := &data.PropertyHistoryData{}
Eventually(scraper.GetProperties(), 2*time.Second).Should(Receive(receivedProperty))
Expect(receivedProperty.Address).To(Equal(data.Address{
AddressLine1: "16/11 Kilbenny Street",
State: "NSW",
Suburb: "Kellyville Ridge",
PostCode: "2155",
LonLat: data.LonLat{
Lon: "150.9276190",
Lat: "-33.6984460",
},
}))
Expect(receivedProperty.Type).To(Equal("apartment"))
Expect(receivedProperty.DateSold.String()).To(ContainSubstring("2017-06-08"))
Expect(receivedProperty.Price).To(Equal("N/A"))
Expect(receivedProperty.NumBeds).To(Equal("2"))
Expect(receivedProperty.NumBaths).To(Equal("3"))
Expect(receivedProperty.NumCars).To(Equal("1"))
// A property from the second (last) page proves pagination is followed.
Eventually(func() bool {
for p := range scraper.GetProperties() {
if p.Address.AddressLine1 == "1075 West Jindalee Road" {
return true
}
}
return false
}, 2*time.Second).Should(BeTrue())
})
})
})
|
// Copyright (c) 2013-2018 KIDTSUNAMI
// Author: alex@kidtsunami.com
package util
import (
"bytes"
"math"
"time"
)
// MinString returns the lexicographically smaller of a and b.
func MinString(a, b string) string {
	if b <= a {
		return b
	}
	return a
}

// MaxString returns the lexicographically larger of a and b.
func MaxString(a, b string) string {
	if b >= a {
		return b
	}
	return a
}
// MinBytes returns whichever of a and b compares smaller byte-wise
// (b when they are equal).
func MinBytes(a, b []byte) []byte {
	switch bytes.Compare(a, b) {
	case -1:
		return a
	default:
		return b
	}
}

// MaxBytes returns whichever of a and b compares larger byte-wise
// (b when they are equal).
func MaxBytes(a, b []byte) []byte {
	switch bytes.Compare(a, b) {
	case 1:
		return a
	default:
		return b
	}
}
// Max returns the larger of x and y.
func Max(x, y int) int {
	// Idiom fix: dropped the redundant else branch after a terminating return.
	if x < y {
		return y
	}
	return x
}

// Min returns the smaller of x and y.
func Min(x, y int) int {
	if x > y {
		return y
	}
	return x
}
// MaxN returns the largest of nums, or 0 when called with no arguments.
func MaxN(nums ...int) int {
	if len(nums) == 0 {
		return 0
	}
	best := nums[0]
	for _, v := range nums[1:] {
		if v > best {
			best = v
		}
	}
	return best
}

// MinN returns the smallest of nums, or 0 when called with no arguments.
func MinN(nums ...int) int {
	if len(nums) == 0 {
		return 0
	}
	best := nums[0]
	for _, v := range nums[1:] {
		if v < best {
			best = v
		}
	}
	return best
}

// NonZero returns the first non-zero value in x, or 0 when none exists.
func NonZero(x ...int) int {
	for i := 0; i < len(x); i++ {
		if x[i] != 0 {
			return x[i]
		}
	}
	return 0
}
// NonZeroMin returns the smallest non-zero value in x, or 0 when every value
// is zero (or x is empty).
func NonZeroMin(x ...int) int {
	best := 0
	for _, v := range x {
		if v == 0 {
			continue
		}
		if best == 0 || v < best {
			best = v
		}
	}
	return best
}

// NonZeroMin64 returns the smallest non-zero value in x, or 0 when every
// value is zero (or x is empty).
func NonZeroMin64(x ...int64) int64 {
	var best int64
	for _, v := range x {
		if v == 0 {
			continue
		}
		if best == 0 || v < best {
			best = v
		}
	}
	return best
}
// Max64 returns the larger of x and y.
func Max64(x, y int64) int64 {
	if y > x {
		return y
	}
	return x
}

// Min64 returns the smaller of x and y.
func Min64(x, y int64) int64 {
	if y < x {
		return y
	}
	return x
}
// Max64N returns the largest of nums, or 0 when called with no arguments.
func Max64N(nums ...int64) int64 {
	if len(nums) == 0 {
		return 0
	}
	best := nums[0]
	for _, v := range nums[1:] {
		if v > best {
			best = v
		}
	}
	return best
}

// Min64N returns the smallest of nums, or 0 when called with no arguments.
func Min64N(nums ...int64) int64 {
	if len(nums) == 0 {
		return 0
	}
	best := nums[0]
	for _, v := range nums[1:] {
		if v < best {
			best = v
		}
	}
	return best
}

// MaxU64 returns the larger of x and y.
func MaxU64(x, y uint64) uint64 {
	if y > x {
		return y
	}
	return x
}

// MinU64 returns the smaller of x and y.
func MinU64(x, y uint64) uint64 {
	if y < x {
		return y
	}
	return x
}
// MinFloat64 returns the smaller of x and y, delegating to math.Min
// (which defines the NaN and signed-zero behaviour).
func MinFloat64(x, y float64) float64 {
	return math.Min(x, y)
}

// MaxFloat64 returns the larger of x and y, delegating to math.Max.
func MaxFloat64(x, y float64) float64 {
	return math.Max(x, y)
}

// MinFloat64N returns the smallest of nums, or 0 when called with no
// arguments. Unlike math.Min, the comparison is a plain < on each element.
func MinFloat64N(nums ...float64) float64 {
	if len(nums) == 0 {
		return 0
	}
	best := nums[0]
	for _, v := range nums[1:] {
		if v < best {
			best = v
		}
	}
	return best
}

// MaxFloat64N returns the largest of nums, or 0 when called with no
// arguments. Unlike math.Max, the comparison is a plain > on each element.
func MaxFloat64N(nums ...float64) float64 {
	if len(nums) == 0 {
		return 0
	}
	best := nums[0]
	for _, v := range nums[1:] {
		if v > best {
			best = v
		}
	}
	return best
}
// MaxTime returns the later of x and y (y when they are equal).
func MaxTime(x, y time.Time) time.Time {
	if y.Before(x) {
		return x
	}
	return y
}

// MinTime returns the earlier of x and y (y when they are equal).
func MinTime(x, y time.Time) time.Time {
	if y.After(x) {
		return x
	}
	return y
}

// MaxDuration returns the larger of a and b (a when they are equal).
func MaxDuration(a, b time.Duration) time.Duration {
	if b > a {
		return b
	}
	return a
}

// MinDuration returns the smaller of a and b (a when they are equal).
func MinDuration(a, b time.Duration) time.Duration {
	if b < a {
		return b
	}
	return a
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"syscall"
"github.com/codingeasygo/serviced"
log "github.com/sirupsen/logrus"
)
// usage prints command-line help to stdout. Windows builds additionally
// expose the install/uninstall service-management commands.
func usage() {
switch runtime.GOOS {
case "windows":
fmt.Printf("Usage: serviced <install|uninstall|stat|stop|list|add|remove>\n")
fmt.Printf("\tinstall\t\t install windows service\n")
fmt.Printf("\tuninstall\t\t remove windows service\n")
default:
fmt.Printf("Usage: serviced <srv|stat|stop|list|add|remove>\n")
}
fmt.Printf("\tstart\t\t start group service\n")
fmt.Printf("\tstop\t\t stop group service\n")
fmt.Printf("\tlist\t\t list group service\n")
fmt.Printf("\tadd\t\t add group service\n")
fmt.Printf("\tremove\t\t remove group service\n")
fmt.Printf("\n")
}
// main dispatches on the executable's base name: when invoked as
// "serviced-srv" it runs the service manager (in the foreground, or through
// the Windows service wrapper); any other name runs the management console.
func main() {
_, name := filepath.Split(os.Args[0])
name = strings.TrimSuffix(name, ".exe")
switch name {
case "serviced-srv":
// Explicit "srv" subcommand: run in the foreground with an optional
// config-path argument.
if len(os.Args) > 1 && os.Args[1] == "srv" {
conf := ""
if len(os.Args) > 2 {
conf = os.Args[2]
}
runService(conf)
return
}
switch runtime.GOOS {
case "windows":
windowService()
default:
conf := ""
if len(os.Args) > 1 {
conf = os.Args[1]
}
runService(conf)
}
default:
runConsole()
}
}
// service is the process-wide manager instance, created by runService and
// torn down by stopService.
var service *serviced.Manager
// runService boots the service manager, starts every configured service, and
// blocks until a termination signal arrives. conf optionally overrides the
// path of serviced.json (default: next to the executable).
func runService(conf string) {
log.SetFormatter(NewPlainFormatter())
path, _ := exePath()
dir := filepath.Dir(path)
service = serviced.NewManager()
if len(conf) > 0 {
service.Filename = conf
} else {
service.Filename = filepath.Join(dir, "serviced.json")
}
switch runtime.GOOS {
case "windows":
service.TempDir = dir
default:
service.TempDir = os.TempDir()
}
err := service.Bootstrap()
if err != nil {
// NOTE(review): exits without reporting the bootstrap error — consider
// logging err before os.Exit.
os.Exit(1)
return
}
service.StartAll(ioutil.Discard)
// Block until the process receives a termination signal, then shut down.
stop := make(chan os.Signal, 1)
signal.Notify(stop,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
<-stop
stopService()
}
// stopService stops all managed services and shuts down the console listener.
func stopService() {
service.StopAll()
service.StopConsole()
}
// runConsole executes one management command (add/remove/start/stop/list)
// against the running service via the console protocol.
// NOTE(review): the len(os.Args) < 3 guard forces an argument even for
// "list", which then receives os.Args[2] — confirm that is intended.
func runConsole() {
if len(os.Args) < 3 {
usage()
return
}
path, _ := exePath()
dir := filepath.Dir(path)
c := serviced.NewConsole()
switch runtime.GOOS {
case "windows":
c.TempDir = dir
default:
c.TempDir = os.TempDir()
}
err := c.Bootstrap()
if err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
return
}
// Stream the console's output to stdout while the command runs.
go c.CopyTo(os.Stdout)
defer c.Close()
switch os.Args[1] {
case "add":
path, _ := filepath.Abs(os.Args[2])
c.Add(path)
case "remove":
c.Remove(os.Args[2])
case "start":
c.Start(os.Args[2])
case "stop":
c.Stop(os.Args[2])
case "list":
c.List(os.Args[2])
default:
usage()
os.Exit(1)
}
}
// exePath resolves os.Args[0] to the absolute path of the running executable.
// When the literal path does not name a regular file and has no extension, a
// ".exe" suffix is also tried (Windows convention). An error is returned
// when neither candidate is a regular file.
func exePath() (string, error) {
	p, err := filepath.Abs(os.Args[0])
	if err != nil {
		return "", err
	}
	// check reports whether candidate exists as a regular (non-directory)
	// file, producing the matching error otherwise.
	check := func(candidate string) (bool, error) {
		info, statErr := os.Stat(candidate)
		if statErr != nil {
			return false, statErr
		}
		if info.Mode().IsDir() {
			return false, fmt.Errorf("%s is directory", candidate)
		}
		return true, nil
	}
	ok, err := check(p)
	if ok {
		return p, nil
	}
	if filepath.Ext(p) == "" {
		p += ".exe"
		if ok, err = check(p); ok {
			return p, nil
		}
	}
	return "", err
}
// PlainFormatter is a minimal logrus formatter that emits
// "timestamp LEVEL message" lines.
type PlainFormatter struct {
	TimestampFormat string   // layout passed to time.Time.Format
	LevelDesc       []string // four-letter tag per logrus level, indexed by entry.Level
}

// NewPlainFormatter creates a formatter using a "2006-01-02 15:04:05"
// timestamp layout and the standard level tags.
func NewPlainFormatter() (formatter *PlainFormatter) {
	formatter = &PlainFormatter{
		TimestampFormat: "2006-01-02 15:04:05",
		LevelDesc:       []string{"PANC", "FATL", "ERRO", "WARN", "INFO", "DEBG"},
	}
	return
}

// Format renders one log entry as "timestamp LEVEL message\n".
func (f *PlainFormatter) Format(entry *log.Entry) ([]byte, error) {
	// Bug fix: the timestamp was previously passed through fmt.Sprintf as a
	// non-constant format string (a go vet error, and corrupted output if the
	// formatted time ever contained a '%'). Use the formatted time directly.
	timestamp := entry.Time.Format(f.TimestampFormat)
	return []byte(fmt.Sprintf("%s %s %s\n", timestamp, f.LevelDesc[entry.Level], entry.Message)), nil
}
|
package router
import (
HomeHandler "github.com/shwetha-pingala/HyperledgerProject/InvoiveProject/go-api/routes/home"
"github.com/shwetha-pingala/HyperledgerProject/InvoiveProject/go-api/models"
StatusHandler "github.com/shwetha-pingala/HyperledgerProject/InvoiveProject/go-api/routes/status"
)
// GetRoutes returns the API's route table: the home page and a status
// endpoint, both served via GET.
func GetRoutes() models.Routes {
return models.Routes{
models.Route{Name: "Home", Method: "GET", Pattern: "/", HandlerFunc: HomeHandler.Index},
models.Route{Name: "Status", Method: "GET", Pattern: "/status", HandlerFunc: StatusHandler.Index},
}
}
|
// Copyright 2019 GoAdmin Core Team. All rights reserved.
// Use of this source code is governed by a Apache-2.0 style
// license that can be found in the LICENSE file.
package context
import "fmt"
// node is one segment of the routing tree. value is the path segment it
// matches ("*" is a wildcard); method and handle are index-aligned, so
// handle[i] is the handler chain registered for method[i].
type node struct {
	children []*node
	value    string
	method   []string
	handle   [][]Handler
}
// tree returns the root of an empty routing tree, matching the path "/".
func tree() *node {
	root := &node{value: "/"}
	root.children = make([]*node, 0)
	return root
}
// hasMethod returns the index of method in n.method, or -1 when the node
// does not answer that method.
func (n *node) hasMethod(method string) int {
	for i := 0; i < len(n.method); i++ {
		if n.method[i] == method {
			return i
		}
	}
	return -1
}
// addMethodAndHandler registers an HTTP method and its handler chain on n,
// keeping the two slices index-aligned.
func (n *node) addMethodAndHandler(method string, handler []Handler) {
	n.handle = append(n.handle, handler)
	n.method = append(n.method, method)
}
// addChild appends c to n's children.
func (n *node) addChild(c *node) {
	n.children = append(n.children, c)
}
// addContent returns the child of n matching value, creating and
// attaching a fresh leaf when no child matches.
func (n *node) addContent(value string) *node {
	if existing := n.search(value); existing != nil {
		return existing
	}
	fresh := &node{children: make([]*node, 0), value: value}
	n.addChild(fresh)
	return fresh
}
// search returns the first child whose value equals value or is the
// wildcard "*", or nil when no child matches.
func (n *node) search(value string) *node {
	for i := range n.children {
		if c := n.children[i]; c.value == value || c.value == "*" {
			return c
		}
	}
	return nil
}
// addPath walks (creating as needed) the chain of segment nodes for paths
// and registers method/handler on the final node.
func (n *node) addPath(paths []string, method string, handler []Handler) {
	cur := n
	for _, seg := range paths {
		cur = cur.addContent(seg)
	}
	cur.addMethodAndHandler(method, handler)
}
// findPath walks the segment chain for paths and returns the handler
// chain registered for method, or nil when the path or method is unknown.
func (n *node) findPath(paths []string, method string) []Handler {
	cur := n
	for _, seg := range paths {
		if cur = cur.search(seg); cur == nil {
			return nil
		}
	}
	if idx := cur.hasMethod(method); idx >= 0 {
		return cur.handle[idx]
	}
	return nil
}
// print dumps this node to stdout (debug aid).
func (n *node) print() {
	fmt.Println(n)
}

// printChildren prints n and, recursively, its whole subtree (debug aid).
func (n *node) printChildren() {
	n.print()
	for i := range n.children {
		n.children[i].printChildren()
	}
}
// stringToArr splits a URL path into its segments, replacing any segment
// containing a ":" parameter marker with the wildcard "*".
// Examples: "/a/b" -> ["a", "b"]; "/a/:id" -> ["a", "*"].
//
// NOTE(review): a path ending in "/" triggers both the last-character and
// separator branches in one iteration, so "/a/" yields ["a/", "a"] —
// confirm callers never pass trailing slashes.
func stringToArr(path string) []string {
	var (
		paths      = make([]string, 0)
		start      = 0
		end        int
		isWildcard = false
	)
	for i := 0; i < len(path); i++ {
		// Skip a single leading "/".
		if i == 0 && path[0] == '/' {
			start = 1
			continue
		}
		// Any ":" marks the current segment as a parameter.
		if path[i] == ':' {
			isWildcard = true
		}
		// Flush the final segment when the string ends.
		if i == len(path)-1 {
			end = i + 1
			if isWildcard {
				paths = append(paths, "*")
			} else {
				paths = append(paths, path[start:end])
			}
		}
		// Flush the accumulated segment at each separator.
		if path[i] == '/' {
			end = i
			if isWildcard {
				paths = append(paths, "*")
			} else {
				paths = append(paths, path[start:end])
			}
			start = i + 1
			isWildcard = false
		}
	}
	return paths
}
|
package todolist
import (
"sort"
"strings"
"time"
)
// sortFunc compares two stats: negative when p1 sorts before p2, positive
// when after, zero when equal.
type sortFunc func(p1, p2 *TodoStat) int

// StatSorter sorts a slice of TodoStats with a pluggable comparator.
type StatSorter struct {
	stats []*TodoStat
	less  sortFunc
}
// DateSort returns a comparator ordering TodoStats by PeriodStartDate,
// ascending when asc is true and descending otherwise.
func DateSort(asc bool) sortFunc {
	return func(t1, t2 *TodoStat) int {
		var ret int
		switch {
		case t1.PeriodStartDate.Before(t2.PeriodStartDate):
			ret = -1
		case t1.PeriodStartDate.After(t2.PeriodStartDate):
			ret = 1
		}
		if !asc {
			ret = -ret
		}
		return ret
	}
}
// Sort sorts stats in place using the comparator configured on ss.less.
func (ss *StatSorter) Sort(stats []*TodoStat) {
	ss.stats = stats
	sort.Sort(ss)
}

// Len is part of sort.Interface.
func (ss *StatSorter) Len() int {
	return len(ss.stats)
}

// Swap is part of sort.Interface.
func (ss *StatSorter) Swap(i, j int) {
	ss.stats[i], ss.stats[j] = ss.stats[j], ss.stats[i]
}
// Less is part of sort.Interface. It delegates to the configured
// comparator: -1 means less, 1 means greater, and anything else
// (including 0/equal) is treated as not-less.
func (ss *StatSorter) Less(i, j int) bool {
	switch ss.less(ss.stats[i], ss.stats[j]) {
	case -1:
		return true
	case 1:
		return false
	default:
		return false
	}
}
// TodoStat aggregates todo activity for one period starting at
// PeriodStartDate.
type TodoStat struct {
	PeriodStartDate time.Time
	Pending         int // running count of still-open todos at this period
	Unpending       int // todos leaving the pending state this period
	Added           int
	Modified        int
	Completed       int
	Archived        int
}

// StatsGroup is the date-bucketed stats for one project/context group.
type StatsGroup struct {
	Group    string
	PrevStat *TodoStat //Use to track pending todos across dates
	Stats    []*TodoStat
}

// StatsData is the top-level stats container, keyed by group name.
type StatsData struct {
	Groups map[string]*StatsGroup
}
// GetSortedGroups returns every group with its Stats sorted by ascending
// date. Map iteration order is random, so the groups themselves come back
// in nondeterministic order.
//
// Cleanups: `for k, _ := range` replaced with the idiomatic `for k :=
// range`, and the result slice is pre-sized to the number of groups.
func (s *StatsData) GetSortedGroups() []*StatsGroup {
	sortedGroups := make([]*StatsGroup, 0, len(s.Groups))
	for k := range s.Groups {
		sortedGroups = append(sortedGroups, s.SortedGroup(k))
	}
	return sortedGroups
}
// SortedGroup sorts the named group's Stats in place by ascending date
// and returns the group.
func (s *StatsData) SortedGroup(group string) *StatsGroup {
	sg := s.Groups[group]
	sort.Sort(&StatSorter{stats: sg.Stats, less: DateSort(true)})
	return sg
}
// CalcStats accumulates per-period statistics for todos, grouped by
// project (groupBy "p..."), context (other non-"a..." values), or a
// single "all" group. sum selects the period: 1 weekly, 2 monthly,
// anything else daily. When rangeTimes is non-empty, each group's stats
// are trimmed to [rangeTimes[0], rangeTimes[1]] (or [rangeTimes[0], Now]).
func (s *StatsData) CalcStats(todos []*Todo, groupBy string, sum int, rangeTimes []time.Time) {
	doGroups := (groupBy != "" && !strings.HasPrefix(strings.ToLower(groupBy), "a"))
	for _, todo := range todos {
		if doGroups {
			if strings.HasPrefix(strings.ToLower(groupBy), "p") {
				projects := strings.Join(todo.Projects, ",")
				s.CalcStatsForTodoAndGroup(todo, projects, sum)
			} else {
				contexts := strings.Join(todo.Contexts, ",")
				s.CalcStatsForTodoAndGroup(todo, contexts, sum)
			}
		} else {
			s.CalcStatsForTodoAndGroup(todo, "all", sum)
		}
	}
	// Roll the running pending count forward through each group's
	// date-sorted stats.
	groups := s.GetSortedGroups()
	for _, sg := range groups {
		for _, stat := range sg.Stats {
			if sg.PrevStat != nil {
				stat.Pending = sg.PrevStat.Pending
			}
			sg.PrevStat = stat
			stat.Pending += stat.Added
			stat.Pending -= stat.Unpending
		}
	}
	if len(rangeTimes) > 0 {
		startDate := rangeTimes[0]
		endDate := Now
		if len(rangeTimes) > 1 {
			endDate = rangeTimes[1]
		}
		for _, sg := range groups {
			// BUG FIX: rangeStats was previously declared once outside this
			// loop, so every group after the first was assigned the
			// accumulated filtered stats of all preceding groups as well.
			var rangeStats []*TodoStat
			for _, stat := range sg.Stats {
				if stat.PeriodStartDate.Before(startDate) || stat.PeriodStartDate.After(endDate) {
					continue
				}
				rangeStats = append(rangeStats, stat)
			}
			sg.Stats = rangeStats
		}
	}
}
// CalcStatsForTodoAndGroup folds a single todo into the stats buckets for
// group. sumBy selects the period bucketing: 1 weekly (bow), 2 monthly
// (bom), anything else daily (bod).
//
// Cleanup: the accounting below was previously triplicated verbatim across
// the three sumBy branches, differing only in the period function; the
// branches now share one body.
func (s *StatsData) CalcStatsForTodoAndGroup(todo *Todo, group string, sumBy int) {
	addDate, _ := time.Parse(time.RFC3339, todo.CreatedDate)
	modDate, _ := time.Parse(time.RFC3339, todo.ModifiedDate)
	compDate, _ := time.Parse(time.RFC3339, todo.CompletedDate)

	sg, ok := s.Groups[group]
	if !ok {
		sg = &StatsGroup{Group: group, Stats: []*TodoStat{}}
		s.Groups[group] = sg
	}

	// Pick the period-start function for the requested summation.
	startDateFunc := bod // daily (default)
	switch sumBy {
	case 1:
		startDateFunc = bow // weekly
	case 2:
		startDateFunc = bom // monthly
	}

	var pending = true
	stat := sg.getStatsForDate(startDateFunc(addDate))
	stat.Added++
	sg.getStatsForDate(startDateFunc(modDate)).Modified++
	if todo.Completed {
		stat = sg.getStatsForDate(startDateFunc(compDate))
		stat.Completed++
		if pending {
			stat.Unpending++
			pending = false
		}
	}
	if todo.Status == "Archived" {
		stat = sg.getStatsForDate(startDateFunc(modDate))
		stat.Archived++
		if pending {
			stat.Unpending++
			pending = false
		}
	}
}
// getStatsForDate returns the group's stat bucket for date, creating one
// when the date has not been seen yet.
func (sg *StatsGroup) getStatsForDate(date time.Time) *TodoStat {
	for i := range sg.Stats {
		if sg.Stats[i].PeriodStartDate.Equal(date) {
			return sg.Stats[i]
		}
	}
	fresh := &TodoStat{PeriodStartDate: date}
	sg.Stats = append(sg.Stats, fresh)
	return fresh
}
|
package controllers
import (
"FinalProject/BlogApi/models"
"encoding/json"
"github.com/astaxie/beego"
)
// Operations about Users
// ArticleController serves the blog-post REST endpoints (the "Users" note
// above is inherited from a template; the handlers below operate on posts).
type ArticleController struct {
	beego.Controller
}
// @Title CreateUser
// @Description create users
// @Param body body models.User true "body for user content"
// @Success 200 {int} models.User.Id
// @Failure 403 body is empty
// @router / [post]
func (u *ArticleController) Post() {
	var posts models.Posts
	// NOTE(review): the Unmarshal error is discarded, so a malformed body
	// silently stores a zero-valued post — confirm this is intended.
	json.Unmarshal(u.Ctx.Input.RequestBody, &posts)
	uid := models.AddPost(posts)
	u.Data["json"] = map[string]string{"uid": uid}
	u.ServeJSON()
}
// @Title GetAll
// @Description get all Users
// @Success 200 {object} models.Article
// @router / [get]
func (u *ArticleController) GetAll() {
	// Fetch every stored post and serve it as a JSON array.
	article := models.GetPostAll()
	u.Data["json"] = article
	u.ServeJSON()
}
// @Title Get
// @Description get user by uid
// @Param uid path string true "The key for staticblock"
// @Success 200 {object} models.User
// @Failure 403 :uid is empty
// @router /:uid [get]
// func (u *ArticleController) Get() {
// uid := u.GetString(":uid")
// if uid != "" {
// user, err := models.GetUser(uid)
// if err != nil {
// u.Data["json"] = err.Error()
// } else {
// u.Data["json"] = user
// }
// }
// u.ServeJSON()
// }
|
package text
import (
"unicode"
"github.com/texttheater/golang-levenshtein/levenshtein"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/model"
)
// levensteinOpts configures Levenshtein distance with unit insert/delete/
// substitute costs and case-insensitive rune comparison.
var levensteinOpts = levenshtein.Options{
	InsCost: 1,
	DelCost: 1,
	SubCost: 1,
	Matches: func(r rune, r2 rune) bool {
		return unicode.ToLower(r) == unicode.ToLower(r2)
	},
}
// options holds the tunables applied by MatchOpt functions.
type options struct {
	minRatio float64 // minimum similarity ratio to accept a match
	prefix   string  // optional prefix also tried before text/candidates
}

// MatchCandidates is an indexed collection of strings to fuzzy-match
// against.
type MatchCandidates interface {
	Len() int
	TextOf(idx int) string
}
// BestMatch fuzzy-matches text against every candidate using a
// case-insensitive Levenshtein similarity ratio and returns the index of
// the best candidate, or (-1, false) when no candidate reaches the
// minimum ratio (default 0.7). When a prefix option is set, prefixed
// variants of both text and each candidate are also tried.
func BestMatch(text string, candidates MatchCandidates, opts ...MatchOpt) (int, bool) {
	cfg := options{minRatio: 0.7}
	for _, apply := range opts {
		apply(&cfg)
	}
	total := candidates.Len()
	if total == 0 {
		return -1, false
	}
	best, bestIdx := 0.0, -1
	// consider updates the running best with the ratio of a against b.
	consider := func(idx int, a, b string) {
		if r := levenshtein.RatioForStrings([]rune(a), []rune(b), levensteinOpts); r > best {
			best, bestIdx = r, idx
		}
	}
	for idx := 0; idx < total; idx++ {
		cand := candidates.TextOf(idx)
		consider(idx, text, cand)
		if cfg.prefix != "" {
			consider(idx, cfg.prefix+" "+text, cand)
			consider(idx, text, cfg.prefix+" "+cand)
		}
	}
	if best < cfg.minRatio {
		return -1, false
	}
	return bestIdx, true
}
// MatchOpt mutates the match options.
type MatchOpt func(*options)

// MatchMinRatio overrides the minimum acceptable match ratio.
func MatchMinRatio(r float64) MatchOpt {
	return func(o *options) { o.minRatio = r }
}

// MatchOptPrefix sets a prefix that is also tried in front of the text
// and of each candidate.
func MatchOptPrefix(prefix string) MatchOpt {
	return func(o *options) { o.prefix = prefix }
}
// ACLMatcher adapts a list of ACL entries for fuzzy matching by alias.
type ACLMatcher []*model.ACLEntry

func (o ACLMatcher) Len() int {
	return len(o)
}

// TextOf returns the alias of the idx-th entry.
func (o ACLMatcher) TextOf(idx int) string {
	return o[idx].Alias
}

// ListItemsMatcher adapts todo-list items for fuzzy matching by text.
type ListItemsMatcher []*model.ListItem

func (o ListItemsMatcher) Len() int {
	return len(o)
}

// TextOf returns the text of the idx-th item.
func (o ListItemsMatcher) TextOf(idx int) string {
	return o[idx].Text
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package operations
import (
"bytes"
"fmt"
"log"
"net"
"golang.org/x/crypto/ssh"
)
// RemoteRun executes cmd on addr:port over SSH as user, authenticating
// with the private key sshKey, and returns the command's stdout.
//
// Fixes: an unparsable private key is now returned as an error instead of
// killing the entire process via log.Fatalf (library code must not exit),
// and the SSH client connection is closed when done (it used to leak one
// TCP connection per call).
func RemoteRun(user string, addr string, port int, sshKey []byte, cmd string) (string, error) {
	// Create the Signer for this private key.
	signer, err := ssh.ParsePrivateKey(sshKey)
	if err != nil {
		log.Printf("unable to parse private key: %v", err)
		return "", err
	}
	// Authentication
	config := &ssh.ClientConfig{
		User: user,
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(signer),
		},
		// NOTE(review): host key verification is disabled, matching the
		// original behavior — confirm this is acceptable for this use.
		HostKeyCallback: func(string, net.Addr, ssh.PublicKey) error { return nil },
	}
	// Connect
	client, err := ssh.Dial("tcp", fmt.Sprintf("%s:%d", addr, port), config)
	if err != nil {
		return "", err
	}
	defer client.Close()
	// Create a session. It is one session per command.
	session, err := client.NewSession()
	if err != nil {
		return "", err
	}
	defer session.Close()
	var b bytes.Buffer
	session.Stdout = &b // get output
	err = session.Run(cmd)
	return b.String(), err
}
|
package transportador
import (
"context"
"encoding/json"
"net/http"
)
type (
	// CriarEntregaRequest is the JSON request body for creating a delivery.
	CriarEntregaRequest struct {
		Entrega Entrega
	}
	// CriarEntregaResponse carries the voucher issued for a delivery.
	CriarEntregaResponse struct {
		Voucher Voucher
	}
)
func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {
return json.NewEncoder(w).Encode(response)
}
// decodeEntregaReq decodes the JSON request body into a
// CriarEntregaRequest. ctx is unused; the signature matches the go-kit
// transport decoder contract.
func decodeEntregaReq(ctx context.Context, r *http.Request) (interface{}, error) {
	var req CriarEntregaRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		return nil, err
	}
	return req, nil
}
|
package route
import (
"github.com/kataras/iris"
"github.com/kataras/iris/context"
"github.com/kataras/iris/core/router"
"gocherry-api-gateway/admin/controllers"
)
// RegisterRoutes wires every admin API endpoint onto the iris
// application: a health check, login, and the app/cluster/server/api/user
// route groups.
func RegisterRoutes(app *iris.Application) {
	// Health check.
	app.Get("/", func(ctx context.Context) {
		_, _ = ctx.WriteString("admin 200")
	})
	app.Post("/login", controllers.LoginHandler)
	/**
	application level
	*/
	app.PartyFunc("/app", func(route router.Party) {
		route.Any("/list", controllers.AppListHandler)
		route.Post("/save", controllers.AppSaveHandler)
	})
	/**
	services (clusters)
	*/
	app.PartyFunc("/cluster", func(route router.Party) {
		route.Post("/list", controllers.ClusterListHandler)
		route.Post("/save", controllers.ClusterSaveHandler)
	})
	/**
	distributed nodes of a service
	*/
	app.PartyFunc("/server", func(route router.Party) {
		route.Post("/list", controllers.ServerListHandler)
		route.Post("/save", controllers.ServerSaveHandler)
		route.Post("/del", controllers.ServerDelHandler)
	})
	/**
	api endpoints
	*/
	app.PartyFunc("/api", func(route router.Party) {
		route.Post("/list", controllers.ApiListHandler)
		route.Post("/save", controllers.ApiSaveHandler)
		route.Post("/get_one", controllers.ApiGetOneHandler)
		route.Post("/del", controllers.ApiDelOneHandler)
	})
	/**
	user endpoints
	*/
	app.PartyFunc("/user", func(route router.Party) {
		route.Post("/list", controllers.UserListHandler)
		route.Post("/save", controllers.UserSaveHandler)
		route.Post("/del", controllers.UserDelHandler)
	})
	app.PartyFunc("/admin", func(route router.Party) {
		// Add admin middleware: only administrators may perform these
		// operations, e.g. adding/deleting users or deleting services.
		route.Post("/user_add", func(ctx context.Context) {
		})
	})
}
|
package 矩阵
// -------------------------------- SubrectangleQueries --------------------------------
// Runtime: 56 ms, faster than 92.31% of Go submissions.
// Memory: 7.2 MB, better than 100.00% of Go submissions.
//
// Overview: a non-brute-force solution —
//   UpdateSubrectangle: O(1)
//   GetValue: O(x), where x is the number of UpdateSubrectangle calls so far.
type SubrectangleQueries struct {
	matrix           [][]int            // the original, never-mutated matrix
	updateRectangles []*UpdateRectangle // update log, newest last
}
// Constructor wraps matrix with an empty update log.
func Constructor(matrix [][]int) SubrectangleQueries {
	sq := SubrectangleQueries{matrix: matrix}
	sq.updateRectangles = make([]*UpdateRectangle, 0)
	return sq
}
// UpdateSubrectangle records the rectangle update in O(1) rather than
// painting the matrix.
func (sq *SubrectangleQueries) UpdateSubrectangle(row1 int, col1 int, row2 int, col2 int, newValue int) {
	rect := NewUpdateRectangle(row1, col1, row2, col2, newValue)
	sq.updateRectangles = append(sq.updateRectangles, rect)
}
// GetValue returns the value of the newest recorded update covering
// (row, col), falling back to the original matrix value.
func (sq *SubrectangleQueries) GetValue(row int, col int) int {
	for i := len(sq.updateRectangles); i > 0; i-- {
		if rect := sq.updateRectangles[i-1]; isCoordinateInUpdateRectangle(rect, row, col) {
			return rect.Value
		}
	}
	return sq.matrix[row][col]
}
// isCoordinateInUpdateRectangle reports whether (x, y) lies inside the
// inclusive bounds of rectangle.
func isCoordinateInUpdateRectangle(rectangle *UpdateRectangle, x, y int) bool {
	if x < rectangle.LeftUpX || x > rectangle.RightDownX {
		return false
	}
	return y >= rectangle.LeftUpY && y <= rectangle.RightDownY
}
// -------------------------------- UpdateRectangle --------------------------------
// UpdateRectangle is one logged rectangle update: inclusive corner
// coordinates plus the value painted over that region.
type UpdateRectangle struct {
	LeftUpX    int
	LeftUpY    int
	RightDownX int
	RightDownY int
	Value      int
}
// NewUpdateRectangle builds an UpdateRectangle covering rows row1..row2
// and columns col1..col2 (inclusive), painted with value.
func NewUpdateRectangle(row1 int, col1 int, row2 int, col2 int, value int) *UpdateRectangle {
	return &UpdateRectangle{
		LeftUpX:    row1,
		LeftUpY:    col1,
		RightDownX: row2,
		RightDownY: col2,
		Value:      value,
	}
}
/*
Problem link: https://leetcode-cn.com/problems/subrectangle-queries/
Takeaway:
1. When updating, recording a marker is sometimes enough — there is no need to apply the update for real.
*/
|
package soapboxd
import "database/sql"
// newNullString wraps s in a valid (non-NULL) sql.NullString.
func newNullString(s string) sql.NullString {
	var ns sql.NullString
	ns.String = s
	ns.Valid = true
	return ns
}
func nullString(ns sql.NullString) string {
if ns.Valid {
return ns.String
}
return ""
}
|
package nominetuk
import (
"encoding/json"
"github.com/nbio/xx"
)
// Result represents an EPP <result> element: a status code, a message,
// and an embedded extValue name/reason pair.
type Result struct {
	Code    int    `xml:"code,attr" json:"code"`
	Message string `xml:"msg" json:"message"`
	ExtValue `json:"ext_value"`
}
// IsError determines whether an EPP status code is an error (codes 2000
// and above per the protocol).
// https://tools.ietf.org/html/rfc5730#section-3
func (r *Result) IsError() bool {
	return !(r.Code < 2000)
}
// Error implements the error interface by rendering the result as JSON.
func (r *Result) Error() string {
	b, _ := json.Marshal(r)
	return string(b)
}
// IsFatal determines whether an EPP status code is a fatal response
// (codes 2500 and above), after which the connection should be closed.
// https://tools.ietf.org/html/rfc5730#section-3
func (r *Result) IsFatal() bool {
	return !(r.Code < 2500)
}
// ExtValue carries the <extValue> name/reason pair from an EPP result.
//
// NOTE(review): these fields are unexported, so encoding/json ignores
// them (and their json tags) when a Result is marshalled — Error() output
// never includes them. Export the fields if they should appear.
type ExtValue struct {
	name   string `json:"name"`
	reason string `json:"reason"`
}
// init registers scan handlers that populate Result (code, message, and
// extValue name/reason) while parsing an <epp><response><result> element.
func init() {
	path := "epp>response>result"
	scanResponse.MustHandleStartElement(path, func(c *xx.Context) error {
		res := &c.Value.(*response_).Result
		res.Code = c.AttrInt("", "code")
		return nil
	})
	scanResponse.MustHandleCharData(path+"> msg", func(c *xx.Context) error {
		c.Value.(*response_).Result.Message = trimString(string(c.CharData))
		return nil
	})
	scanResponse.MustHandleCharData(path+"> extValue > value > name", func(c *xx.Context) error {
		c.Value.(*response_).Result.ExtValue.name = trimString(string(c.CharData))
		return nil
	})
	scanResponse.MustHandleCharData(path+"> extValue > reason", func(c *xx.Context) error {
		c.Value.(*response_).Result.ExtValue.reason = trimString(string(c.CharData))
		return nil
	})
}
|
package _006_zigzag_conversion
// convert writes s in a zigzag pattern over numRows rows and reads the
// result row by row (LeetCode 6, "ZigZag Conversion").
//
// Fix: the guard now covers numRows <= 1. The old check only handled
// numRows == 0; with numRows == 1 the direction toggled on every
// character (curRow is always both 0 and numRows-1), walking curRow past
// the single row and panicking on any input of length >= 2.
func convert(s string, numRows int) string {
	if numRows <= 1 {
		return s
	}
	// Never allocate more rows than there are characters.
	rows := numRows
	if len(s) < rows {
		rows = len(s)
	}
	z := make([]string, rows)
	curRow, goingDown := 0, false
	for _, c := range s {
		z[curRow] += string(c)
		// Reverse direction at the top and bottom rows.
		if curRow == 0 || curRow == numRows-1 {
			goingDown = !goingDown
		}
		if goingDown {
			curRow++
		} else {
			curRow--
		}
	}
	res := ""
	for _, row := range z {
		res += row
	}
	return res
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
// rotate turns the square matrix 90 degrees clockwise in place: first
// transpose across the main diagonal, then mirror each row horizontally.
func rotate(matrix [][]int) {
	n := len(matrix)
	// Transpose: swap each below-diagonal element with its mirror.
	for i := 1; i < n; i++ {
		for j := 0; j < i; j++ {
			matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
		}
	}
	// Reverse every row.
	for i := 0; i < n; i++ {
		row := matrix[i]
		for l, r := 0, n-1; l < r; l, r = l+1, r-1 {
			row[l], row[r] = row[r], row[l]
		}
	}
}
package c31_hmac_sha1_timing_leak
import (
"bytes"
"fmt"
"strings"
"testing"
"github.com/vodafon/cryptopals/set1/c1_hex_to_base64"
)
// TTable is one HMAC test vector: a key and the expected hex-encoded
// HMAC-SHA1 of the fixed input used in the test below.
type TTable struct {
	key string
	exp string
}
// TestHMACImplementation checks HMAC-SHA1 output against known vectors
// for a short key, a block-sized (64-byte) key, and an oversized
// (70-byte) key.
//
// Fixes: the table-loop variable used to shadow the *testing.T parameter
// `t`, and mismatches were only printed with fmt.Printf — so the test
// could never actually fail. Failures are now reported via t.Error.
func TestHMACImplementation(t *testing.T) {
	ttb := []TTable{
		{
			key: "KEY",
			exp: "c4473eba2b6e74a0adc0abbb4216676967626127",
		},
		{
			key: strings.Repeat("A", 64),
			exp: "3a40869699fafd80f32200a927481822ac57c962",
		},
		{
			key: strings.Repeat("B", 70),
			exp: "4bf35b5af0ea3f3b0dd5522d2ab09a690242d469",
		},
	}
	inp := []byte("Some text")
	for _, tc := range ttb {
		key := []byte(tc.key)
		exp := c1_hex_to_base64.ParseHex(tc.exp)
		hs := NewHMACSystem(key)
		mac := hs.HMAC(inp)
		if !bytes.Equal(mac, exp) {
			t.Error(fmt.Sprintf("Incorrect HMAC for %q:%q. Expected %x, got %x", key, inp, exp, mac))
		}
	}
}
|
package main
import (
"encoding/json"
"fmt"
)
// Server is one named server endpoint entry.
type Server struct {
	ServerName string
	ServerIP   string
}

// Serverslice is the top-level JSON document: {"servers": [...]}.
// encoding/json matches keys case-insensitively, so the lowercase
// "serverName"/"serverIP" keys in the input still bind to these fields.
type Serverslice struct {
	Servers []Server
}
// main demonstrates decoding a JSON server list into Serverslice and
// printing the result.
func main() {
	var s Serverslice
	str := `{"servers":[{"serverName":"Shanghai_VPN","serverIP":"127.0.0.1"},{"serverName":"Beijing_VPN","serverIP":"127.0.0.2"}]}`
	// NOTE(review): the Unmarshal error is ignored — acceptable for a demo,
	// but a malformed document would print a zero value silently.
	json.Unmarshal([]byte(str), &s)
	fmt.Println(s)
	// str1 := `{"code":0,"data":{"country":"\u4e2d\u56fd","country_id":"CN","area":"\u534e\u4e2d","area_id":"400000","region":"\u6cb3\u5357\u7701","region_id":"410000","city":"\u90d1\u5dde\u5e02","city_id":"410100","county":"","county_id":"-1","isp":"\u8054\u901a","isp_id":"100026","ip":"182.118.53.124"}}`
}
|
/*
Copyright 2019 The Yingxi.company Authors. All rights reserved.
Go
go get github.com/spf13/viper
go get github.com/go-fsnotify/fsnotify
Util
*/
package util
import (
"github.com/fsnotify/fsnotify"
"github.com/spf13/viper"
"github.com/lexkong/log"
)
// Config names a configuration set. Note: getConfig currently hardcodes
// "conf/web.yaml" and does not read Name.
type Config struct {
	Name string
}
// getConfig loads conf/web.yaml into viper's global configuration.
func (conf *Config) getConfig() error {
	// Search path and file name (conf/web), parsed as YAML.
	viper.AddConfigPath("conf")
	viper.SetConfigName("web")
	viper.SetConfigType("yaml")
	return viper.ReadInConfig()
}
// InitLog loads the configuration named cfg and starts watching the
// config file for changes in the background.
func InitLog(cfg string) error {
	conf := &Config{Name: cfg}
	if err := conf.getConfig(); err != nil {
		return err
	}
	go conf.watchConfig()
	return nil
}
// watchConfig hot-reloads the configuration file, logging every change
// event.
func (c *Config) watchConfig() {
	viper.WatchConfig()
	viper.OnConfigChange(func(e fsnotify.Event) {
		log.Infof("[Web] Config file changed: %s\n", e.Name)
	})
}
// 获取Key值
func GetKeyByConf(key string) string {
if key == "" {
return ""
}
return viper.GetString(key)
} |
package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/gorilla/mux"
"github.com/kylegrantlucas/platform-exercise/handlers/session"
"github.com/kylegrantlucas/platform-exercise/handlers/user"
"github.com/kylegrantlucas/platform-exercise/pkg/postgres"
"github.com/pascaldekloe/jwt"
"github.com/sirupsen/logrus"
"github.com/urfave/negroni"
)
// attachHandlers registers the user and session REST endpoints on router.
// Mutating endpoints are wrapped in a JWT handler that validates the
// token (signed with the JWT_KEY secret) and copies the sub/sid claims
// into the X-Verified-User-Uuid / X-Verified-Session-Uuid request headers.
func attachHandlers(router *mux.Router) {
	keys := &jwt.KeyRegister{Secrets: [][]byte{[]byte(os.Getenv("JWT_KEY"))}}
	headers := map[string]string{
		"sub": "X-Verified-User-Uuid",
		"sid": "X-Verified-Session-Uuid",
	}
	// Every response is forced to Content-Type: application/json.
	router.Use(jsonMiddleware)
	// User Handlers
	router.HandleFunc("/users", user.Create).Methods("POST")
	router.Handle("/users", &jwt.Handler{Target: http.HandlerFunc(user.Delete), HeaderBinding: headers, Keys: keys}).Methods("DELETE")
	router.Handle("/users", &jwt.Handler{Target: http.HandlerFunc(user.Update), HeaderBinding: headers, Keys: keys}).Methods("PUT")
	// Session Handlers
	router.HandleFunc("/sessions", session.Create).Methods("POST")
	router.Handle("/sessions", &jwt.Handler{Target: http.HandlerFunc(session.Delete), HeaderBinding: headers, Keys: keys}).Methods("DELETE")
}
// main connects to Postgres, wires the mux router plus the negroni
// middleware stack (recovery + request logging), and serves HTTP on
// $PORT (default 8080).
func main() {
	// Setup Postgres connection early, so we can fail fast if it doesn't work
	var err error
	postgres.DB, err = postgres.CreateDatabase(os.Getenv("PG_HOST"), os.Getenv("PG_PORT"), os.Getenv("PG_USER"), os.Getenv("PG_PASS"), os.Getenv("PG_DB_NAME"))
	if err != nil {
		log.Fatalf("couldn't connect to postgres: %v", err)
	}
	// Setup our mux router, handlers, negroni middleware and logger
	router, n, recovery := mux.NewRouter().StrictSlash(true), negroni.New(), negroni.NewRecovery()
	setupLogger(recovery)
	attachHandlers(router)
	n.Use(recovery)
	n.Use(negroni.NewLogger())
	n.UseHandler(router)
	// Setup and startup our HTTP server
	port := "8080"
	if os.Getenv("PORT") != "" {
		port = os.Getenv("PORT")
	}
	log.Printf("now serving traffic on port :%v", port)
	err = http.ListenAndServe(fmt.Sprintf(":%v", port), n)
	if err != nil {
		log.Fatal(err)
	}
}
// Ensures all requests are Content-Type application/json
func jsonMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
next.ServeHTTP(w, r)
})
}
// setupLogger routes the stdlib logger through logrus and configures
// panic-recovery output: non-production environments get colored text
// logs plus stack traces; production gets JSON logs.
func setupLogger(recovery *negroni.Recovery) {
	logger := logrus.New()
	w := logger.WriterLevel(logrus.ErrorLevel)
	if os.Getenv("GO_ENV") == "production" {
		logger.Formatter = &logrus.JSONFormatter{}
	} else {
		logger.Level = logrus.InfoLevel
		logger.Formatter = &logrus.TextFormatter{ForceColors: true}
		recovery.PrintStack = true
		recovery.Logger = log.New(w, "", 0)
	}
	// Route stdlib log output through logrus, with file:line for tracing.
	log.SetOutput(logger.Writer())
	log.SetFlags(log.LstdFlags | log.Lshortfile)
}
|
/*
* Copyright (c) 2020 - present Kurtosis Technologies LLC.
* All Rights Reserved.
*/
package networks_impl
import (
"github.com/kurtosis-tech/kurtosis-go/lib/networks"
"github.com/kurtosis-tech/kurtosis-go/lib/services"
"github.com/kurtosis-tech/kurtosis-go/testsuite/services_impl/api"
"github.com/kurtosis-tech/kurtosis-go/testsuite/services_impl/datastore"
"github.com/palantir/stacktrace"
"strconv"
"time"
)
const (
	datastoreServiceId services.ServiceID = "datastore"
	apiServiceIdPrefix                    = "api-"
	// Startup check polling: up to 30 polls, one second apart.
	waitForStartupTimeBetweenPolls = 1 * time.Second
	waitForStartupMaxNumPolls      = 30
)
// TestNetwork manages a single datastore container plus any number of API
// containers launched inside a kurtosis network context.
type TestNetwork struct {
	networkCtx            *networks.NetworkContext
	datastoreServiceImage string
	apiServiceImage       string
	datastoreService      *datastore.DatastoreService // nil until AddDatastore
	apiServices           map[services.ServiceID]*api.ApiService
	nextApiServiceId      int // counter used to mint unique API service IDs
}
// NewTestNetwork wraps networkCtx in an empty TestNetwork that will
// launch services from the given docker images.
func NewTestNetwork(networkCtx *networks.NetworkContext, datastoreServiceImage string, apiServiceImage string) *TestNetwork {
	n := &TestNetwork{networkCtx: networkCtx}
	n.datastoreServiceImage = datastoreServiceImage
	n.apiServiceImage = apiServiceImage
	n.apiServices = map[services.ServiceID]*api.ApiService{}
	// datastoreService stays nil and nextApiServiceId 0 (zero values).
	return n
}
// AddDatastore launches the datastore container and waits for it to pass
// its startup checks; calling it twice is an error.
func (network *TestNetwork) AddDatastore() error {
	if network.datastoreService != nil {
		return stacktrace.NewError("Cannot add datastore service to network; datastore already exists!")
	}
	initializer := datastore.NewDatastoreContainerInitializer(network.datastoreServiceImage)
	uncasted, checker, err := network.networkCtx.AddService(datastoreServiceId, initializer)
	if err != nil {
		return stacktrace.Propagate(err, "An error occurred adding the datastore service")
	}
	if err := checker.WaitForStartup(waitForStartupTimeBetweenPolls, waitForStartupMaxNumPolls); err != nil {
		return stacktrace.Propagate(err, "An error occurred waiting for the datastore service to start")
	}
	network.datastoreService = uncasted.(*datastore.DatastoreService)
	return nil
}
// GetDatastore returns the datastore service, or nil when AddDatastore
// has not been called.
func (network *TestNetwork) GetDatastore() *datastore.DatastoreService {
	return network.datastoreService
}
// AddApiService launches one more API container wired to the datastore
// and returns its generated service ID; the datastore must exist first.
func (network *TestNetwork) AddApiService() (services.ServiceID, error) {
	if network.datastoreService == nil {
		return "", stacktrace.NewError("Cannot add API service to network; no datastore service exists")
	}
	// Mint a unique ID: "api-0", "api-1", ...
	serviceId := services.ServiceID(apiServiceIdPrefix + strconv.Itoa(network.nextApiServiceId))
	network.nextApiServiceId++
	initializer := api.NewApiContainerInitializer(network.apiServiceImage, network.datastoreService)
	uncasted, checker, err := network.networkCtx.AddService(serviceId, initializer)
	if err != nil {
		return "", stacktrace.Propagate(err, "An error occurred adding the API service")
	}
	if err := checker.WaitForStartup(waitForStartupTimeBetweenPolls, waitForStartupMaxNumPolls); err != nil {
		return "", stacktrace.Propagate(err, "An error occurred waiting for the API service to start")
	}
	network.apiServices[serviceId] = uncasted.(*api.ApiService)
	return serviceId, nil
}
// GetApiService looks up a previously added API service by ID.
func (network *TestNetwork) GetApiService(serviceId services.ServiceID) (*api.ApiService, error) {
	if service, found := network.apiServices[serviceId]; found {
		return service, nil
	}
	return nil, stacktrace.NewError("No API service with ID '%v' has been added", serviceId)
}
|
// Security:
// - api_key:
//
// SecurityDefinitions:
// - api-key:
// type: apiKey
// name: session_id
// in: header
//
// swagger:meta
package handlers
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"io"
"io/ioutil"
"log"
"net/http"
"github.com/rest_service_task/impl/db"
"github.com/rest_service_task/impl/sessions"
)
// Handlers groups the dependencies shared by the HTTP handler set:
// persistence, session management, and logging.
type Handlers struct {
	database db.DBConnection
	secure   sessions.SessionManager
	logger   *log.Logger
}
// NewHandlerSet bundles the database connection, session manager, and
// logger used by every HTTP handler.
func NewHandlerSet(db db.DBConnection, security sessions.SessionManager, log *log.Logger) *Handlers {
	h := &Handlers{}
	h.database = db
	h.secure = security
	h.logger = log
	return h
}
// HashPassword returns the hex-encoded SHA-1 digest of pass.
//
// NOTE(review): unsalted SHA-1 is not a safe password hash (bcrypt or
// argon2 would be); behavior is kept as-is because stored hashes depend
// on this scheme.
func HashPassword(pass string) string {
	sum := sha1.Sum([]byte(pass))
	return hex.EncodeToString(sum[:])
}
func ReadBody(r *http.Request, v interface{}) error {
defer r.Body.Close()
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
if err != nil {
return err
}
if err = json.Unmarshal(body, v); err != nil {
return err
}
// decoder := json.NewDecoder(r.Body)
return nil
//return decoder.Decode(v)
}
|
//go:build tools
// This package exists to cause `go mod` and `go get` to believe these tools
// are dependencies, even though they are not runtime dependencies.
package tools
import (
_ "github.com/client9/misspell/cmd/misspell"
_ "github.com/golang/protobuf/protoc-gen-go"
_ "golang.org/x/lint/golint"
_ "golang.org/x/tools/cmd/goimports"
_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc"
_ "honnef.co/go/tools/cmd/staticcheck"
)
|
package module
import (
"math"
"buddin.us/eolian/dsp"
)
// init registers the Compress module constructor with the module
// registry; the Config argument is unused.
func init() {
	Register("Compress", func(Config) (Patcher, error) { return newCompress() })
}
// compress is an envelope-follower compressor module with a signal input
// and attack/release control inputs; dcBlock filters the output.
type compress struct {
	IO
	in, attack, release *In
	envelope            dsp.Float64 // running level estimate of the input
	dcBlock             *dsp.DCBlock
}
// newCompress builds a Compress module: one signal input plus buffered
// attack (default 10) and release (default 500) duration controls,
// exposing a single output.
func newCompress() (*compress, error) {
	m := &compress{
		dcBlock: &dsp.DCBlock{},
		in:      NewIn("input", dsp.Float64(0)),
		attack:  NewInBuffer("attack", dsp.Duration(10)),
		release: NewInBuffer("release", dsp.Duration(500)),
	}
	ins := []*In{m.in, m.attack, m.release}
	outs := []*Out{{Name: "output", Provider: dsp.Provide(m)}}
	return m, m.Expose("Compress", ins, outs)
}
// Process runs the compressor over one frame in place: an envelope
// follower tracks the absolute input level with separate attack/release
// smoothing, each sample is divided by the envelope whenever the envelope
// exceeds 1, and the result is DC-blocked.
func (c *compress) Process(out dsp.Frame) {
	c.in.Process(out)
	attack, release := c.attack.ProcessFrame(), c.release.ProcessFrame()
	for i := range out {
		in := dsp.Abs(out[i])
		// Rising level uses the attack time constant, falling uses release.
		side := release[i]
		if in > c.envelope {
			side = attack[i]
		}
		// One-pole smoothing coefficient derived from the time constant —
		// presumably side is in samples; TODO confirm against dsp.Duration.
		factor := math.Pow(0.01, float64(1.0/side))
		c.envelope = dsp.Float64(factor)*(c.envelope-in) + in
		if c.envelope > 1 {
			out[i] /= c.envelope
		}
		out[i] = c.dcBlock.Tick(out[i])
	}
}
|
package basic
import (
"log"
"os"
)
// Logger writes timestamped "[goStudy]" entries to LoggerFile
// (debuglog.txt); both are initialized once at package load.
var (
	Logger, LoggerFile = getLogger()
)
// getLogger builds the package logger, writing to debuglog.txt with date
// and time prefixes, and returns the logger together with the open file.
//
// Fix: the error from os.Create was silently discarded, so on failure the
// logger would write to a nil *os.File. It now falls back to stderr and
// returns a nil file in that case.
func getLogger() (*log.Logger, *os.File) {
	file, err := os.Create("debuglog.txt")
	if err != nil {
		// Fall back to stderr so logging still works.
		return log.New(os.Stderr, "[goStudy] ", log.Ldate|log.Ltime), nil
	}
	return log.New(file, "[goStudy] ", log.Ldate|log.Ltime), file
}
package main
import (
"encoding/json"
"flag"
"fmt"
log "github.com/cihub/seelog"
"github.com/goodsign/gosmsc"
"github.com/goodsign/gosmsc/rpcservice"
"github.com/goodsign/goutils/mgo"
"github.com/goodsign/rpc"
gjson "github.com/goodsign/rpc/json"
"io/ioutil"
lmgo "labix.org/v2/mgo"
"net/http"
"os"
"os/signal"
"strconv"
"syscall"
"time"
)
// Process exit codes plus fixed file names and the Mongo dial timeout.
const (
	ErrorCodeInvalidConfig     = -1
	ErrorCodeInvalidArgs       = -2
	ErrorCodeInternalInitError = -3
	ConnectTimeout             = 5 * time.Minute
	SeelogCfg                  = "seelog.xml"
	pidFileName                = "sms-service.pid"
)

// Command-line flags controlling the RPC endpoint, config file, Mongo
// connection, and status-polling interval.
var (
	rpcPath        = flag.String("rpcpath", "rpc", "Rpc service path (http.Handle parameter)")
	port           = flag.String("p", "5678", "Port")
	cfgPath        = flag.String("cfg", "smsc.default.json", "Path to service configuration file")
	mongoPath      = flag.String("dbpath", "localhost", "Mongo path")
	mongoDb        = flag.String("mongodb", "gastody_sms_service", "Mongo DB")
	updateInterval = flag.String("interval", "60000", "Update interval in milliseconds")
)
// loadLogger installs the seelog configuration from seelog.xml into every
// logging consumer (rpcservice, gosmsc, and the global logger); it panics
// when the config cannot be loaded.
func loadLogger() {
	logger, err := log.LoggerFromConfigAsFile(SeelogCfg)
	if err != nil {
		panic(err)
	}
	rpcservice.UseLogger(logger)
	gosmsc.UseLogger(logger)
	log.ReplaceLogger(logger)
}
// unmarshalConfig builds the SMS sender/checker from the JSON config
// file: it unmarshals SmscClientOptions, dials Mongo (flags
// -dbpath/-mongodb), creates the message-status storage, and wires the
// polling interval from the -interval flag (milliseconds).
func unmarshalConfig(configFileName string) (conf *gosmsc.SenderCheckerImpl, err error) {
	log.Infof("loading config from %s", configFileName)
	bytes, err := ioutil.ReadFile(configFileName)
	if err != nil {
		return nil, err
	}
	opts := new(gosmsc.SmscClientOptions)
	log.Debug("Unmarshalling config")
	err = json.Unmarshal(bytes, opts)
	if err != nil {
		return nil, fmt.Errorf("Cannot unmarshal: '%s'", err)
	}
	// NOTE(review): positional struct literal — this silently breaks if
	// lmgo.DialInfo field order changes; prefer keyed fields when touched.
	dinfo := &lmgo.DialInfo{
		[]string{*mongoPath},
		true,
		ConnectTimeout,
		false,
		*mongoDb,
		"",
		"",
		nil,
		nil,
	}
	hlp, err := mgo.Dial(dinfo, &mgo.DbHelperInitOptions{&lmgo.Safe{}})
	if err != nil {
		return nil, err
	}
	str, err := gosmsc.NewMessageStatusMgoStorage(hlp)
	if err != nil {
		return nil, err
	}
	upint, err := strconv.ParseInt(*updateInterval, 10, 32)
	if err != nil {
		return nil, err
	}
	conf, err = gosmsc.NewSenderCheckerImpl(opts, str, time.Millisecond*time.Duration(upint))
	if err != nil {
		return nil, fmt.Errorf("Invalid config: '%s'", err)
	}
	return
}
// fail logs msg at critical level, flushes pending log output, and exits
// the process with code.
func fail(code int, msg string) {
	log.Critical(msg)
	log.Flush()
	os.Exit(code)
}
// main wires up and serves the JSON-RPC SMS service: it validates flags,
// builds the sender from the config file, registers the RPC service and
// codec, writes a pid file (removed on SIGINT/SIGTERM), and serves HTTP.
//
// Fix: the error returned by rpcservice.NewSMSService was never checked —
// a stale `if err != nil` sat *before* the call (rechecking an error that
// had already been handled) and could never fire. The check now validates
// the NewSMSService result before it is registered.
func main() {
	flag.Parse()
	loadLogger()
	defer log.Flush()
	// Validate required flags up front.
	if len(*rpcPath) == 0 {
		fail(ErrorCodeInvalidArgs, "Please specify rpc path")
	}
	if len(*port) == 0 {
		fail(ErrorCodeInvalidArgs, "Please specify port")
	}
	if len(*cfgPath) == 0 {
		fail(ErrorCodeInvalidArgs, "Please specify config file path")
	}
	sender, err := unmarshalConfig(*cfgPath)
	if err != nil {
		fail(ErrorCodeInvalidConfig, fmt.Sprintf("Sender init failed. '%s'", err))
	}
	s := rpc.NewServer()
	s.RegisterCodec(gjson.NewCodec(), "application/json")
	serv, err := rpcservice.NewSMSService(sender)
	if err != nil {
		fail(ErrorCodeInternalInitError, err.Error())
	}
	s.RegisterService(serv, "")
	http.Handle("/"+*rpcPath, s)
	// Log the method list for operator visibility.
	ml, err := s.ListMethods("SMSService")
	if err != nil {
		fail(ErrorCodeInternalInitError, err.Error())
	}
	str := fmt.Sprintf("\nStarting service '/%s' on port ':%s'. \nMethods:\n",
		*rpcPath, *port)
	for _, m := range ml {
		str = str + " " + m + "\n"
	}
	log.Info(str)
	log.Flush()
	// Write the pid file and remove it again on SIGINT/SIGTERM.
	ch := make(chan os.Signal)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
	writePid()
	go signalHandle(ch)
	err = http.ListenAndServe(":"+*port, nil)
	if err != nil {
		fail(ErrorCodeInternalInitError, err.Error())
	}
}
// signalHandle blocks until a termination signal arrives, removes the
// pid file, flushes the logs and exits the process with status 1.
// (The original wrapped this in a for loop, but os.Exit makes a single
// pass equivalent.)
func signalHandle(ch chan os.Signal) {
	sig := <-ch
	log.Debugf("Signal received: %v", sig)
	if err := os.Remove(pidFileName); err != nil {
		log.Error(err)
	}
	log.Flush()
	os.Exit(1)
}
// writePid writes the current process id to pidFileName so the process
// can be managed externally (signalHandle removes the file on shutdown).
// A failure to create the file is fatal (panic); a failed write is
// logged but not fatal.
func writePid() {
	pid := os.Getpid()
	file, err := os.Create(pidFileName)
	if err != nil {
		panic(log.Error(err))
	}
	defer file.Close()
	// Bug fix: the WriteString error was silently discarded.
	if _, err = file.WriteString(fmt.Sprintf("%d", pid)); err != nil {
		log.Error(err)
	}
}
|
package zookeeper
import (
"context"
"github.com/marsmay/golib/logger"
"github.com/samuel/go-zookeeper/zk"
"strings"
"sync"
"time"
)
const (
	// EventTypeAll matches every zk.EventType in the watch helpers.
	EventTypeAll = 0
)

const (
	// FlagPersistent creates a regular, durable node.
	FlagPersistent = 0
	// FlagEphemeralAndSequence creates an ephemeral, sequenced node.
	FlagEphemeralAndSequence = zk.FlagEphemeral + zk.FlagSequence
)

const (
	// WatchTypeNode selects a single-node watch, WatchTypeChildren a
	// children watch.
	WatchTypeNode = iota
	WatchTypeChildren
)

// PermWorldAll grants all permissions to everyone.
var PermWorldAll = zk.WorldACL(zk.PermAll)
// zkLogger adapts our logger to the interface expected by zk.WithLogger;
// the zk library only logs diagnostics, which we record at error level.
type zkLogger struct {
	*logger.Logger
}

// Printf implements the zk logger interface by forwarding to Errorf.
func (l *zkLogger) Printf(format string, args ...interface{}) {
	l.Errorf(format, args...)
}
// Config holds the zookeeper connection settings.
type Config struct {
	Addrs   []string      `toml:"addrs" json:"addrs"` // ensemble addresses (host:port)
	Timeout time.Duration `toml:"timeout" json:"timeout"` // session timeout; multiplied by time.Second in connect — appears to be a bare seconds count, verify
}

// Client wraps a zk connection and manages the lifecycle of its watch
// goroutines (stopped via ctx/cancel, awaited via wg in Close).
type Client struct {
	c      *Config
	conn   *zk.Conn
	logger *logger.Logger
	ctx    context.Context // cancelled by Close to stop watchers
	cancel context.CancelFunc
	wg     *sync.WaitGroup // tracks watch goroutines so Close can wait
}
// WatchNode starts a goroutine that repeatedly arms an existence watch
// on path and invokes callback for each matching event until the client
// context is cancelled. eventType filters events; EventTypeAll passes
// everything through. The goroutine is tracked by the client wait group
// so Close can wait for it.
func (c *Client) WatchNode(path string, eventType zk.EventType, callback func(zk.Event)) {
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		for {
			select {
			case <-c.ctx.Done():
				return
			default:
				_, _, eventCh, err := c.conn.ExistsW(path)
				if err != nil {
					c.logger.Warningf("zookeeper watch failed | path: %s | error: %s", path, err)
					// Bug fix: back off before re-arming the watch so a
					// persistent error does not busy-loop and flood the
					// log; remain responsive to shutdown while waiting.
					select {
					case <-c.ctx.Done():
						return
					case <-time.After(time.Second):
					}
					continue
				}
				event := <-eventCh
				if event.Err != nil {
					if event.Err != zk.ErrClosing {
						c.logger.Warningf("zookeeper watch failed | path: %s | error: %s", path, event.Err)
					}
					continue
				}
				if eventType == EventTypeAll || eventType == event.Type {
					callback(event)
				}
			}
		}
	}()
}
// WatchChildren starts a goroutine that repeatedly arms a children watch
// on path and invokes callback for each matching event until the client
// context is cancelled. ErrNoNode is expected (the node may not exist
// yet) and is not logged. The goroutine is tracked by the client wait
// group so Close can wait for it.
func (c *Client) WatchChildren(path string, eventType zk.EventType, callback func(zk.Event)) {
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		for {
			select {
			case <-c.ctx.Done():
				return
			default:
				_, _, eventCh, err := c.conn.ChildrenW(path)
				if err != nil {
					if err != zk.ErrNoNode {
						c.logger.Warningf("zookeeper watch failed | path: %s | error: %s", path, err)
					}
					// Bug fix: the original re-armed immediately, which
					// hot-spins (burning CPU) while the node is missing or
					// the error persists. Back off, staying shutdown-aware.
					select {
					case <-c.ctx.Done():
						return
					case <-time.After(time.Second):
					}
					continue
				}
				event := <-eventCh
				if event.Err != nil {
					if event.Err != zk.ErrClosing {
						c.logger.Warningf("zookeeper watch failed | path: %s | error: %s", path, event.Err)
					}
					continue
				}
				if eventType == EventTypeAll || eventType == event.Type {
					callback(event)
				}
			}
		}
	}()
}
// connect dials the configured ensemble and stores the connection on
// success.
// NOTE(review): c.c.Timeout is multiplied by time.Second here, so the
// configured value is effectively treated as a bare seconds count even
// though its type is time.Duration — confirm before changing.
func (c *Client) connect() (err error) {
	conn, _, err := zk.Connect(c.c.Addrs, c.c.Timeout*time.Second, zk.WithLogger(&zkLogger{c.logger}), zk.WithLogInfo(false))
	if err == nil {
		c.conn = conn
	}
	return
}
// Create creates the node at path with data, flags and acl, first
// creating any missing parent nodes (parents are persistent, carry the
// same acl and no data).
func (c *Client) Create(path string, data []byte, flags int32, acl []zk.ACL) (err error) {
	items := strings.Split(path, "/")
	if len(items) > 2 {
		parentPath := strings.Join(items[0:len(items)-1], "/")
		ok, _, e := c.conn.Exists(parentPath)
		if e != nil {
			err = e
			return
		}
		if !ok {
			// Robustness fix: another client may create the parent between
			// the Exists check and this call; treat that race as success
			// instead of failing the whole Create.
			if err = c.Create(parentPath, nil, FlagPersistent, acl); err != nil && err != zk.ErrNodeExists {
				return
			}
		}
	}
	_, err = c.conn.Create(path, data, flags, acl)
	return
}
// Update overwrites the data of an existing node at path, using the
// current stat version for the compare-and-set. It returns zk.ErrNoNode
// when the node does not exist.
func (c *Client) Update(path string, data []byte) (err error) {
	exists, stat, err := c.conn.Exists(path)
	if err != nil {
		return
	}
	if !exists {
		err = zk.ErrNoNode
		return
	}
	_, err = c.conn.Set(path, data, stat.Version)
	return
}
// Delete removes the node at path, using the current stat version for
// the compare-and-delete. It returns zk.ErrNoNode when the node does
// not exist.
func (c *Client) Delete(path string) (err error) {
	exists, stat, err := c.conn.Exists(path)
	if err != nil {
		return
	}
	if !exists {
		err = zk.ErrNoNode
		return
	}
	err = c.conn.Delete(path, stat.Version)
	return
}
// Children returns the names of the children of path.
func (c *Client) Children(path string) (children []string, err error) {
	children, _, err = c.conn.Children(path)
	return
}

// GetNode returns the raw data stored at path.
func (c *Client) GetNode(path string) (data []byte, err error) {
	data, _, err = c.conn.Get(path)
	return
}
// GetNodes returns the data of every child of path, keyed by child name.
// A missing path yields a nil map and no error; children that vanish
// between listing and reading are skipped.
func (c *Client) GetNodes(path string) (datas map[string][]byte, err error) {
	var names []string
	if names, _, err = c.conn.Children(path); err != nil {
		if err == zk.ErrNoNode {
			err = nil
		}
		return
	}
	datas = make(map[string][]byte, len(names))
	for _, name := range names {
		value, _, getErr := c.conn.Get(path + "/" + name)
		if getErr == zk.ErrNoNode {
			continue
		}
		if getErr != nil {
			err = getErr
			return
		}
		datas[name] = value
	}
	return
}
// Conn exposes the underlying *zk.Conn for operations not wrapped here.
func (c *Client) Conn() *zk.Conn {
	return c.conn
}

// Close stops all watch goroutines (context cancellation plus closing
// the connection, which unblocks pending watch receives) and waits for
// them to exit.
func (c *Client) Close() {
	c.cancel()
	c.conn.Close()
	c.wg.Wait()
}
// New builds a Client for the given config, wires up its cancellation
// context and wait group, and establishes the zookeeper connection.
// On connection failure the (unusable) client is still returned along
// with the error, as before.
func New(c *Config, logger *logger.Logger) (client *Client, err error) {
	ctx, cancel := context.WithCancel(context.Background())
	client = &Client{
		c:      c,
		logger: logger,
		wg:     &sync.WaitGroup{},
		ctx:    ctx,
		cancel: cancel,
	}
	err = client.connect()
	return
}
|
package proxy_core
import (
"encoding/binary"
"fmt"
"io"
"log"
"net"
"sync"
"time"
)
// Request pairs an accepted connection with the bytes already read from it.
type Request struct {
	Conn net.Conn
	Buff []byte
}
// Server bundles a proxy listener with its most recent client
// connection, a cycle counter V used to detect stale snapshots, and the
// proxied port.
type Server struct {
	Server    net.Listener // accepting listener for this proxy
	V         int          // cycle counter, bumped on each new client
	Client    net.Conn     // most recent client connection
	ProxyPort int32        // port exposed to proxied clients
}

// IncrCycle bumps the cycle counter, records the new client connection
// and returns a snapshot copy of the updated state. Note the receiver
// itself is mutated as well.
func (s *Server) IncrCycle(client net.Conn) Server {
	s.V++
	s.Client = client
	return Server{
		Server:    s.Server,
		V:         s.V,
		Client:    s.Client,
		ProxyPort: s.ProxyPort,
	}
}

// Expire reports whether the server's cycle counter is behind V, i.e.
// whether this state is stale.
func (s *Server) Expire(V int) bool {
	// Idiom fix: return the comparison directly instead of if/else.
	return s.V < V
}
// Close closes every given connection, ignoring close errors.
func Close(cons ...net.Conn) {
	for _, conn := range cons {
		_ = conn.Close()
	}
}
// ListenServer opens a TCP listener on all interfaces at proxyPort and
// logs the address it binds to.
func ListenServer(proxyPort int32) (net.Listener, error) {
	addr := fmt.Sprintf("0.0.0.0:%d", proxyPort)
	log.Println("listen addr :" + addr)
	return net.Listen("tcp", addr)
}
// ProxySwap pipes data in both directions between proxyConn and client
// and blocks until both directions have finished copying.
func ProxySwap(proxyConn net.Conn, client net.Conn) {
	var wg sync.WaitGroup
	wg.Add(2)
	go ConnCopy(proxyConn, client, &wg)
	go ConnCopy(client, proxyConn, &wg)
	wg.Wait()
	//log.Println("conn1 = [" + proxyConn.LocalAddr().String() + "], conn2 = [" + client.RemoteAddr().String() + "] iocopy read/write complete")
}
func ConnCopy(conn1 net.Conn, conn2 net.Conn, wg *sync.WaitGroup) {
_, err := io.Copy(conn1, conn2)
if err != nil {
// 连接中断,不打印日志
//log.Println("conn1 = ["+conn1.LocalAddr().String()+"], conn2 = ["+conn2.RemoteAddr().String()+"] iocopy失败", err)
}
//log.Println("[←]", "close the connect at local:["+conn1.LocalAddr().String()+"] and remote:["+conn1.RemoteAddr().String()+"]")
_ = conn1.Close()
wg.Done()
}
func WritePort(conn net.Conn, port int32) bool {
if err := conn.SetWriteDeadline(time.Now().Add(3 * time.Second)); err != nil {
log.Println("Fail to set write deadline", err)
return false
}
if err := binary.Write(conn, binary.BigEndian, port); err != nil {
return false
}
if err := conn.SetWriteDeadline(time.Time{}); err != nil {
log.Println("Fail to clear write deadline", err)
return false
}
return true
}
// PrintWelcome prints the startup banner, then runs a ~10 second
// progress animation (one ">>>>" per second) before declaring success.
// It deliberately blocks for roughly 11 seconds in total.
func PrintWelcome() {
	fmt.Println("+----------------------------------------------------------------+")
	fmt.Println("|           welcome use ho-huj-net-proxy  Version1.0             |")
	fmt.Println("|               author Ruchsky at 2019-11-14                     |")
	fmt.Println("|        gitee home page  ->  https://gitee.com/ruchsky          |")
	fmt.Println("|      github home page  ->   https://github.com/hujianMr        |")
	fmt.Println("+----------------------------------------------------------------+")
	fmt.Print("start..")
	i := 0
	// One ">>>>" per second, ten times.
	for {
		fmt.Print(">>>>")
		i++
		time.Sleep(time.Second)
		if i >= 10 {
			break
		}
	}
	fmt.Println()
	fmt.Println("start success")
	time.Sleep(time.Second)
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//482. License Key Formatting
//You are given a license key represented as a string S which consists only alphanumeric character and dashes. The string is separated into N+1 groups by N dashes.
//Given a number K, we would want to reformat the strings such that each group contains exactly K characters, except for the first group which could be shorter than K, but still must contain at least one character. Furthermore, there must be a dash inserted between two groups and all lowercase letters should be converted to uppercase.
//Given a non-empty string S and a number K, format the string according to the rules described above.
//Example 1:
//Input: S = "5F3Z-2e-9-w", K = 4
//Output: "5F3Z-2E9W"
//Explanation: The string S has been split into two parts, each part has 4 characters.
//Note that the two extra dashes are not needed and can be removed.
//Example 2:
//Input: S = "2-5g-3-J", K = 2
//Output: "2-5G-3J"
//Explanation: The string S has been split into three parts, each part has 2 characters except the first part as it could be shorter as mentioned above.
//Note:
//The length of string S will not exceed 12,000, and K is a positive integer.
//String S consists only of alphanumerical characters (a-z and/or A-Z and/or 0-9) and dashes(-).
//String S is non-empty.
//func licenseKeyFormatting(S string, K int) string {
//}
// Time Is Money |
// Copyright 2020 Ant Group. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
package rule
// Rule is a named validation check.
type Rule interface {
	// Validate runs the check and returns an error on violation.
	Validate() error
	// Name identifies the rule.
	Name() string
}
|
package main
// main starts the server; all setup lives in makeServer.
func main() {
	makeServer()
}
|
// Package handler 是 RPC 调用的 Handler
package handler
|
package asm
import (
"os"
"regexp"
"strings"
"emperror.dev/errors"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
log "github.com/sirupsen/logrus"
)
// SMPatern is the ARN prefix identifying Secrets Manager references.
// NOTE(review): the name is a typo of "SMPattern", but it is exported,
// so renaming would break callers.
const SMPatern = "arn:aws:secretsmanager:"

// DefaultRegion is used when the AWS_REGION env var is not set.
const DefaultRegion = "eu-west-1"

// Client is a SM custom client
type Client struct {
	api    secretsmanageriface.SecretsManagerAPI // underlying AWS SM API
	region string // default region used for ARN region comparisons
}
// NewClient returns a new Client wrapping the given AWS SM API, with
// its default region taken from AWS_REGION (falling back to
// DefaultRegion).
func NewClient(api secretsmanageriface.SecretsManagerAPI) *Client {
	region := os.Getenv("AWS_REGION")
	if region == "" {
		region = DefaultRegion
	}
	return &Client{api: api, region: region}
}
// NewAPI returns a new concrete AWS SM client for the region taken from
// AWS_REGION (falling back to DefaultRegion).
func NewAPI() *secretsmanager.SecretsManager {
	var region string
	if region = os.Getenv("AWS_REGION"); region == "" {
		region = DefaultRegion
	}
	return secretsmanager.New(NewFromRegion(region))
}

// NewAPIForRegion returns a new concrete AWS SM client for a specific region
func NewAPIForRegion(region string) secretsmanageriface.SecretsManagerAPI {
	return secretsmanager.New(NewFromRegion(region))
}
// GetSecret returns the secret value stored under key, which must be a
// Secrets Manager ARN. When the ARN lives in a different region than
// the client's default, a region-specific API client is used for the
// call.
func (c *Client) GetSecret(key string) (secret string, err error) {
	secretName := c.ExtractPath(key)
	secretRegion := c.ExtractRegion(key)
	api := c.api
	if secretRegion != c.region {
		log.Debugf("Switching regions to %s for key %s", secretRegion, key)
		api = NewAPIForRegion(secretRegion)
	}
	res, err := api.GetSecretValue(new(secretsmanager.GetSecretValueInput).SetSecretId(secretName))
	if err != nil {
		return "", errors.Wrapf(err, "GetSecretValue - Region %s ", secretRegion)
	}
	// Bug fix: binary-only secrets have a nil SecretString (the value is
	// in SecretBinary); the original dereferenced it unconditionally and
	// would panic.
	if res.SecretString == nil {
		return "", errors.Errorf("secret %s has no string value", secretName)
	}
	return *res.SecretString, nil
}
// WithRegion replaces the underlying SM API client with one bound to
// region.
func (c *Client) WithRegion(region string) {
	c.api = secretsmanager.New(NewFromRegion(region))
	// Bug fix: c.region was left stale, so GetSecret kept comparing ARN
	// regions against the old region after an override and needlessly
	// rebuilt region-specific clients.
	c.region = region
}

// IsSecret reports whether key looks like a Secrets Manager ARN.
func (c *Client) IsSecret(key string) bool {
	return strings.Contains(key, "arn:aws:secretsmanager")
}
// pathRe matches the full secret ARN. Compiled once at package scope:
// the original recompiled it on every call.
var pathRe = regexp.MustCompile(`(arn:aws:secretsmanager:[a-z0-9-]+:\d+:secret:[a-zA-Z0-9/._=@-]+)`)

// ExtractPath returns the ARN portion of key, stripping any trailing
// decoration. Badly formatted keys are logged and returned unchanged.
func (c *Client) ExtractPath(key string) (out string) {
	match := pathRe.FindStringSubmatch(key)
	if len(match) < 1 {
		log.Warnf("Badly formatted key %s", key)
		return key
	}
	return match[1]
}
// regionRe captures the region segment of a Secrets Manager ARN.
// Compiled once at package scope: the original recompiled it per call.
var regionRe = regexp.MustCompile(`arn:aws:secretsmanager:([a-z0-9-]+):\d+:`)

// ExtractRegion returns the region embedded in the ARN key, or the
// client's default region when key does not parse.
func (c *Client) ExtractRegion(key string) (region string) {
	match := regionRe.FindStringSubmatch(key)
	if len(match) < 1 {
		return c.region
	}
	return match[1]
}
|
package fundsquarenet
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/mmbros/quote/internal/quotegetter"
"github.com/mmbros/quote/internal/quotegetter/scrapers"
)
// scraper gets stock/fund prices from fundsquare.net
type scraper struct {
	name   string       // source name reported by Source()
	client *http.Client // client used for all requests
}
// NewQuoteGetter creates a new QuoteGetter
// that gets stock/fund prices from fundsquare.net
func NewQuoteGetter(name string, client *http.Client) quotegetter.QuoteGetter {
	return scrapers.NewQuoteGetter(&scraper{name, client})
}

// Source returns the name of the scraper.
// (Comment fixed: the method is Source, not Name.)
func (s *scraper) Source() string {
	return s.name
}

// Client returns the http.Client of the scraper
func (s *scraper) Client() *http.Client {
	return s.client
}
// GetSearch builds the http GET request of the search page for the
// specified `isin`. It returns nil when the scraper can build the url
// of the info page directly from the `isin` — which is always the case
// for fundsquare, hence the (nil, nil) below.
// (Comment fixed: the function returns an *http.Request, not a
// response.)
func (s *scraper) GetSearch(ctx context.Context, isin string) (*http.Request, error) {
	return nil, nil
}

// ParseSearch parses the html of the search page to find the URL of the
// info page. `doc` can be nil if the url of the info page can be built
// directly from the `isin` (as here: the url is derived from isin only).
// It returns the url of the info page.
func (s *scraper) ParseSearch(doc *goquery.Document, isin string) (string, error) {
	url := fmt.Sprintf("https://www.fundsquare.net/search-results?ajaxContentView=renderContent"+
		"&=undefined&search=%s&isISIN=O&lang=EN&fastSearch=O", isin)
	return url, nil
}
// GetInfo builds the ajax GET request for the info page of `isin` at
// `url`, attaching the browser-like headers the site expects.
func (s *scraper) GetInfo(ctx context.Context, isin, url string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:80.0) Gecko/20100101 Firefox/80.0")
	req.Header.Set("Accept", "text/html;type=ajax")
	req.Header.Set("Accept-Language", "en-US,en;q=0.5")
	req.Header.Set("X-Requested-With", "XMLHttpRequest")
	req.Header.Set("DNT", "1")
	req.Header.Set("Connection", "keep-alive")
	req.Header.Set("Referer", "https://www.fundsquare.net/search-results?fastSearch=O&isISIN=O&search="+isin)
	return req, nil
}
// ParseInfo extracts isin, NAV date and price/currency from the info
// page. The parse is positional: it walks the <td> cells of the content
// tables in document order and dispatches on the cell index, covering
// three page layouts (reference HTML below):
//   - normal: cell 3 holds the Last NAV date, cell 4 the price span;
//   - "Last NAV status = Unavailable": cell 3 starts with "Unavailable",
//     so the Previous NAV date/price are taken from cells 5 and 6;
//   - no result: no price cell at all; detected afterwards via the
//     "No result" message and reported as scrapers.ErrNoResultFound.
//
// SCENARIO OK
// -----------
// <div id="content">
// <table style="width: 100%">
// <tr>
// <td>
// <span style="font-weight: bold;">IE00B4TG9K96</span>
// PIMCO GIS Diversified Income Fund E Hgd EUR Dis </td>
// <td></td></tr></table>
// <table width="85%">
// <tr>
// <td width="30%">Last NAV</td>
// <td width="15%">11/09/2020</td>
// <td width="55%">
// <span class="surligneorange">11.49 EUR</span>
// SCENARIO Last NAV status = Unavailable
// --------------------------------------
// <div id="content">
// <table style="width: 100%">
// <tbody>
// <tr>
// <td><span style="font-weight: bold;">IE00B4TG9K96</span> PIMCO GIS Diversified Income Fund E Hgd
// EUR Dis </td>
// <td></td>
// </tr>
// </tbody>
// </table>
// <table width="85%">
// <tbody>
// <tr>
// <td width="30%">Last NAV status</td>
// <td width="70%">Unavailable - Closed Market / Bank Holiday (from 21/09/2020 to 21/09/2020)</td>
// </tr>
// </tbody>
// </table>
// <table width="85%">
// <tbody>
// <tr>
// <td width="30%">Previous NAV</td>
// <td width="15%">18/09/2020</td>
// <td width="55%"><span class="surligneorange">11.47 EUR</span> <span
// style="color:#DD0000;text-align:left;padding:4px 0;"> -0.17 % <img
// src="/images/share/variationNegative.gif" style="vertical-align:middle;" /></span></td>
// </tr>
// </tbody>
// </table>
// SCENARIO No result
// ------------------
// <div class="box-message-info" style="">
// <h3 class="table01-title">
// <big>Search result</big>
// </h3>
// <table width="100%">
// <tbody>
// <tr>
// <td valign="middle" align="center">
// <img src="/images/share/research.gif" alt="info"/>
// </td>
// <td valign="middle" align="center">
// <span class="title">
// <div class="contenu"><p class="zero"></p><p><span class="surligneorange"> No result</span> produced by your request.</p><p>Please<span class="surligneorange"> modify your search criteria</span>.</p><input type="submit" id="valida" name="valider" onclick="window.location='/search?fastSearch=O&isISIN=O&search=IT0005247157'" class="back_search_w btn_w" value="Back to search"/><br class="clear_r"/><p></p></div><p class="ps">Number of results : <span>0</span></p>
// </span>
func (s *scraper) ParseInfo(doc *goquery.Document, isin string) (*scrapers.ParseInfoResult, error) {
	r := new(scrapers.ParseInfoResult)
	r.DateLayout = "02/01/2006"
	var txtPriceCurrency string
	isLastNavAvailable := false
	// Walk every td of the content tables; the cell index selects the
	// field (see layout notes above). EachWithBreak stops as soon as a
	// price cell is captured (return false).
	doc.Find("div#content table td").EachWithBreak(func(i int, s *goquery.Selection) bool {
		switch i {
		case 0:
			r.IsinStr = s.Find("span").Text()
		case 3:
			r.DateStr = s.Text()
			isLastNavAvailable = !strings.HasPrefix(r.DateStr, "Unavailable")
		case 4:
			if isLastNavAvailable {
				txtPriceCurrency = s.Find("span").Text() // 11.49 EUR
				return false
			}
		case 5:
			// Previous NAV
			r.DateStr = s.Text()
		case 6:
			// Previous NAV
			txtPriceCurrency = s.Find("span").Text() // 11.49 EUR
			return false
		}
		return true
	})
	if txtPriceCurrency == "" {
		// check for "No result produced by your request.""
		s := strings.TrimSpace(doc.Find("div.contenu span.surligneorange").Text())
		if strings.HasPrefix(s, "No result") {
			return r, scrapers.ErrNoResultFound
		}
	}
	// split price and currency (11.49 EUR)
	var errPrice error
	r.PriceStr, r.CurrencyStr, errPrice = scrapers.SplitPriceCurrency(txtPriceCurrency, true)
	return r, errPrice
}
|
package friend
import (
"fmt"
"spapp/src/common/constants"
helper "spapp/src/common/helpers"
"spapp/src/models/apimodels"
friendmodels "spapp/src/models/apimodels/friend"
"spapp/src/models/domain"
"spapp/src/persistence"
"strconv"
)
// GetRecipientsCommand resolves the e-mail addresses to notify for a
// message: the sender's friends (both directions of User_Friend),
// subscribers, and any users mentioned by e-mail inside the text —
// excluding users who have blocked the sender.
//
// NOTE(review): every DbContext.Select error below is discarded
// (`_, _ =`), so database failures silently degrade into "no
// recipients".
// NOTE(review): e-mail addresses extracted from input.Text are spliced
// directly into the SQL "In (...)" clause further down — this is a SQL
// injection vector and should be parameterized.
func GetRecipientsCommand(input friendmodels.GetRecipientsInput) friendmodels.GetRecipientsOutput {
	// Validation: sender must be present.
	if len(input.Sender) == 0 {
		var output = friendmodels.GetRecipientsOutput{
			apimodels.ApiResult{ false, []string {"Sender isn't null or empty"}},
			[]string{}}
		return output
	}
	// Validation: sender must be a well-formed e-mail address.
	if !helper.IsEmail(input.Sender) {
		var output = friendmodels.GetRecipientsOutput{
			apimodels.ApiResult{false, []string {"Sender isn't valid email address"}},
			[]string{}}
		return output
	}
	// Validation: text must be present.
	if len(input.Text) == 0 {
		var output = friendmodels.GetRecipientsOutput{
			apimodels.ApiResult{false, []string {"Text isn't null or empty"}},
			[]string{}}
		return output
	}
	// The sender must be a registered user.
	var users []domain.UserDomain
	_, _ = persistence.DbContext.Select(&users, "select Id, Username From User Where Username=?", input.Sender)
	if len(users) == 0 {
		var msg = fmt.Sprintf("%s isn't registered", input.Sender)
		var output = friendmodels.GetRecipientsOutput{
			apimodels.ApiResult{false, []string {msg}},
			[]string{}}
		return output
	}
	var currentUser = users[0]
	// Blocked Users: ids of users who blocked the sender, rendered as a
	// comma-separated list for the Not In clauses below (safe here: the
	// ids come from the database as ints).
	var blockedIds []int
	_, _ = persistence.DbContext.Select(&blockedIds,"Select Requestor From Subscribe_User Where Target = ? And Status=?", currentUser.Id, constants.Blocked)
	var blockUserIdsParam = ""
	if len(blockedIds) > 0 {
		for i := range blockedIds {
			blockedId := blockedIds[i]
			blockUserIdsParam = blockUserIdsParam + "," + strconv.Itoa(blockedId)
		}
		// Drop the leading comma. (Note: `rune` shadows the builtin.)
		var rune = []rune(blockUserIdsParam)
		blockUserIdsParam = string(rune[1:])
	}
	// toUserIds: friends the sender added.
	var query = "Select ToUserID From User_Friend Where FromUserID=?"
	if len(blockUserIdsParam) > 0 {
		query = fmt.Sprintf("%s And ToUserID Not In (%s)", query, blockUserIdsParam)
	}
	var toFriendUserIds []int
	_, _ = persistence.DbContext.Select(&toFriendUserIds, query, currentUser.Id)
	// fromUserIds: users who added the sender as a friend.
	query = "Select FromUserID From User_Friend Where ToUserID=?"
	if len(blockUserIdsParam) > 0 {
		query = fmt.Sprintf("%s And FromUserID Not In (%s)", query, blockUserIdsParam)
	}
	var fromFriendUserIds []int
	_, _ = persistence.DbContext.Select(&fromFriendUserIds, query, currentUser.Id)
	// subscribeUserIds: users subscribed to the sender.
	var subscribeUserIds []int
	_, _ = persistence.DbContext.Select(&subscribeUserIds,"Select Requestor From Subscribe_User Where Target = ? And Status=?", currentUser.Id, constants.Subscribed)
	// Users mentioned by e-mail address inside the message text.
	var matchEmails = helper.ExtractEmails(input.Text)
	var matchedUserIds = []int{}
	if len(matchEmails) > 0 {
		var emailsParam = ""
		for i := range matchEmails {
			matchEmail := matchEmails[i]
			emailsParam = fmt.Sprintf("%s, '%s'",emailsParam, matchEmail)
		}
		var rune = []rune(emailsParam)
		emailsParam = string(rune[1:])
		// SECURITY NOTE(review): emailsParam is user-controlled text
		// concatenated into SQL — injection risk; parameterize.
		query = fmt.Sprintf("Select Id From User Where Username In (%s) And Id != %s", emailsParam, strconv.Itoa(currentUser.Id))
		if len(blockUserIdsParam) > 0 {
			query = fmt.Sprintf("%s And Id Not In (%s)", query, blockUserIdsParam)
		}
		_, _ = persistence.DbContext.Select(&matchedUserIds,query)
	}
	// Union of all recipient id sources (duplicates are not removed).
	var notifyUserIds = []int{}
	notifyUserIds = append(toFriendUserIds, fromFriendUserIds...)
	notifyUserIds = append(notifyUserIds, subscribeUserIds...)
	notifyUserIds = append(notifyUserIds, matchedUserIds...)
	// Resolve ids back to e-mail addresses.
	var emails = []string {}
	if len(notifyUserIds) > 0 {
		param := ""
		for i := range notifyUserIds {
			notifyUserId := notifyUserIds[i]
			param = fmt.Sprintf("%s,%s", param, strconv.Itoa(notifyUserId))
		}
		var rune = []rune(param)
		param = string(rune[1:])
		query = fmt.Sprintf("Select Username From User Where Id In (%s)", param)
		_, _ = persistence.DbContext.Select(&emails,query)
	}
	output := friendmodels.GetRecipientsOutput{apimodels.ApiResult{true, []string {}},emails}
	return output
}
package gsm7bit
import (
"bytes"
"golang.org/x/text/transform"
)
// gsm7Decoder decodes GSM 03.38 7-bit encoded data to UTF-8; packed
// selects whether the input octets carry packed septets.
type gsm7Decoder struct {
	packed bool
}

// Reset implements transform.Transformer; the decoder is stateless.
func (d gsm7Decoder) Reset() { /* stateless: nothing to reset */ }
// Transform implements transform.Transformer: it decodes GSM 03.38
// 7-bit data from src into UTF-8 in dst. When d.packed is set, src is
// first unpacked from packed septets.
//
// Behavior grounded in the code below:
//   - an escape septet (esc) consumes the following septet and maps it
//     through reverseEscapes; a truncated or unknown escape returns
//     ErrInvalidByte (err is preset to that value and only cleared once
//     the whole stream decoded cleanly);
//   - a trailing CR in the decoded output reduces the reported nDst by
//     one (the bytes are still copied, just not counted);
//   - if dst is too small, transform.ErrShortDst is returned.
//
// NOTE(review): nSrc is never set to the number of consumed bytes —
// confirm this against the transform.Transformer contract, which
// expects nSrc to report consumption.
func (d gsm7Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	if len(src) == 0 {
		return
	}
	var buf bytes.Buffer
	septets := unpackSeptets(src, d.packed)
	// Preset the failure value; cleared only after a clean full decode.
	err = ErrInvalidByte
	for i, septet := 0, byte(0); i < len(septets); i++ {
		septet = septets[i]
		if septet <= 0x7F && septet != esc {
			buf.WriteRune(reverseLookup[septet])
		} else {
			// Escape sequence: the next septet selects the extended table.
			i++
			if i >= len(septets) {
				return // truncated escape -> ErrInvalidByte
			}
			r, ok := reverseEscapes[septets[i]]
			if !ok {
				return // unknown escape -> ErrInvalidByte
			}
			buf.WriteRune(r)
		}
	}
	err = nil
	nDst = buf.Len()
	if len(dst) < nDst {
		nDst = 0
		err = transform.ErrShortDst
	} else {
		decoded := buf.Bytes()
		// Drop a trailing CR (encoder padding) from the reported length.
		if n := len(decoded); n > 2 && (decoded[n-1] == cr || decoded[n-2] == cr) {
			nDst--
		}
		copy(dst, decoded)
	}
	return
}
func unpackSeptets(septets []byte, packed bool) []byte {
if !packed {
return septets
}
var septet, bit byte = 0, 0
var buf bytes.Buffer
buf.Grow(len(septets))
for _, octet := range septets {
for i := 0; i < 8; i++ {
septet |= octet >> i & 1 << bit
bit++
if bit == 7 {
buf.WriteByte(septet)
septet = 0
bit = 0
}
}
}
return buf.Bytes()
}
|
package main
import (
"fmt"
"io/ioutil"
)
// main prints the contents of example.txt, panicking (via showError)
// when the file cannot be read.
func main() {
	text, errorFile := ioutil.ReadFile("example.txt")
	showError(errorFile)
	fmt.Println(string(text))
}
// showError panics on a non-nil error; a nil error is a no-op.
func showError(e error) {
	if e == nil {
		return
	}
	panic(e)
}
|
package mongo_utils
import (
"context"
"errors"
"fmt"
"github.com/subosito/gotenv"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
"log"
"os"
"strings"
"time"
)
const (
	// dbhost is the fallback address when MONGOHOST/MONGOPORT are unset.
	dbhost = "127.0.0.1:27017"
	// authdb, authuser and authpass are unused placeholders; credentials
	// are actually taken from the environment in init.
	authdb   = ""
	authuser = ""
	authpass = ""
	// timeout bounds every mongo operation context in this package.
	timeout   = 30 * time.Second
	poollimit = 4096 // declared but not applied to the client options
)

// client is the package-wide mongo client, set by init or InitDB.
var client *mongo.Client
// init connects the package-level client from environment variables
// (MONGOHOST/MONGOPORT plus optional MONGOUSER/MONGOPASSWORD/MONGOAUTHDB),
// falling back to dbhost, and verifies the connection with a ping.
// Any failure is fatal.
func init() {
	gotenv.Load()
	// Fix: the cancel func was discarded (go vet lostcancel), leaking
	// the timeout timer; release it once initialization is done.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	//mongodb://user:password@host:port/userDb
	var c *mongo.Client
	var err error
	if os.Getenv("MONGOHOST") != "" && os.Getenv("MONGOPORT") != "" {
		if os.Getenv("MONGOUSER") != "" && os.Getenv("MONGOPASSWORD") != "" && os.Getenv("MONGOAUTHDB") != "" {
			c, err = mongo.Connect(ctx, options.Client().ApplyURI(fmt.Sprintf("mongodb://%s:%s@%s:%s/%s", os.Getenv("MONGOUSER"), os.Getenv("MONGOPASSWORD"), os.Getenv("MONGOHOST"), os.Getenv("MONGOPORT"), os.Getenv("MONGOAUTHDB"))))
		} else {
			c, err = mongo.Connect(ctx, options.Client().ApplyURI(fmt.Sprintf("mongodb://%s:%s", os.Getenv("MONGOHOST"), os.Getenv("MONGOPORT"))))
		}
	} else {
		c, err = mongo.Connect(ctx, options.Client().ApplyURI(fmt.Sprintf("mongodb://%s", dbhost)))
	}
	if err != nil {
		log.Fatal(err)
	}
	if err = c.Ping(ctx, readpref.Primary()); err != nil {
		log.Fatal(err)
	}
	client = c
}
// InitDB (re)connects the package-level client to the given host/port
// without authentication and verifies the connection with a ping.
// Any failure is fatal.
func InitDB(MONGOHOST string, MONGOPORT string) {
	gotenv.Load()
	// Fix: the cancel func was discarded (go vet lostcancel); release
	// the timeout timer once initialization is done.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	//mongodb://user:password@host:port/userDb
	c, err := mongo.Connect(ctx, options.Client().ApplyURI(
		fmt.Sprintf("mongodb://%s:%s", MONGOHOST, MONGOPORT)),
	)
	if err != nil {
		log.Fatal(err)
	}
	if err = c.Ping(ctx, readpref.Primary()); err != nil {
		log.Fatal(err)
	}
	client = c
}
// connect returns a handle to collectionName in databaseName using the
// package-level client (which must have been set by init or InitDB).
func connect(databaseName string, collectionName string) *mongo.Collection {
	return client.Database(databaseName).Collection(collectionName)
}
// FindOne runs a FindOne query against db/col with the given filter and
// returns the raw SingleResult for the caller to decode.
// NOTE(review): the context cancel func is discarded; it cannot simply
// be deferred because the returned SingleResult may be decoded by the
// caller after this function returns.
func FindOne(db string, col string, search interface{}) *mongo.SingleResult {
	collection := connect(db, col)
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	resp := collection.FindOne(ctx, search)
	return resp
}
// FindMany runs a Find query against db/col and returns the cursor
// together with the context it was created under (the caller iterates
// the cursor with it). A "no documents in result" error is swallowed
// and reported as a nil cursor with no error.
// NOTE(review): the cancel func of the timeout context is discarded; it
// cannot be deferred here because the returned cursor is consumed by
// the caller under this context.
func FindMany(db string, col string, search interface{}) (*mongo.Cursor, *context.Context, error) {
	collection := connect(db, col)
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	curr, err := collection.Find(ctx, search)
	if err != nil {
		if strings.Contains(err.Error(), "no documents in result") {
			log.Println("No document founded {}")
			return nil, &ctx, nil
		}
		return nil, &ctx, err
	}
	return curr, &ctx, nil
}
// InsertOne inserts data into db/col and returns the new document's
// ObjectID as a hex string. A non-ObjectID inserted id is an error.
func InsertOne(db string, col string, data interface{}) (string, error) {
	collection := connect(db, col)
	// Fix: release the timeout context when done (go vet lostcancel);
	// the operation completes inside this function, so deferring is safe.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	res, errInsert := collection.InsertOne(ctx, data)
	if errInsert != nil {
		return "", errInsert
	}
	if oid, ok := res.InsertedID.(primitive.ObjectID); ok {
		return oid.Hex(), nil
	}
	return "", errors.New("not objectId returned")
}
// InsertMany inserts every document in data into db/col and returns the
// hex ObjectIDs of the inserted documents (non-ObjectID ids are
// silently skipped, as before).
func InsertMany(db string, col string, data []interface{}) ([]string, error) {
	collection := connect(db, col)
	// Fix: release the timeout context when done (go vet lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	res, errInserts := collection.InsertMany(ctx, data)
	if errInserts != nil {
		return nil, errInserts
	}
	var ids []string
	for _, e := range res.InsertedIDs {
		if oid, ok := e.(primitive.ObjectID); ok {
			ids = append(ids, oid.Hex())
		}
	}
	log.Println("Successfully add with IDS ", res.InsertedIDs)
	return ids, nil
}
// UpdateOne applies the update document data to the first document in
// db/col matching search, logging the matched/modified counts.
func UpdateOne(db string, col string, search interface{}, data interface{}) error {
	collection := connect(db, col)
	// Fix: release the timeout context when done (go vet lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	res, errUpdate := collection.UpdateOne(ctx, search, data)
	if errUpdate != nil {
		return errUpdate
	}
	log.Println("Matched", res.MatchedCount)
	log.Println("Modified", res.ModifiedCount)
	return nil
}
// UpdateMany applies the update document data to every document in
// db/col matching search, logging the matched/modified counts.
func UpdateMany(db string, col string, search interface{}, data interface{}) error {
	collection := connect(db, col)
	// Fix: release the timeout context when done (go vet lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	res, errUpdate := collection.UpdateMany(ctx, search, data)
	if errUpdate != nil {
		return errUpdate
	}
	log.Println("Matched", res.MatchedCount)
	log.Println("Modified", res.ModifiedCount)
	return nil
}
// DeleteOne removes the first document in db/col matching search,
// logging the deleted count.
func DeleteOne(db string, col string, search interface{}) error {
	collection := connect(db, col)
	// Fix: release the timeout context when done (go vet lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	res, errDelete := collection.DeleteOne(ctx, search)
	if errDelete != nil {
		return errDelete
	}
	log.Println("Deleted ", res.DeletedCount)
	return nil
}
// DeleteMany removes every document in db/col matching search, logging
// the deleted count.
func DeleteMany(db string, col string, search interface{}) error {
	collection := connect(db, col)
	// Fix: release the timeout context when done (go vet lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	res, errDelete := collection.DeleteMany(ctx, search)
	if errDelete != nil {
		return errDelete
	}
	log.Println("Deleted ", res.DeletedCount)
	return nil
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"go/ast"
"go/parser"
"go/token"
"io/fs"
"path/filepath"
"strings"
"github.com/pingcap/log"
"go.uber.org/zap"
)
// testMap counts, per package directory, how many top-level Test*
// functions were found.
var testMap map[string]uint32

// initCount resets the counter map.
func initCount() {
	testMap = make(map[string]uint32)
}

// addTestMap increments the test counter for path.
func addTestMap(path string) {
	// Simplified: the map's zero value makes the explicit presence
	// check-and-initialize of the original redundant.
	testMap[path]++
}
// walk scans every *_test.go file under the current directory and
// tallies the Test* functions found per package directory.
func walk() {
	err := filepath.Walk(".", func(path string, d fs.FileInfo, err error) error {
		// Bug fix: the walk error was ignored (`_ error`), and on an
		// error d can be nil, so d.IsDir() would panic. Propagate it.
		if err != nil {
			return err
		}
		if d.IsDir() || !strings.HasSuffix(d.Name(), "_test.go") {
			return nil
		}
		return scan(path)
	})
	if err != nil {
		log.Fatal("fail to walk", zap.Error(err))
	}
}
// scan parses the Go file at path and counts every top-level function
// named Test* (excluding TestMain and methods) toward the file's
// directory via addTestMap.
func scan(path string) error {
	abs, err := filepath.Abs(path)
	if err != nil {
		return err
	}
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, abs, nil, parser.AllErrors)
	if err != nil {
		return err
	}
	for _, decl := range file.Decls {
		fn, ok := decl.(*ast.FuncDecl)
		if !ok || fn.Recv != nil {
			continue
		}
		if name := fn.Name.Name; strings.HasPrefix(name, "Test") && name != "TestMain" {
			addTestMap(filepath.Dir(abs))
		}
	}
	return nil
}
|
package utils
import (
"errors"
"io/ioutil"
"strings"
)
var (
ErrNoSuchDirOrFile = errors.New("ERR: no such file or direcory")
)
func GetDirOrFilePathFromRoot(root string, target string) (string, error) {
RootDirs := strings.Split(root, "/")
if len(RootDirs) != 0 {
if RootDirs[len(RootDirs)-1] == target {
return root, nil
}
}
dirsAndFiles, err := ioutil.ReadDir(root)
if err != nil {
return "", err
}
for _, v := range dirsAndFiles {
if v.IsDir() {
path, err := GetDirOrFilePathFromRoot(root+"/"+v.Name(), target)
if err == nil {
return path, nil
}
}
}
return "", ErrNoSuchDirOrFile
}
|
package core
// Receipt is a placeholder for transaction receipt data.
type Receipt struct {
}

// NewReceipt returns a fresh, empty Receipt.
func NewReceipt() *Receipt {
	return &Receipt{}
}
package users
import (
json "github.com/json-iterator/go"
"github.com/klaytn/klaytn/common"
"github.com/klaytn/klaytn/common/hexutil"
"github.com/perlin-network/noise"
"github.com/perlin-network/noise/payload"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
)
// Compile-time checks that both message types satisfy noise.Message.
var (
	_ noise.Message = (*SignUpRequest)(nil)
	_ noise.Message = (*SignUpResponse)(nil)
)

// SignUpRequest asks the peer to sign the given identity hash.
type SignUpRequest struct {
	MessageId    uuid.UUID   `json:"message_id"` // Ignore when write message
	IdentityHash common.Hash `json:"identity_hash"`
}
// Read decodes a SignUpRequest from the payload reader.
func (SignUpRequest) Read(reader payload.Reader) (noise.Message, error) {
	var req SignUpRequest
	err := json.NewDecoder(reader).Decode(&req)
	if err != nil {
		return nil, errors.Wrap(err, "failed to decode signup request message")
	}
	return req, nil
}

// Write serializes the request to JSON.
// NOTE(review): the Marshal error is silently discarded; the interface
// leaves no way to report it.
func (req SignUpRequest) Write() []byte {
	reqBytes, _ := json.Marshal(req)
	return reqBytes
}

// ID returns the message id used for request/response correlation.
func (req SignUpRequest) ID() uuid.UUID {
	return req.MessageId
}

// SetID sets the message correlation id.
func (req *SignUpRequest) SetID(id uuid.UUID) {
	req.MessageId = id
}
// SignUpResponse carries the peer's signature for a signup request.
type SignUpResponse struct {
	MessageId uuid.UUID     `json:"message_id"` // request/response correlation id
	Sign      hexutil.Bytes `json:"signature"`
}
// Read decodes a SignUpResponse from the payload reader.
func (SignUpResponse) Read(reader payload.Reader) (noise.Message, error) {
	var resp SignUpResponse
	err := json.NewDecoder(reader).Decode(&resp)
	if err != nil {
		return nil, errors.Wrap(err, "failed to decode signup response message")
	}
	return resp, nil
}

// Write serializes the response to JSON.
// NOTE(review): the Marshal error is silently discarded; the interface
// leaves no way to report it.
func (resp SignUpResponse) Write() []byte {
	respBytes, _ := json.Marshal(resp)
	return respBytes
}

// ID returns the message correlation id.
func (resp SignUpResponse) ID() uuid.UUID {
	return resp.MessageId
}

// SetID sets the message correlation id.
func (resp *SignUpResponse) SetID(id uuid.UUID) {
	resp.MessageId = id
}

// Signature returns the signature bytes.
func (resp SignUpResponse) Signature() hexutil.Bytes {
	return resp.Sign
}

// SetSignature stores the signature bytes.
func (resp *SignUpResponse) SetSignature(sign hexutil.Bytes) {
	resp.Sign = sign
}
|
package main
import (
"fmt"
"sort"
"strings"
)
// main demonstrates groupAnagrams on the classic example input.
func main() {
	fmt.Println(groupAnagrams([]string{
		"eat", "tea", "tan", "ate", "nat", "bat",
	}))
}
// sortStr returns str with its characters rearranged into ascending
// order (e.g. "tea" -> "aet"); used as a canonical anagram key.
// Equivalent to the previous split/sort/join for valid UTF-8 input,
// since byte order of UTF-8 chunks matches code-point order.
func sortStr(str string) string {
	runes := []rune(str)
	sort.Slice(runes, func(i, j int) bool {
		return runes[i] < runes[j]
	})
	return string(runes)
}
// groupAnagrams buckets the input words by their sorted-letter key, so
// words that are anagrams of each other end up in the same group.
// Group order follows map iteration and is therefore unspecified.
func groupAnagrams(strs []string) [][]string {
	groups := make(map[string][]string)
	for _, word := range strs {
		key := sortStr(word)
		groups[key] = append(groups[key], word)
	}
	var result [][]string
	for _, group := range groups {
		result = append(result, group)
	}
	return result
}
|
package main
// Token represents a lexical token produced by the scanner.
type Token int

const (
	// EOF represents the end of file
	EOF Token = iota
	// Error represents an error
	Error
	// Assign represents the assignment '='
	Assign
	// Number represents a simple number
	Number
	// Operator represents an operator such as '+' '-' '*' '**' 'max' 'min' '+\' '+/'
	Operator
	// Space represents space separation between tokens
	Space
	// Identifier represents an identifier such as a var name
	Identifier
)
|
// golrn04 - Learning go
// Maps
//
// 2016-02-28 PV
package main
import "fmt"
// main demonstrates Go map basics: make, inserts, missing-key zero
// values, the comma-ok idiom, and string indexing/slicing.
func main() {
	var m1 map[string] int
	m1 = make(map[string]int, 3)
	m1["blue"] = 1
	m1["white"] = 2
	m1["red"] = 3
	fmt.Println("red:", m1["red"])
	// A missing key yields the value type's zero value (0 for int).
	fmt.Println("green:", m1["green"])
	// The comma-ok form distinguishes "absent" from "stored zero".
	r,e := m1["orange"]
	fmt.Println("orange ->", r, e)
	r,e = m1["blue"]
	fmt.Println("blue ->", r, e)
	s := "bonjour"
	// Indexing a string yields a byte (uint8), printed numerically.
	fmt.Println("s[1] ->", s[1])
	// s[1] = 'w' cannot assign to s[1] — strings are immutable
	var sl = s[3:]
	fmt.Println("sl ->", sl)
}
|
package main
// Response mirrors the top-level JSON payload returned by the weather API.
type Response struct {
	WeatherInfo WeatherInfo `json:"weatherinfo"`
}
// WeatherInfo holds one city's current observation; all values arrive as
// strings. Field meanings are presumed from the tag names (wind, humidity,
// pressure, etc.) — confirm against the upstream API documentation.
type WeatherInfo struct {
	City string `json:"city"`
	CityId string `json:"cityid"`
	Temp string `json:"temp"`
	WD string `json:"WD"`
	WS string `json:"WS"`
	SD string `json:"SD"`
	// NOTE(review): tag "WSE" vs field WSE — verify this key exists upstream.
	WSE string `json:"WSE"`
	Time string `json:"time"`
	IsRadar string `json:"isRadar"`
	Radar string `json:"Radar"`
	NJD string `json:"njd"`
	QY string `json:"qy"`
	Rain string `json:"rain"`
}
|
package main
import (
"crypto/tls"
"crypto/x509"
"errors"
"io/ioutil"
"path/filepath"
)
// PrepareTLSCfg builds a *tls.Config from optional certificate file paths.
// certPath and rootPath name the client certificate and its private key
// (NOTE(review): despite its name, rootPath is used as the KEY file — confirm
// naming with callers). caPath, when non-empty, installs a root CA pool.
//
// NOTE(review): InsecureSkipVerify is unconditionally true, even when a CA
// pool is configured, so server certificates are never verified — confirm
// this is intentional before use outside development.
func PrepareTLSCfg(certPath string, rootPath string, caPath string) (*tls.Config, error) {
	cfg := &tls.Config{InsecureSkipVerify: true}
	// Load the client certificate/key pair only when both paths are given.
	if certPath != "" && rootPath != "" {
		certFile, err := filepath.Abs(certPath)
		if err != nil {
			return nil, err
		}
		keyFile, err := filepath.Abs(rootPath)
		if err != nil {
			return nil, err
		}
		pair, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return nil, err
		}
		cfg.Certificates = append(cfg.Certificates, pair)
	}
	// Install a custom root CA pool when a CA file is given.
	if caPath != "" {
		caFile, err := filepath.Abs(caPath)
		if err != nil {
			return nil, err
		}
		caPEM, err := ioutil.ReadFile(caFile)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		if ok := pool.AppendCertsFromPEM(caPEM); !ok {
			return nil, errors.New("failed to parse root certificate")
		}
		cfg.RootCAs = pool
	}
	return cfg, nil
}
|
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Some of the code below came from https://github.com/coreos/etcd-operator
which also has the apache 2.0 license.
*/
// Package rgw to manage a rook object store.
package rgw
import (
"fmt"
opkit "github.com/rook/operator-kit"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/operator/k8sutil"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
)
// ObjectStoreController represents a controller object for object store custom resources.
type ObjectStoreController struct {
	context *clusterd.Context // shared cluster context used when creating/updating/deleting stores
	scheme *runtime.Scheme // runtime scheme, populated by StartWatch; used for deep copies in onAdd
	versionTag string // image/version tag passed through to store creation
	hostNetwork bool // whether RGW pods use host networking
}
// NewObjectStoreController returns a controller that watches object store
// custom resources and reconciles them against the cluster.
func NewObjectStoreController(context *clusterd.Context, versionTag string, hostNetwork bool) *ObjectStoreController {
	controller := &ObjectStoreController{
		context:     context,
		versionTag:  versionTag,
		hostNetwork: hostNetwork,
	}
	return controller
}
// StartWatch watches for instances of ObjectStore custom resources and acts on them.
// The watch goroutine runs until stopCh is closed.
func (c *ObjectStoreController) StartWatch(namespace string, stopCh chan struct{}) error {
	client, scheme, err := opkit.NewHTTPClient(k8sutil.CustomResourceGroup, k8sutil.V1Alpha1, schemeBuilder)
	if err != nil {
		return fmt.Errorf("failed to get a k8s client for watching object store resources: %v", err)
	}
	c.scheme = scheme
	// Route add/update/delete events to the controller's handlers below.
	resourceHandlerFuncs := cache.ResourceEventHandlerFuncs{
		AddFunc: c.onAdd,
		UpdateFunc: c.onUpdate,
		DeleteFunc: c.onDelete,
	}
	logger.Infof("start watching object store resources in namespace %s", namespace)
	watcher := opkit.NewWatcher(ObjectStoreResource, namespace, resourceHandlerFuncs, client)
	go watcher.Watch(&ObjectStore{}, stopCh)
	return nil
}
// onAdd handles creation of a new object store custom resource by
// provisioning the backing store. Failures are logged, not retried here.
func (c *ObjectStoreController) onAdd(obj interface{}) {
	objectStore := obj.(*ObjectStore)
	// NEVER modify objects from the store. It's a read-only, local cache.
	// Use scheme.Copy() to make a deep copy of original object.
	copyObj, err := c.scheme.Copy(objectStore)
	if err != nil {
		// Use the package logger for consistency with onUpdate/onDelete
		// (previously this printed to stdout via fmt.Printf).
		logger.Errorf("failed to create a deep copy of object store: %+v", err)
		return
	}
	objectStoreCopy := copyObj.(*ObjectStore)
	err = objectStoreCopy.Create(c.context, c.versionTag, c.hostNetwork)
	if err != nil {
		logger.Errorf("failed to create object store %s. %+v", objectStore.Name, err)
	}
}
// onUpdate handles modification of an existing object store resource; the
// store is only re-applied when a provisioning-relevant spec field changed.
func (c *ObjectStoreController) onUpdate(oldObj, newObj interface{}) {
	// if the object store spec is modified, update the object store
	oldStore := oldObj.(*ObjectStore)
	newStore := newObj.(*ObjectStore)
	if !storeChanged(oldStore.Spec, newStore.Spec) {
		logger.Debugf("object store %s did not change", newStore.Name)
		return
	}
	logger.Infof("applying object store %s changes", newStore.Name)
	err := newStore.Update(c.context, c.versionTag, c.hostNetwork)
	if err != nil {
		// Message fixed: this path updates (not creates) the store.
		logger.Errorf("failed to update object store %s. %+v", newStore.Name, err)
	}
}
// onDelete handles removal of an object store resource by tearing down the
// backing store; failures are logged but not retried here.
func (c *ObjectStoreController) onDelete(obj interface{}) {
	objectStore := obj.(*ObjectStore)
	if err := objectStore.Delete(c.context); err != nil {
		logger.Errorf("failed to delete object store %s. %+v", objectStore.Name, err)
	}
}
// storeChanged reports whether any provisioning-relevant field differs
// between the old and new object store specs, logging the first change found.
func storeChanged(oldStore, newStore ObjectStoreSpec) bool {
	switch {
	case oldStore.DataPool.Replicated.Size != newStore.DataPool.Replicated.Size:
		logger.Infof("data pool replication changed from %d to %d", oldStore.DataPool.Replicated.Size, newStore.DataPool.Replicated.Size)
	case oldStore.MetadataPool.Replicated.Size != newStore.MetadataPool.Replicated.Size:
		logger.Infof("metadata pool replication changed from %d to %d", oldStore.MetadataPool.Replicated.Size, newStore.MetadataPool.Replicated.Size)
	case oldStore.Gateway.Instances != newStore.Gateway.Instances:
		logger.Infof("RGW instances changed from %d to %d", oldStore.Gateway.Instances, newStore.Gateway.Instances)
	case oldStore.Gateway.Port != newStore.Gateway.Port:
		logger.Infof("Port changed from %d to %d", oldStore.Gateway.Port, newStore.Gateway.Port)
	case oldStore.Gateway.SecurePort != newStore.Gateway.SecurePort:
		logger.Infof("SecurePort changed from %d to %d", oldStore.Gateway.SecurePort, newStore.Gateway.SecurePort)
	case oldStore.Gateway.AllNodes != newStore.Gateway.AllNodes:
		logger.Infof("AllNodes changed from %t to %t", oldStore.Gateway.AllNodes, newStore.Gateway.AllNodes)
	case oldStore.Gateway.SSLCertificateRef != newStore.Gateway.SSLCertificateRef:
		logger.Infof("SSLCertificateRef changed from %s to %s", oldStore.Gateway.SSLCertificateRef, newStore.Gateway.SSLCertificateRef)
	default:
		return false
	}
	return true
}
|
package routers
import (
"basic_blog_go/auth"
"basic_blog_go/controllers"
"github.com/astaxie/beego"
)
// init wires the /v1 REST namespaces (posts, users, trends) into beego and
// attaches the token-validation filter to the routes requiring auth.
func init() {
	ns :=
		beego.NewNamespace("/v1",
			beego.NSNamespace("/posts",
				beego.NSInclude(&controllers.PostController{}),
			),
			beego.NSNamespace("/users",
				beego.NSInclude(&controllers.UserController{}),
			),
			beego.NSNamespace("/trends",
				beego.NSInclude(&controllers.TrendController{}),
			),
		)
	beego.AddNamespace(ns)
	// Authentication filters run before routing for these endpoints.
	// NOTE(review): /v1/users is intentionally unfiltered (login/signup) — confirm.
	beego.InsertFilter("/v1/trends", beego.BeforeRouter, auth.ValidateToken)
	beego.InsertFilter("/v1/trends/analysis", beego.BeforeRouter, auth.ValidateToken)
	beego.InsertFilter("/v1/posts", beego.BeforeRouter, auth.ValidateToken)
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"github.com/nsf/jsondiff"
)
// main compares the two JSON documents given as positional arguments and
// exits 0 when they match (full or superset match), 1 otherwise. The
// comparison result is always printed.
func main() {
	verbose := flag.Bool("v", false, "`true` provides a verbose output with the result of the comparison")
	help := flag.Bool("help", false, "provides this help message")
	flag.Parse()
	if *help {
		flag.PrintDefaults()
		os.Exit(0)
	}
	if len(flag.Args()) != 2 {
		log.Fatal("two json documents must be provided")
	}
	args := flag.Args()
	diff, success := CompareJSON([]byte(args[0]), []byte(args[1]), verbose)
	// Both outcomes print the result; only the exit code differs.
	fmt.Println(diff.String())
	if !success {
		os.Exit(1)
	}
	os.Exit(0)
}
// CompareJSON compares two JSON documents and returns the jsondiff result
// plus true when json2 fully matches json1 or is a superset of it. When
// *verbose is set, the result and rendered diff are logged.
func CompareJSON(json1 []byte, json2 []byte, verbose *bool) (jsondiff.Difference, bool) {
	opts := jsondiff.DefaultConsoleOptions()
	result, diff := jsondiff.Compare(json1, json2, &opts)
	if *verbose {
		log.Println(result)
		log.Println("----------DIFF----------")
		log.Println(diff)
		log.Println("----------DIFF----------")
	}
	// Compare against the enum constants rather than their String() labels,
	// which is fragile against upstream renames.
	return result, result == jsondiff.FullMatch || result == jsondiff.SupersetMatch
}
|
package dcp
import "testing"
// Test_applePicking runs table-driven cases against applePicking (defined
// elsewhere in this package), pairing a sequence of tree types with the
// expected result.
func Test_applePicking(t *testing.T) {
	type args struct {
		types []int
	}
	tests := []struct {
		name string
		args args
		want int
	}{
		{"0", args{types: []int{2, 1, 2, 3, 3, 1, 3, 5}}, 4},
		{"1", args{types: []int{}}, 0}, // empty input edge case
		{"2", args{types: []int{2, 1, 2, 3, 1, 3, 5}}, 3},
		{"3", args{types: []int{1, 2, 3, 4, 1, 2, 3, 4}}, 2},
		{"4", args{types: []int{2, 1, 2, 2, 2, 1, 2, 1}}, 8},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := applePicking(tt.args.types); got != tt.want {
				t.Errorf("applePicking() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package main
import (
"bufio"
"fmt"
"io"
"log"
"net"
"strings"
"github.com/hfgo/datafile"
)
// main reads lines from input.txt and sends each one to a local TCP server,
// printing the server's one-line response per sent line.
func main() {
	fileName := "input.txt"
	clientReader, err := datafile.GetString(fileName)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.Dial("tcp", "127.0.0.1:8011")
	if err != nil {
		log.Fatalln(err)
	}
	defer conn.Close()
	serverReader := bufio.NewReader(conn)
	log.Printf("Begin to start transmitting now \n")
	for _, value := range clientReader {
		// BUG FIX: the line was previously used as the format string itself
		// (fmt.Fprintf(conn, value+"\n")), which corrupts output whenever the
		// data contains '%' verbs. Pass it as an argument instead.
		fmt.Fprintf(conn, "%s\n", value)
		servResp, err := serverReader.ReadString('\n') // Read response from the server
		if err == io.EOF {
			fmt.Println("Server closed the connection")
		} else if err == nil {
			fmt.Println(strings.TrimSpace(servResp))
		} else {
			log.Printf("Server error : %v\n", err)
		}
	}
	log.Printf("Ending now\n")
}
|
package main
import (
"fmt"
)
// https://leetcode-cn.com/problems/find-the-duplicate-number/
//------------------------------------------------------------------------------
// 类似于二分查找
// * 数组内数字的范围是 [1,n], 即 l,r 的初始值为 1,n
// * 求 [1,n] 的中间值 m, 并统计数组内比 m 小的个数, 值为 less; 比 m 大的个数, 记为 gt.
// * 如果 less > m-1, 说明 [1,m-1] 内的数字有重复, 则 r = m-1, 重复上面的步骤.
// * 如果 gt > n-m, 说明 [m+1,n] 内的数字有重复, 则 l = m+1, 重复上面步骤.
// * 如果上面两种情况都不是, 则重复的数字为 m, 直接返回 m.
// 复杂度分析:
// * 时间: O(n*lgn)
// * 空间: O(1)
// findDuplicate returns the repeated value in nums, where nums holds
// len(nums) integers drawn from [1, len(nums)-1]. It binary-searches the
// VALUE range (not the array), counting how many elements fall strictly
// below and above the midpoint to decide which half holds the duplicate.
// Time O(n log n), space O(1); the input slice is never modified.
func findDuplicate(nums []int) int {
	total := len(nums)
	hi := total - 1
	lo, up := 1, hi
	for lo <= up {
		mid := (lo + up) / 2
		below, above := 0, 0
		for _, v := range nums {
			switch {
			case v < mid:
				below++
			case v > mid:
				above++
			}
		}
		switch {
		case below > mid-1:
			// More values than slots in [1, mid-1]: duplicate is below mid.
			up = mid - 1
		case above > hi-mid:
			// More values than slots in [mid+1, hi]: duplicate is above mid.
			lo = mid + 1
		default:
			return mid
		}
	}
	return 1
}
// main iterates the (placeholder) case table and prints each case; the
// solver call itself is left unwired (see the "// solve" marker).
func main() {
	cases := [][]int{
		{},
	}
	for i, c := range cases[0:] {
		fmt.Println("## case", i)
		// solve
		fmt.Println(c)
	}
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package operator
import (
"fmt"
"github.com/pingcap/tidb/resourcemanager/pool/workerpool"
"github.com/pingcap/tidb/resourcemanager/util"
)
// Operator is the basic operation unit in the task execution.
type Operator interface {
	// Open starts the operator's processing resources.
	Open() error
	// Close finishes outstanding work and releases resources.
	Close() error
	String() string
}
// AsyncOperator process the data in async way.
//
// Eg: The sink of AsyncOperator op1 and the source of op2
// use the same channel, Then op2's worker will handle
// the result from op1.
type AsyncOperator[T, R any] struct {
	pool *workerpool.WorkerPool[T, R] // workers that apply the transform to incoming tasks
}
// NewAsyncOperatorWithTransform create an AsyncOperator with a transform function.
// workerNum controls the pool's concurrency; transform maps each task T to a result R.
func NewAsyncOperatorWithTransform[T, R any](name string, workerNum int, transform func(T) R) *AsyncOperator[T, R] {
	pool := workerpool.NewWorkerPool(name, util.DistTask, workerNum, newAsyncWorkerCtor(transform))
	return NewAsyncOperator(pool)
}
// NewAsyncOperator create an AsyncOperator backed by the given worker pool.
func NewAsyncOperator[T, R any](pool *workerpool.WorkerPool[T, R]) *AsyncOperator[T, R] {
	return &AsyncOperator[T, R]{
		pool: pool,
	}
}
// Open implements the Operator's Open interface: it starts the worker pool.
func (c *AsyncOperator[T, R]) Open() error {
	c.pool.Start()
	return nil
}
// Close implements the Operator's Close interface.
func (c *AsyncOperator[T, R]) Close() error {
	// Wait all tasks done.
	// We don't need to close the task channel because
	// it is closed by the workerpool.
	c.pool.Wait()
	c.pool.Release()
	return nil
}
// String show the name, rendering the concrete type parameters, e.g.
// "AsyncOp[int, string]".
func (*AsyncOperator[T, R]) String() string {
	// Zero values exist only so %T can print the instantiated types.
	var zT T
	var zR R
	return fmt.Sprintf("AsyncOp[%T, %T]", zT, zR)
}
// SetSource set the source channel the pool's workers receive tasks from.
func (c *AsyncOperator[T, R]) SetSource(ch DataChannel[T]) {
	c.pool.SetTaskReceiver(ch.Channel())
}
// SetSink set the sink channel worker results are sent to.
func (c *AsyncOperator[T, R]) SetSink(ch DataChannel[R]) {
	c.pool.SetResultSender(ch.Channel())
}
// asyncWorker adapts a plain transform function to the workerpool.Worker interface.
type asyncWorker[T, R any] struct {
	transform func(T) R
}
// newAsyncWorkerCtor returns a constructor the pool calls to create one
// asyncWorker per worker goroutine; all workers share the same transform.
func newAsyncWorkerCtor[T, R any](transform func(T) R) func() workerpool.Worker[T, R] {
	return func() workerpool.Worker[T, R] {
		return &asyncWorker[T, R]{
			transform: transform,
		}
	}
}
// HandleTask processes one task by applying the transform.
func (s *asyncWorker[T, R]) HandleTask(task T) R {
	return s.transform(task)
}
// Close implements workerpool.Worker; the stateless worker needs no cleanup.
func (*asyncWorker[T, R]) Close() {}
|
package main
import (
"flag"
"html/template"
"io/ioutil"
"log"
"math/rand"
"net/http"
"strings"
"time"
)
func handler(w http.ResponseWriter, r *http.Request) {
type Image struct {
Filename string
}
var paths []string
dir, err := ioutil.ReadDir("assets/")
if err != nil {
log.Println("ERROR: ", err)
http.Error(w, "internal server error", http.StatusInternalServerError)
return
}
for _, f := range dir {
if !f.IsDir() && strings.HasSuffix(f.Name(), "jpg") {
paths = append(paths, f.Name())
}
}
rand.Seed(time.Now().UTC().UnixNano())
image := Image{paths[rand.Intn(len(paths))]}
t, err := template.ParseFiles("index.html")
if err != nil {
log.Println("ERROR: ", err)
}
t.Execute(w, image)
}
// AssetsHandler handles serving static files
func AssetsHandler(w http.ResponseWriter, r *http.Request) {
log.Println("Serving " + r.URL.Path[1:])
http.ServeFile(w, r, r.URL.Path[1:])
}
// main serves the random-image page on -addr; /assets/ exposes static files.
func main() {
	var addr string
	flag.StringVar(&addr, "addr", "127.0.0.1:8080", "address to run on")
	flag.Parse()
	http.HandleFunc("/assets/", AssetsHandler)
	http.HandleFunc("/", handler)
	log.Printf("Running server on addr %s", addr)
	// BUG FIX: the error from ListenAndServe was previously discarded,
	// silently hiding bind failures such as a port already in use.
	log.Fatal(http.ListenAndServe(addr, nil))
}
|
package pie
// All reports whether fn returns true for every element of ss, mirroring
// Python's all(): an empty (or nil) slice yields true. Evaluation stops at
// the first element that fails the predicate.
func All[T any](ss []T, fn func(value T) bool) bool {
	for i := range ss {
		if !fn(ss[i]) {
			return false
		}
	}
	return true
}
|
package online
import ("im/engine"
"sync")
// onlineUser maps a user id (string) to its connected *engine.Client.
var onlineUser sync.Map
// User pairs a user id with its engine client connection.
type User struct {
	Id string
	Client *engine.Client
}
// GetAllUser returns the shared registry of online users.
func GetAllUser() *sync.Map {
	return &onlineUser
}
// GetClientById returns the connected client registered under id, or nil
// when that user is not online.
func GetClientById(id string) *engine.Client {
	v, ok := onlineUser.Load(id)
	if !ok {
		return nil
	}
	return v.(*engine.Client)
}
// SetOnlineUser registers (or replaces) the client connection for user.Id.
func SetOnlineUser(user *User) {
	onlineUser.Store(user.Id,user.Client)
}
// SendById delivers content to the online user identified by id; messages
// addressed to offline users are silently dropped.
func SendById(id string, content string) {
	if client := GetClientById(id); client != nil {
		client.Send <- []byte(content)
	}
}
//func RemoveOnlineUser(id string) {
// delete(online,id)
//} |
package main
// main runs each sample in sequence; every helper is defined elsewhere in
// this package.
func main() {
	// Run generate access token
	GenerateAccessToken()
	// Run basic sample for me
	BasicMe()
	// Run basic sample for feed
	BasicFeed()
	// Run basic sample for how to POST a feed
	BasicFeedPost()
	// Run basic sample for how to DELETE a feed
	BasicFeedDelete()
}
|
package controllers
import (
"context"
"fmt"
"github.com/genshen/ssh-web-console/src/models"
"github.com/genshen/ssh-web-console/src/utils"
"golang.org/x/crypto/ssh"
"io"
"log"
"net/http"
"nhooyr.io/websocket"
"time"
)
//const SSH_EGG = `genshen<genshenchu@gmail.com> https://github.com/genshen/sshWebConsole"`
// SSHWebSocketHandle bridges an authenticated WebSocket to an SSH shell.
type SSHWebSocketHandle struct {
	bufferFlushCycle int // flush interval for the ssh->ws output buffer, in milliseconds
}
// NewSSHWSHandle builds a websocket-over-SSH handler whose output buffer is
// flushed every bfc milliseconds.
func NewSSHWSHandle(bfc int) *SSHWebSocketHandle {
	return &SSHWebSocketHandle{bufferFlushCycle: bfc}
}
// ShouldClearSessionAfterExec reports that the auth session must be cleared
// once the SSH websocket handler finishes.
func (c *SSHWebSocketHandle) ShouldClearSessionAfterExec() bool {
	return true
}
// ServeAfterAuthenticated handles an authenticated webSocket connection: it
// upgrades the request to a WebSocket and bridges it to an SSH shell on the
// host/port carried in the claims, using the credentials from the session.
func (c *SSHWebSocketHandle) ServeAfterAuthenticated(w http.ResponseWriter, r *http.Request, claims *utils.Claims, session utils.Session) {
	// init webSocket connection
	conn, err := websocket.Accept(w, r, nil)
	if err != nil {
		// Use named status constants instead of magic numbers, and drop the
		// dangling ':' that previously ended the client-facing message.
		http.Error(w, "Cannot setup WebSocket connection", http.StatusBadRequest)
		log.Println("Error: Cannot setup WebSocket connection:", err)
		return
	}
	defer conn.Close(websocket.StatusNormalClosure, "closed")
	userInfo := session.Value.(models.UserInfo)
	// Terminal geometry requested by the browser, with defaults.
	cols := utils.GetQueryInt32(r, "cols", 120)
	rows := utils.GetQueryInt32(r, "rows", 32)
	sshAuth := ssh.Password(userInfo.Password)
	if err := c.SSHShellOverWS(r.Context(), conn, claims.Host, claims.Port, userInfo.Username, sshAuth, cols, rows); err != nil {
		log.Println("Error,", err)
		utils.Abort(w, err.Error(), http.StatusInternalServerError)
	}
}
// ssh shell over websocket
// first,we establish a ssh connection to ssh server when a webSocket comes;
// then we deliver ssh data via ssh connection between browser and ssh server.
// That is, read webSocket data from browser (e.g. 'ls' command) and send data to ssh server via ssh connection;
// the other hand, read returned ssh data from ssh server and write back to browser via webSocket API.
func (c *SSHWebSocketHandle) SSHShellOverWS(ctx context.Context, ws *websocket.Conn, host string, port int, username string, auth ssh.AuthMethod, cols, rows uint32) error {
	//setup ssh connection
	sshEntity := utils.SSHShellSession{
		Node: utils.NewSSHNode(host, port),
	}
	// set io for ssh session: ssh output accumulates in wsBuff and is
	// periodically flushed to the websocket by writeBufferToWebSocket below.
	var wsBuff WebSocketBufferWriter
	sshEntity.WriterPipe = &wsBuff
	var sshConn utils.SSHConnInterface = &sshEntity // set interface
	err := sshConn.Connect(username, auth)
	if err != nil {
		return fmt.Errorf("cannot setup ssh connection %w", err)
	}
	defer sshConn.Close()
	// config ssh
	sshSession, err := sshConn.Config(cols, rows)
	if err != nil {
		return fmt.Errorf("configure ssh error: %w", err)
	}
	// an egg:
	//if err := sshSession.Setenv("SSH_EGG", SSH_EGG); err != nil {
	//	log.Println(err)
	//}
	// after configure, the WebSocket is ok. Flush any remaining buffered
	// output when this function returns.
	defer wsBuff.Flush(ctx, websocket.MessageBinary, ws)
	// Buffered with capacity 3 so each of the three goroutines below can
	// signal completion exactly once without blocking.
	done := make(chan bool, 3)
	setDone := func() { done <- true }
	// most messages are ssh output, not webSocket input
	writeMessageToSSHServer := func(wc io.WriteCloser) { // read messages from webSocket
		defer setDone()
		for {
			msgType, p, err := ws.Read(ctx)
			// if WebSocket is closed by some reason, then this func will return,
			// and 'done' channel will be set, the outer func will reach to the end.
			// then ssh session will be closed in defer.
			if err != nil {
				log.Println("Error: error reading webSocket message:", err)
				return
			}
			if err = DispatchMessage(sshSession, msgType, p, wc); err != nil {
				log.Println("Error: error write data to ssh server:", err)
				return
			}
		}
	}
	stopper := make(chan bool) // timer stopper
	// check webSocketWriterBuffer(if not empty,then write back to webSocket) every 120 ms.
	writeBufferToWebSocket := func() {
		defer setDone()
		tick := time.NewTicker(time.Millisecond * time.Duration(c.bufferFlushCycle))
		//for range time.Tick(120 * time.Millisecond){}
		defer tick.Stop()
		for {
			select {
			case <-tick.C:
				if err := wsBuff.Flush(ctx, websocket.MessageBinary, ws); err != nil {
					log.Println("Error: error sending data via webSocket:", err)
					return
				}
			case <-stopper:
				return
			}
		}
	}
	go writeMessageToSSHServer(sshEntity.StdinPipe)
	go writeBufferToWebSocket()
	go func() {
		defer setDone()
		// NOTE(review): "exist" in the log message below likely means "exited".
		if err := sshSession.Wait(); err != nil {
			log.Println("ssh exist from server", err)
		}
		// if ssh is closed (wait returns), then 'done', web socket will be closed.
		// by the way, buffered data will be flushed before closing WebSocket.
	}()
	<-done
	stopper <- true // stop tick timer(if tick is finished by due to the bad WebSocket, this line will just only set channel(no bad effect). )
	log.Println("Info: websocket finished!")
	return nil
}
|
package apiservice
import (
"context"
"reflect"
apiservicev1alpha1 "github.com/ligangty/api-service/pkg/apis/apiservice/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var log = logf.Log.WithName("controller_apiservice")
/**
* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
* business logic. Delete these comments after modifying this file.*
*/
// Add creates a new APIService Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler wired to the manager's
// cached client and scheme.
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileAPIService{client: mgr.GetClient(), scheme: mgr.GetScheme()}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler,
// watching the primary APIService resource plus owned Deployments/Services.
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("apiservice-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Watch for changes to primary resource APIService
	err = c.Watch(&source.Kind{Type: &apiservicev1alpha1.APIService{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}
	// TODO(user): Modify this to be the types you create that are owned by the primary resource
	// Watch for changes to secondary resource Pods and requeue the owner APIService
	// err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{
	// 	IsController: true,
	// 	OwnerType:    &apiservicev1alpha1.APIService{},
	// })
	// if err != nil {
	// 	return err
	// }
	// Watch owned Deployments so edits/deletions are reconciled back.
	err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType: &apiservicev1alpha1.APIService{},
	})
	if err != nil {
		return err
	}
	// Watch owned Services for the same reason.
	err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType: &apiservicev1alpha1.APIService{},
	})
	if err != nil {
		return err
	}
	return nil
}
// blank assignment to verify that ReconcileAPIService implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileAPIService{}
// ReconcileAPIService reconciles a APIService object
type ReconcileAPIService struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	scheme *runtime.Scheme // used to set owner references on created objects
}
// Reconcile reads that state of the cluster for a APIService object and makes changes based on the state read
// and what is in the APIService.Spec: it ensures a Deployment and Service
// exist for the CR and mirrors the backing pod names into Status.Nodes.
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileAPIService) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
	reqLogger.Info("Reconciling APIService")
	// Fetch the APIService instance
	apiService := &apiservicev1alpha1.APIService{}
	err := r.client.Get(context.TODO(), request.NamespacedName, apiService)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}
	// Check if the Deployment already exists, if not create a new one.
	deployment := &appsv1.Deployment{}
	err = r.client.Get(context.TODO(), types.NamespacedName{Name: apiService.Name, Namespace: apiService.Namespace}, deployment)
	if err != nil && errors.IsNotFound(err) {
		// BUG FIX: previously ANY Get error (not just NotFound) triggered a
		// Create, and the trailing "else if err != nil" branch was
		// unreachable. Now transient read errors are surfaced and retried.
		dep := r.deploymentForAPIService(apiService)
		reqLogger.Info("Creating a new Deployment.", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
		err = r.client.Create(context.TODO(), dep)
		if err != nil {
			reqLogger.Error(err, "Failed to create new Deployment.", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
			return reconcile.Result{}, err
		}
		// Deployment created successfully - requeue so the next pass observes
		// the new object before continuing.
		return reconcile.Result{Requeue: true}, nil
	} else if err != nil {
		reqLogger.Error(err, "Failed to get Deployment.")
		return reconcile.Result{}, err
	}
	// Check if the Service already exists, if not create a new one
	// NOTE: The Service is used to expose the Deployment. However, the Service is not required at all for the apiService example to work. The purpose is to add more examples of what you can do in your operator project.
	service := &corev1.Service{}
	err = r.client.Get(context.TODO(), types.NamespacedName{Name: apiService.Name, Namespace: apiService.Namespace}, service)
	if err != nil && errors.IsNotFound(err) {
		// Define a new Service object
		ser := r.serviceForAPIService(apiService)
		reqLogger.Info("Creating a new Service.", "Service.Namespace", ser.Namespace, "Service.Name", ser.Name)
		err = r.client.Create(context.TODO(), ser)
		if err != nil {
			reqLogger.Error(err, "Failed to create new Service.", "Service.Namespace", ser.Namespace, "Service.Name", ser.Name)
			return reconcile.Result{}, err
		}
	} else if err != nil {
		reqLogger.Error(err, "Failed to get Service.")
		return reconcile.Result{}, err
	}
	// Update the APIService status with the pod names
	// List the pods for this apiService's deployment
	podList := &corev1.PodList{}
	listOpts := []client.ListOption{
		client.InNamespace(apiService.Namespace),
		client.MatchingLabels(labelsForAPIService(apiService.Name)),
	}
	err = r.client.List(context.TODO(), podList, listOpts...)
	if err != nil {
		reqLogger.Error(err, "Failed to list pods.", "APIService.Namespace", apiService.Namespace, "APIService.Name", apiService.Name)
		return reconcile.Result{}, err
	}
	podNames := getPodNames(podList.Items)
	// Update status.Nodes if needed
	if !reflect.DeepEqual(podNames, apiService.Status.Nodes) {
		apiService.Status.Nodes = podNames
		err := r.client.Status().Update(context.TODO(), apiService)
		if err != nil {
			reqLogger.Error(err, "Failed to update APIService status.")
			return reconcile.Result{}, err
		}
	}
	return reconcile.Result{}, nil
}
// deploymentForAPIService builds the Deployment for the given APIService CR:
// one pod template running the api-server image, replica count taken from
// Spec.Size, selected/labelled via labelsForAPIService, and owned by the CR
// so it is garbage-collected with it.
func (r *ReconcileAPIService) deploymentForAPIService(m *apiservicev1alpha1.APIService) *appsv1.Deployment {
	ls := labelsForAPIService(m.Name)
	replicas := m.Spec.Size
	dep := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: m.Name,
			Namespace: m.Namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: ls,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: ls,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Image: "quay.io/ligangty/test-api:latest",
						Name: "api-service",
						Command: []string{"api-server"},
						Ports: []corev1.ContainerPort{{
							ContainerPort: 8080,
							Name: "api-service",
						}},
					}},
				},
			},
		},
	}
	// Set APIService instance as the owner of the Deployment.
	// NOTE(review): SetControllerReference's error is ignored here — it can
	// fail for cross-namespace or cluster-scoped owners; confirm acceptable.
	controllerutil.SetControllerReference(m, dep, r.scheme)
	return dep
}
// serviceForAPIService function takes in a APIService object and returns a Service for that object:
// a Service selecting the CR's pods on port 8080, owned by the CR.
func (r *ReconcileAPIService) serviceForAPIService(m *apiservicev1alpha1.APIService) *corev1.Service {
	ls := labelsForAPIService(m.Name)
	ser := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: m.Name,
			Namespace: m.Namespace,
		},
		Spec: corev1.ServiceSpec{
			Selector: ls,
			Ports: []corev1.ServicePort{
				{
					Port: 8080,
					Name: m.Name,
				},
			},
		},
	}
	// Set APIService instance as the owner of the Service.
	// NOTE(review): error from SetControllerReference ignored, as above.
	controllerutil.SetControllerReference(m, ser, r.scheme)
	return ser
}
// labelsForAPIService returns the selector labels identifying resources that
// belong to the apiService CR with the given name.
func labelsForAPIService(name string) map[string]string {
	labels := make(map[string]string, 2)
	labels["app"] = "apiservice"
	labels["apiservice_cr"] = name
	return labels
}
// getPodNames returns the names of the given pods, preserving input order.
// It returns nil (not an empty slice) for empty input so that Reconcile's
// reflect.DeepEqual comparison against an unset status slice still matches.
func getPodNames(pods []corev1.Pod) []string {
	if len(pods) == 0 {
		return nil
	}
	// Preallocate: the result length is known up front.
	podNames := make([]string, 0, len(pods))
	for _, pod := range pods {
		podNames = append(podNames, pod.Name)
	}
	return podNames
}
|
package types
import (
"errors"
. "grm-service/util"
)
// Sentinel errors returned by device/storage operations.
var (
	ErrInvalidDBInfo = errors.New(TR("Invalid database connection info"))
	ErrInvalidNFSInfo = errors.New(TR("Invalid nfs file system info"))
	ErrDeviceNameExists = errors.New(TR("device name already exists"))
	ErrDeviceVolume = errors.New(TR("invalid volume size"))
	// NOTE(review): unlike its siblings, this message is not passed through
	// TR() for translation — confirm whether that is intentional.
	ErrGetDeviceInfo = errors.New("Failed to get storage volume")
)
// Device describes a storage device (database- or file-system-backed).
type Device struct {
	Id int `json:"id"`
	Label string `json:"label"`
	StorageType string `json:"storage_type"`
	StorageOrg string `json:"storage_org"`
	// NOTE(review): singular field, plural tag "data_types" — confirm.
	DataType string `json:"data_types"`
	IpAddress string `json:"ip_address"`
	// NOTE(review): field says Service, tag says "server_name" — confirm.
	ServiceName string `json:"server_name"`
	DBPort string `json:"db_port"`
	DBUser string `json:"db_user"`
	DBPwd string `json:"db_pwd"`
	GeoStorage string `json:"geo_storage"`
	FileSys string `json:"file_sys,omitempty"`
	MountPath string `json:"mount_path,omitempty"`
	Volume string `json:"total_volume"`
	Used string `json:"used"`
	UsedPercent string `json:"used_percent"`
	CreateTime string `json:"create_time"`
	Description string `json:"description"`
}
// DeviceList is a collection of devices.
type DeviceList []Device
// UpdateDeviceRequest carries the mutable fields of a device update.
type UpdateDeviceRequest struct {
	Label string `json:"label"`
	// NOTE(review): field name "DateType" looks like a typo for DataType
	// (tag is "data_type"); renaming would break external users — flagging only.
	DateType string `json:"data_type"`
	IpAddress string `json:"ip_address"`
	DBPort string `json:"db_port"`
	DBUser string `json:"db_user"`
	DBPwd string `json:"db_pwd"`
	FileSys string `json:"file_sys"`
	MountPath string `json:"mount_path"`
	Description string `json:"description"`
	Volume string `json:"volume"`
	GeoServer string `json:"geo_server"`
}
|
/*
Given two integers, compute the two numbers that come from the blending the bits of the binary numbers of equal length(same number of digits, a number with less digits has zeros added), one after the other, like such:
2 1
10 01
1 0
1001
0 1
0110
some examples:
Input Binary Conversion Output
1,0 1,0 10,01 2,1
1,2 01,10 0110,1001 6,9
2,3 10,11 1101,1110 13,14
4,9 0100,1001 01100001,10010010 97,146
12,12 1100,1100 11110000,11110000 240,240
1,3 01,11 0111,1011 7,11
7,11 0111,1011 01101111,10011111 111,159
7,3 111,011 101111,011111 47,31
The program must take integers as inputs and give integers as outputs
*/
package main
import (
"fmt"
"math/bits"
)
// main exercises blend against the reference table from the problem
// statement; test panics on any mismatch.
func main() {
	cases := []struct{ x, y, xy, yx uint64 }{
		{1, 0, 2, 1},
		{1, 2, 6, 9},
		{2, 3, 13, 14},
		{4, 9, 97, 146},
		{12, 12, 240, 240},
		{1, 3, 7, 11},
		{7, 11, 111, 159},
		{7, 3, 47, 31},
	}
	for _, c := range cases {
		test(c.x, c.y, c.xy, c.yx)
	}
}
// assert panics when x is false; used by the self-checking test harness.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test blends x and y in both orders, prints the results, and asserts they
// equal the expected values rxy and ryx.
func test(x, y, rxy, ryx uint64) {
	xy, yx := blend(x, y)
	fmt.Println(xy, yx)
	assert(xy == rxy)
	assert(yx == ryx)
}
// blend interleaves the bits of x and y in both orders, zero-padding the
// shorter operand to the longer one's bit length.
func blend(x, y uint64) (uint64, uint64) {
	width := max(bitlen(x), bitlen(y))
	xy := interleave(x, y, width)
	yx := interleave(y, x, width)
	return xy, yx
}
// interleave builds a 2n-bit number whose bits alternate between the low n
// bits of x (even positions from the top) and of y, most significant first.
func interleave(x, y uint64, n int) uint64 {
	var out uint64
	for bit := n - 1; bit >= 0; bit-- {
		hi := (x >> bit) & 1
		lo := (y >> bit) & 1
		out = out<<2 | hi<<1 | lo
	}
	return out
}
// bitlen returns the number of bits required to represent x; it is 0 for
// x == 0. bits.Len64 is the standard-library spelling of the original
// 64 - bits.LeadingZeros64(x).
func bitlen(x uint64) int {
	return bits.Len64(x)
}
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
package ui
import (
	"fmt"
	"image"
	"io"
	"net/http"
	"os"
	"path"
	"strings"

	"github.com/go-gl/gl/v2.1/gl"
)
// The atlas is a single 4096x4096 texture holding textureDim x textureDim
// (16x16 = 256) thumbnail slots of 256x256 pixels each.
const textureSize = 4096
const textureDim = textureSize / 256
const textureCount = textureDim * textureDim
// Texture is an LRU-managed OpenGL texture atlas of ROM thumbnails,
// keyed by ROM file path.
type Texture struct {
	texture uint32               // OpenGL texture name of the atlas
	lookup  map[string]int       // ROM path -> occupied atlas slot index
	reverse [textureCount]string // slot index -> ROM path (for eviction)
	access  [textureCount]int    // slot index -> last-touched tick (see mark)
	counter int                  // monotonically increasing access tick
	ch      chan string          // paths whose thumbnail finished downloading; drained by Purge
}
// NewTexture allocates the atlas texture on the GPU, sized but with no
// pixel data yet, and returns the bookkeeping structure around it.
func NewTexture() *Texture {
	id := createTexture()
	gl.BindTexture(gl.TEXTURE_2D, id)
	gl.TexImage2D(
		gl.TEXTURE_2D, 0, gl.RGBA,
		textureSize, textureSize,
		0, gl.RGBA, gl.UNSIGNED_BYTE, nil)
	gl.BindTexture(gl.TEXTURE_2D, 0)
	return &Texture{
		texture: id,
		lookup:  make(map[string]int),
		ch:      make(chan string, 1024),
	}
}
// Purge drains the notification channel without blocking and drops the
// cache entry for every path received, forcing a reload on next Lookup.
func (t *Texture) Purge() {
	for {
		select {
		case stale := <-t.ch:
			delete(t.lookup, stale)
		default:
			return
		}
	}
}
// Bind makes the atlas the active 2D texture.
func (t *Texture) Bind() {
	gl.BindTexture(gl.TEXTURE_2D, t.texture)
}
// Unbind clears the active 2D texture binding.
func (t *Texture) Unbind() {
	gl.BindTexture(gl.TEXTURE_2D, 0)
}
// Lookup returns the atlas texture coordinates for path, loading the
// thumbnail into an atlas slot on a cache miss.
func (t *Texture) Lookup(path string) (x, y, dx, dy float32) {
	index, ok := t.lookup[path]
	if !ok {
		return t.coord(t.load(path))
	}
	// Fix: touch the entry on a cache hit too; previously only load()
	// marked slots, so lru() could evict a slot still being displayed.
	t.mark(index)
	return t.coord(index)
}
// mark records a use of the given atlas slot for LRU accounting.
func (t *Texture) mark(index int) {
	t.counter++
	t.access[index] = t.counter
}
// lru returns the index of the least-recently-marked atlas slot
// (never-used slots have tick 0 and are chosen first).
func (t *Texture) lru() int {
	best, bestTick := 0, t.counter+1
	for slot, tick := range t.access {
		if tick < bestTick {
			best, bestTick = slot, tick
		}
	}
	return best
}
// coord converts an atlas slot index into normalized texture coordinates:
// top-left corner (x, y) and extent (dx, dy). dy is scaled by 240/256
// because an NES frame is 256x240 inside a 256x256 slot.
func (t *Texture) coord(index int) (x, y, dx, dy float32) {
	col := index % textureDim
	row := index / textureDim
	x = float32(col) / textureDim
	y = float32(row) / textureDim
	dx = 1.0 / textureDim
	dy = dx * 240 / 256
	return
}
// load evicts the least-recently-used atlas slot, installs path in it, and
// uploads the thumbnail pixels into that slot's 256x256 region of the atlas.
// The atlas texture must already be bound by the caller.
func (t *Texture) load(path string) int {
	index := t.lru()
	// Drop the mapping of whatever previously occupied this slot.
	delete(t.lookup, t.reverse[index])
	t.mark(index)
	t.lookup[path] = index
	t.reverse[index] = path
	// Pixel offset of the slot inside the atlas.
	x := int32((index % textureDim) * 256)
	y := int32((index / textureDim) * 256)
	im := copyImage(t.loadThumbnail(path))
	size := im.Rect.Size()
	gl.TexSubImage2D(
		gl.TEXTURE_2D, 0, x, y, int32(size.X), int32(size.Y),
		gl.RGBA, gl.UNSIGNED_BYTE, gl.Ptr(im.Pix))
	return index
}
// loadThumbnail returns a thumbnail image for the given ROM: the cached PNG
// if one exists on disk, otherwise a generic placeholder rendered from the
// ROM's file name (kicking off a background download when the cache misses).
func (t *Texture) loadThumbnail(romPath string) image.Image {
	_, name := path.Split(romPath)
	name = strings.TrimSuffix(name, ".nes")
	name = strings.Replace(name, "_", " ", -1)
	name = strings.Title(name)
	placeholder := CreateGenericThumbnail(name)
	hash, err := hashFile(romPath)
	if err != nil {
		return placeholder
	}
	cachePath := thumbnailPath(hash)
	if _, err := os.Stat(cachePath); os.IsNotExist(err) {
		go t.downloadThumbnail(romPath, hash)
		return placeholder
	}
	thumbnail, err := loadPNG(cachePath)
	if err != nil {
		return placeholder
	}
	return thumbnail
}
// downloadThumbnail fetches the thumbnail for the given ROM hash, stores it
// in the on-disk cache, and notifies Purge (via t.ch) so the stale cache
// entry for romPath is invalidated on the next frame.
func (t *Texture) downloadThumbnail(romPath, hash string) error {
	url := thumbnailURL(hash)
	filename := thumbnailPath(hash)
	dir, _ := path.Split(filename)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Fix: previously any response body was cached, so a 404/500 error page
	// would be written to disk and later parsed as a PNG.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("thumbnail download %s: unexpected status %s", url, resp.Status)
	}
	if err := os.MkdirAll(dir, 0755); err != nil {
		return err
	}
	file, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer file.Close()
	if _, err := io.Copy(file, resp.Body); err != nil {
		return err
	}
	t.ch <- romPath
	return nil
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"cmp"
"context"
"fmt"
"slices"
"sync"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/executor/aggfuncs"
"github.com/pingcap/tidb/executor/internal/exec"
"github.com/pingcap/tidb/executor/internal/vecgroupchecker"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/channel"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/set"
"github.com/twmb/murmur3"
"go.uber.org/zap"
)
// aggPartialResultMapper maps an encoded group key to the slice of partial
// results (one entry per aggregate function) accumulated for that group.
type aggPartialResultMapper map[string][]aggfuncs.PartialResult
// baseHashAggWorker stores the common attributes of HashAggFinalWorker and HashAggPartialWorker.
// nolint:structcheck
type baseHashAggWorker struct {
	ctx          sessionctx.Context
	finishCh     <-chan struct{} // closed by HashAggExec.Close to stop all workers
	aggFuncs     []aggfuncs.AggFunc
	maxChunkSize int
	stats        *AggWorkerStat // nil unless runtime stats collection is enabled
	memTracker   *memory.Tracker
	BInMap       int // indicate there are 2^BInMap buckets in Golang Map.
}
// newBaseHashAggWorker builds the state shared by partial and final
// hash-aggregation workers.
func newBaseHashAggWorker(ctx sessionctx.Context, finishCh <-chan struct{}, aggFuncs []aggfuncs.AggFunc,
	maxChunkSize int, memTrack *memory.Tracker) baseHashAggWorker {
	return baseHashAggWorker{
		ctx:          ctx,
		finishCh:     finishCh,
		aggFuncs:     aggFuncs,
		maxChunkSize: maxChunkSize,
		memTracker:   memTrack,
		BInMap:       0,
	}
}
// HashAggPartialWorker indicates the partial workers of parallel hash agg execution,
// the number of the worker can be set by `tidb_hashagg_partial_concurrency`.
type HashAggPartialWorker struct {
	baseHashAggWorker

	inputCh           chan *chunk.Chunk        // chunks of child data to aggregate
	outputChs         []chan *HashAggIntermData // one channel per final worker
	globalOutputCh    chan *AfFinalResult       // for reporting errors/panics to the main thread
	giveBackCh        chan<- *HashAggInput      // returns the input chunk for reuse by the fetcher
	partialResultsMap aggPartialResultMapper
	groupByItems      []expression.Expression
	groupKey          [][]byte // reusable per-row encoded group-key buffer
	// chk stores the input data from child,
	// and is reused by childExec and partial worker.
	chk *chunk.Chunk
}
// HashAggFinalWorker indicates the final workers of parallel hash agg execution,
// the number of the worker can be set by `tidb_hashagg_final_concurrency`.
type HashAggFinalWorker struct {
	baseHashAggWorker

	rowBuffer           []types.Datum
	mutableRow          chunk.MutRow
	partialResultMap    aggPartialResultMapper // merged results for the groups this worker owns
	groupSet            set.StringSetWithMemoryUsage
	inputCh             chan *HashAggIntermData // intermediate data shuffled from partial workers
	outputCh            chan *AfFinalResult     // finished chunks for the main thread
	finalResultHolderCh chan *chunk.Chunk       // recycled result chunks
	groupKeys           [][]byte
}
// AfFinalResult indicates aggregation functions final result.
type AfFinalResult struct {
	chk *chunk.Chunk
	err error
	// giveBackCh returns the chunk to its producing final worker for reuse.
	giveBackCh chan *chunk.Chunk
}
// HashAggExec deals with all the aggregate functions.
// It is built from the Aggregate Plan. When Next() is called, it reads all the data from Src
// and updates all the items in PartialAggFuncs.
// The parallel execution flow is as the following graph shows:
/*
+-------------+
| Main Thread |
+------+------+
^
|
+
+-+- +-+
| | ...... | | finalOutputCh
+++- +-+
^
|
+---------------+
| |
+--------------+ +--------------+
| final worker | ...... | final worker |
+------------+-+ +-+------------+
^ ^
| |
+-+ +-+ ...... +-+
| | | | | |
... ... ... partialOutputChs
| | | | | |
+++ +++ +++
^ ^ ^
+-+ | | |
| | +--------o----+ |
inputCh +-+ | +-----------------+---+
| | | |
... +---+------------+ +----+-----------+
| | | partial worker | ...... | partial worker |
+++ +--------------+-+ +-+--------------+
| ^ ^
| | |
+----v---------+ +++ +-+ +++
| data fetcher | +------> | | | | ...... | | partialInputChs
+--------------+ +-+ +-+ +-+
*/
type HashAggExec struct {
	exec.BaseExecutor

	sc               *stmtctx.StatementContext
	PartialAggFuncs  []aggfuncs.AggFunc
	FinalAggFuncs    []aggfuncs.AggFunc
	partialResultMap aggPartialResultMapper
	bInMap           int64 // indicate there are 2^bInMap buckets in partialResultMap
	groupSet         set.StringSetWithMemoryUsage
	groupKeys        []string // ordered group keys, iterated by cursor4GroupKey when emitting
	cursor4GroupKey  int
	GroupByItems     []expression.Expression
	groupKeyBuffer   [][]byte

	// Channels and workers below are only used by the parallel execution path.
	finishCh         chan struct{}
	finalOutputCh    chan *AfFinalResult
	partialOutputChs []chan *HashAggIntermData
	inputCh          chan *HashAggInput
	partialInputChs  []chan *chunk.Chunk
	partialWorkers   []HashAggPartialWorker
	finalWorkers     []HashAggFinalWorker
	defaultVal       *chunk.Chunk
	childResult      *chunk.Chunk

	// isChildReturnEmpty indicates whether the child executor only returns an empty input.
	isChildReturnEmpty bool
	// After we support parallel execution for aggregation functions with distinct,
	// we can remove this attribute.
	isUnparallelExec        bool
	parallelExecInitialized bool
	prepared                bool
	executed                bool

	memTracker  *memory.Tracker // track memory usage.
	diskTracker *disk.Tracker

	stats *HashAggRuntimeStats

	// listInDisk is the chunks to store row values for spilled data.
	// The HashAggExec may be set to `spill mode` multiple times, and all spilled data will be appended to ListInDisk.
	listInDisk *chunk.ListInDisk
	// numOfSpilledChks indicates the number of all the spilled chunks.
	numOfSpilledChks int
	// offsetOfSpilledChks indicates the offset of the chunk be read from the disk.
	// In each round of processing, we need to re-fetch all the chunks spilled in the last one.
	offsetOfSpilledChks int
	// inSpillMode indicates whether HashAgg is in `spill mode`.
	// When HashAgg is in `spill mode`, the size of `partialResultMap` is no longer growing and all the data fetched
	// from the child executor is spilled to the disk.
	inSpillMode uint32
	// tmpChkForSpill is the temp chunk for spilling.
	tmpChkForSpill *chunk.Chunk
	// spillAction save the Action for spilling.
	spillAction *AggSpillDiskAction
	// isChildDrained indicates whether the all data from child has been taken out.
	isChildDrained bool
}
// HashAggInput indicates the input of hash agg exec.
type HashAggInput struct {
	chk *chunk.Chunk
	// giveBackCh is bound with specific partial worker,
	// it's used to reuse the `chk`,
	// and tell the data-fetcher which partial worker it should send data to.
	giveBackCh chan<- *chunk.Chunk
}
// HashAggIntermData indicates the intermediate data of aggregation execution.
type HashAggIntermData struct {
	groupKeys        []string // the subset of keys routed to one final worker
	cursor           int      // read position into groupKeys for batched consumption
	partialResultMap aggPartialResultMapper
}
// getPartialResultBatch fetches a batch of partial results from HashAggIntermData,
// appending at most maxChunkSize entries to prs and advancing the cursor.
// reachEnd reports whether all group keys have been consumed.
func (d *HashAggIntermData) getPartialResultBatch(_ *stmtctx.StatementContext, prs [][]aggfuncs.PartialResult, _ []aggfuncs.AggFunc, maxChunkSize int) (_ [][]aggfuncs.PartialResult, groupKeys []string, reachEnd bool) {
	start := d.cursor
	for d.cursor < len(d.groupKeys) && len(prs) < maxChunkSize {
		prs = append(prs, d.partialResultMap[d.groupKeys[d.cursor]])
		d.cursor++
	}
	reachEnd = d.cursor == len(d.groupKeys)
	return prs, d.groupKeys[start:d.cursor], reachEnd
}
// Close implements the Executor Close interface.
func (e *HashAggExec) Close() error {
	if e.stats != nil {
		defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats)
	}
	if e.isUnparallelExec {
		var firstErr error
		e.childResult = nil
		e.groupSet, _ = set.NewStringSetWithMemoryUsage()
		e.partialResultMap = nil
		if e.memTracker != nil {
			e.memTracker.ReplaceBytesUsed(0)
		}
		if e.listInDisk != nil {
			firstErr = e.listInDisk.Close()
		}
		e.spillAction, e.tmpChkForSpill = nil, nil
		// Close the base executor regardless, but report the first error seen.
		if err := e.BaseExecutor.Close(); firstErr == nil {
			firstErr = err
		}
		return firstErr
	}
	if e.parallelExecInitialized {
		// `Close` may be called after `Open` without calling `Next` in test.
		if !e.prepared {
			close(e.inputCh)
			for _, ch := range e.partialOutputChs {
				close(ch)
			}
			for _, ch := range e.partialInputChs {
				close(ch)
			}
			close(e.finalOutputCh)
		}
		// Broadcast the stop signal to all workers, then drain every channel
		// so blocked goroutines can exit.
		close(e.finishCh)
		for _, ch := range e.partialOutputChs {
			channel.Clear(ch)
		}
		for _, ch := range e.partialInputChs {
			channel.Clear(ch)
		}
		channel.Clear(e.finalOutputCh)
		e.executed = false
		if e.memTracker != nil {
			e.memTracker.ReplaceBytesUsed(0)
		}
	}
	return e.BaseExecutor.Close()
}
// Open implements the Executor Open interface.
func (e *HashAggExec) Open(ctx context.Context) error {
	failpoint.Inject("mockHashAggExecBaseExecutorOpenReturnedError", func(val failpoint.Value) {
		if val, _ := val.(bool); val {
			failpoint.Return(errors.New("mock HashAggExec.baseExecutor.Open returned error"))
		}
	})
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	e.prepared = false

	// Reuse the memory tracker across re-opens (executors can be re-opened,
	// e.g. under Apply).
	if e.memTracker != nil {
		e.memTracker.Reset()
	} else {
		e.memTracker = memory.NewTracker(e.ID(), -1)
	}
	if e.Ctx().GetSessionVars().TrackAggregateMemoryUsage {
		e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
	}

	if e.isUnparallelExec {
		e.initForUnparallelExec()
		return nil
	}
	e.initForParallelExec(e.Ctx())
	return nil
}
// initForUnparallelExec prepares the single-threaded execution state: group
// set, partial-result map, child-result chunk, and disk-spill bookkeeping.
func (e *HashAggExec) initForUnparallelExec() {
	var setSize int64
	e.groupSet, setSize = set.NewStringSetWithMemoryUsage()
	e.partialResultMap = make(aggPartialResultMapper)
	e.bInMap = 0
	failpoint.Inject("ConsumeRandomPanic", nil)
	// Account for the map's initial bucket plus the empty group set.
	e.memTracker.Consume(hack.DefBucketMemoryUsageForMapStrToSlice*(1<<e.bInMap) + setSize)
	e.groupKeyBuffer = make([][]byte, 0, 8)
	e.childResult = tryNewCacheChunk(e.Children(0))
	e.memTracker.Consume(e.childResult.MemoryUsage())

	e.offsetOfSpilledChks, e.numOfSpilledChks = 0, 0
	e.executed, e.isChildDrained = false, false
	e.listInDisk = chunk.NewListInDisk(retTypes(e.Children(0)))
	e.tmpChkForSpill = tryNewCacheChunk(e.Children(0))
	// Only wire up disk spilling when memory tracking and tmp storage are enabled.
	if vars := e.Ctx().GetSessionVars(); vars.TrackAggregateMemoryUsage && variable.EnableTmpStorageOnOOM.Load() {
		e.diskTracker = disk.NewTracker(e.ID(), -1)
		e.diskTracker.AttachTo(vars.StmtCtx.DiskTracker)
		e.listInDisk.GetDiskTracker().AttachTo(e.diskTracker)
		vars.MemTracker.FallbackOldAndSetNewActionForSoftLimit(e.ActionSpill())
	}
}
// closeBaseExecutor releases the base executor's resources when a panic is
// unwinding through Open(). It must be invoked via defer (recover only works
// inside a deferred function) and re-raises the panic for the caller.
func closeBaseExecutor(b *exec.BaseExecutor) {
	if r := recover(); r != nil {
		// Release the resource, but throw the panic again and let the top level handle it.
		terror.Log(b.Close())
		logutil.BgLogger().Warn("panic in Open(), close base executor and throw exception again")
		panic(r)
	}
}
// initForParallelExec builds the channel topology and the partial/final
// worker pools shown in the diagram above; worker counts come from the
// session's hash-agg concurrency variables.
func (e *HashAggExec) initForParallelExec(_ sessionctx.Context) {
	sessionVars := e.Ctx().GetSessionVars()
	finalConcurrency := sessionVars.HashAggFinalConcurrency()
	partialConcurrency := sessionVars.HashAggPartialConcurrency()
	e.isChildReturnEmpty = true
	// Buffer large enough for every worker to report one error without blocking.
	e.finalOutputCh = make(chan *AfFinalResult, finalConcurrency+partialConcurrency+1)
	e.inputCh = make(chan *HashAggInput, partialConcurrency)
	e.finishCh = make(chan struct{}, 1)

	e.partialInputChs = make([]chan *chunk.Chunk, partialConcurrency)
	for i := range e.partialInputChs {
		e.partialInputChs[i] = make(chan *chunk.Chunk, 1)
	}
	e.partialOutputChs = make([]chan *HashAggIntermData, finalConcurrency)
	for i := range e.partialOutputChs {
		e.partialOutputChs[i] = make(chan *HashAggIntermData, partialConcurrency)
	}

	e.partialWorkers = make([]HashAggPartialWorker, partialConcurrency)
	e.finalWorkers = make([]HashAggFinalWorker, finalConcurrency)
	e.initRuntimeStats()

	// Init partial workers.
	for i := 0; i < partialConcurrency; i++ {
		w := HashAggPartialWorker{
			baseHashAggWorker: newBaseHashAggWorker(e.Ctx(), e.finishCh, e.PartialAggFuncs, e.MaxChunkSize(), e.memTracker),
			inputCh:           e.partialInputChs[i],
			outputChs:         e.partialOutputChs,
			giveBackCh:        e.inputCh,
			globalOutputCh:    e.finalOutputCh,
			partialResultsMap: make(aggPartialResultMapper),
			groupByItems:      e.GroupByItems,
			chk:               tryNewCacheChunk(e.Children(0)),
			groupKey:          make([][]byte, 0, 8),
		}
		// There is a bucket in the empty partialResultsMap.
		failpoint.Inject("ConsumeRandomPanic", nil)
		e.memTracker.Consume(hack.DefBucketMemoryUsageForMapStrToSlice * (1 << w.BInMap))
		if e.stats != nil {
			w.stats = &AggWorkerStat{}
			e.stats.PartialStats = append(e.stats.PartialStats, w.stats)
		}
		e.memTracker.Consume(w.chk.MemoryUsage())
		e.partialWorkers[i] = w
		// Seed the fetcher with one reusable input chunk per partial worker.
		input := &HashAggInput{
			chk:        newFirstChunk(e.Children(0)),
			giveBackCh: w.inputCh,
		}
		e.memTracker.Consume(input.chk.MemoryUsage())
		e.inputCh <- input
	}

	// Init final workers.
	for i := 0; i < finalConcurrency; i++ {
		groupSet, setSize := set.NewStringSetWithMemoryUsage()
		w := HashAggFinalWorker{
			baseHashAggWorker:   newBaseHashAggWorker(e.Ctx(), e.finishCh, e.FinalAggFuncs, e.MaxChunkSize(), e.memTracker),
			partialResultMap:    make(aggPartialResultMapper),
			groupSet:            groupSet,
			inputCh:             e.partialOutputChs[i],
			outputCh:            e.finalOutputCh,
			finalResultHolderCh: make(chan *chunk.Chunk, 1),
			rowBuffer:           make([]types.Datum, 0, e.Schema().Len()),
			mutableRow:          chunk.MutRowFromTypes(retTypes(e)),
			groupKeys:           make([][]byte, 0, 8),
		}
		// There is a bucket in the empty partialResultsMap.
		e.memTracker.Consume(hack.DefBucketMemoryUsageForMapStrToSlice*(1<<w.BInMap) + setSize)
		groupSet.SetTracker(e.memTracker)
		if e.stats != nil {
			w.stats = &AggWorkerStat{}
			e.stats.FinalStats = append(e.stats.FinalStats, w.stats)
		}
		e.finalWorkers[i] = w
		e.finalWorkers[i].finalResultHolderCh <- newFirstChunk(e)
	}

	e.parallelExecInitialized = true
}
// getChildInput receives one chunk of child data into w.chk, returning false
// when the executor is finishing or the input channel is closed. The received
// chunk's columns are swapped into w.chk and the emptied chunk is handed back
// to the data fetcher through giveBackCh for reuse.
func (w *HashAggPartialWorker) getChildInput() bool {
	select {
	case <-w.finishCh:
		return false
	case chk, ok := <-w.inputCh:
		if !ok {
			return false
		}
		w.chk.SwapColumns(chk)
		w.giveBackCh <- &HashAggInput{
			chk:        chk,
			giveBackCh: w.inputCh,
		}
	}
	return true
}
// recoveryHashAgg converts a panic value recovered from a hash-agg goroutine
// into an error, forwards it to the result channel, and logs it with a stack
// trace. Fix: the original built the identical error twice (once for the
// channel, once for the log); build it once and reuse it.
func recoveryHashAgg(output chan *AfFinalResult, r interface{}) {
	err := errors.Errorf("%v", r)
	output <- &AfFinalResult{err: err}
	logutil.BgLogger().Error("parallel hash aggregation panicked", zap.Error(err), zap.Stack("stack"))
}
// run is the main loop of a partial worker: it repeatedly fetches a chunk of
// child data, folds it into its private partialResultsMap, and on exit
// shuffles the accumulated intermediate data to the final workers.
func (w *HashAggPartialWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitGroup, finalConcurrency int) {
	start := time.Now()
	needShuffle, sc := false, ctx.GetSessionVars().StmtCtx
	defer func() {
		if r := recover(); r != nil {
			recoveryHashAgg(w.globalOutputCh, r)
		}
		// Shuffle happens in the defer so it runs exactly once, after all
		// input is consumed (or after an error/panic).
		if needShuffle {
			w.shuffleIntermData(sc, finalConcurrency)
		}
		w.memTracker.Consume(-w.chk.MemoryUsage())
		if w.stats != nil {
			w.stats.WorkerTime += int64(time.Since(start))
		}
		waitGroup.Done()
	}()
	for {
		waitStart := time.Now()
		ok := w.getChildInput()
		if w.stats != nil {
			w.stats.WaitTime += int64(time.Since(waitStart))
		}
		if !ok {
			return
		}
		execStart := time.Now()
		if err := w.updatePartialResult(ctx, sc, w.chk, len(w.partialResultsMap)); err != nil {
			w.globalOutputCh <- &AfFinalResult{err: err}
			return
		}
		if w.stats != nil {
			w.stats.ExecTime += int64(time.Since(execStart))
			w.stats.TaskNum++
		}
		// The intermData can be promised to be not empty if reaching here,
		// so we set needShuffle to be true.
		needShuffle = true
	}
}
// getGroupKeyMemUsage estimates the bytes held by a group-key buffer: the
// capacity of every key's backing array plus the slice-header overhead.
func getGroupKeyMemUsage(groupKey [][]byte) int64 {
	total := aggfuncs.DefSliceSize * int64(cap(groupKey))
	for _, k := range groupKey {
		total += int64(cap(k))
	}
	return total
}
// updatePartialResult encodes the group key for every row of chk, resolves
// (or allocates) the partial results for each row's group, and feeds each row
// into every aggregate function, tracking all memory deltas.
func (w *HashAggPartialWorker) updatePartialResult(ctx sessionctx.Context, sc *stmtctx.StatementContext, chk *chunk.Chunk, _ int) (err error) {
	memSize := getGroupKeyMemUsage(w.groupKey)
	w.groupKey, err = getGroupKey(w.ctx, chk, w.groupKey, w.groupByItems)
	failpoint.Inject("ConsumeRandomPanic", nil)
	w.memTracker.Consume(getGroupKeyMemUsage(w.groupKey) - memSize)
	if err != nil {
		return err
	}

	partialResults := w.getPartialResult(sc, w.groupKey, w.partialResultsMap)
	numRows := chk.NumRows()
	rows := make([]chunk.Row, 1)
	allMemDelta := int64(0)
	for i := 0; i < numRows; i++ {
		// The row is invariant across aggregate functions; fetch it once per
		// row instead of once per (row, function) pair as before.
		rows[0] = chk.GetRow(i)
		for j, af := range w.aggFuncs {
			memDelta, err := af.UpdatePartialResult(ctx, rows, partialResults[i][j])
			if err != nil {
				return err
			}
			allMemDelta += memDelta
		}
	}
	w.memTracker.Consume(allMemDelta)
	return nil
}
// shuffleIntermData shuffles the intermediate data of partial workers to corresponded final workers.
// We only support parallel execution for single-machine, so process of encode and decode can be skipped.
func (w *HashAggPartialWorker) shuffleIntermData(_ *stmtctx.StatementContext, finalConcurrency int) {
	// Partition group keys by hash so each group key is always routed to the
	// same final worker across all partial workers.
	groupKeysSlice := make([][]string, finalConcurrency)
	for groupKey := range w.partialResultsMap {
		finalWorkerIdx := int(murmur3.Sum32([]byte(groupKey))) % finalConcurrency
		if groupKeysSlice[finalWorkerIdx] == nil {
			groupKeysSlice[finalWorkerIdx] = make([]string, 0, len(w.partialResultsMap)/finalConcurrency)
		}
		groupKeysSlice[finalWorkerIdx] = append(groupKeysSlice[finalWorkerIdx], groupKey)
	}

	for i := range groupKeysSlice {
		if groupKeysSlice[i] == nil {
			continue
		}
		// The whole map is shared; each final worker only reads its own keys.
		w.outputChs[i] <- &HashAggIntermData{
			groupKeys:        groupKeysSlice[i],
			partialResultMap: w.partialResultsMap,
		}
	}
}
// getGroupKey evaluates the group items and args of aggregate functions.
// The encoded bytes of every group-by item are appended to the per-row
// buffers in groupKey, which is grown/reset to exactly numRows entries.
func getGroupKey(ctx sessionctx.Context, input *chunk.Chunk, groupKey [][]byte, groupByItems []expression.Expression) ([][]byte, error) {
	numRows := input.NumRows()
	avlGroupKeyLen := mathutil.Min(len(groupKey), numRows)
	// Reuse existing buffers, then extend with fresh ones as needed.
	for i := 0; i < avlGroupKeyLen; i++ {
		groupKey[i] = groupKey[i][:0]
	}
	for i := avlGroupKeyLen; i < numRows; i++ {
		groupKey = append(groupKey, make([]byte, 0, 10*len(groupByItems)))
	}

	for _, item := range groupByItems {
		tp := item.GetType()

		buf, err := expression.GetColumn(tp.EvalType(), numRows)
		if err != nil {
			return nil, err
		}

		// In strict sql mode like ‘STRICT_TRANS_TABLES’,can not insert an invalid enum value like 0.
		// While in sql mode like '', can insert an invalid enum value like 0,
		// then the enum value 0 will have the enum name '', which maybe conflict with user defined enum ''.
		// Ref to issue #26885.
		// This check is used to handle invalid enum name same with user defined enum name.
		// Use enum value as groupKey instead of enum name.
		if item.GetType().GetType() == mysql.TypeEnum {
			newTp := *tp
			newTp.AddFlag(mysql.EnumSetAsIntFlag)
			tp = &newTp
		}

		if err := expression.EvalExpr(ctx, item, tp.EvalType(), input, buf); err != nil {
			expression.PutColumn(buf)
			return nil, err
		}
		// This check is used to avoid error during the execution of `EncodeDecimal`.
		if item.GetType().GetType() == mysql.TypeNewDecimal {
			newTp := *tp
			newTp.SetFlen(0)
			tp = &newTp
		}
		groupKey, err = codec.HashGroupKey(ctx.GetSessionVars().StmtCtx, input.NumRows(), buf, groupKey, tp)
		if err != nil {
			expression.PutColumn(buf)
			return nil, err
		}
		expression.PutColumn(buf)
	}
	return groupKey, nil
}
// getPartialResult returns the partial-result slice for each group key,
// allocating (and registering in mapper) a fresh slice for keys not seen
// before, and charging all allocations to the memory tracker.
func (w *baseHashAggWorker) getPartialResult(_ *stmtctx.StatementContext, groupKey [][]byte, mapper aggPartialResultMapper) [][]aggfuncs.PartialResult {
	n := len(groupKey)
	partialResults := make([][]aggfuncs.PartialResult, n)
	allMemDelta := int64(0)
	partialResultSize := w.getPartialResultSliceLenConsiderByteAlign()
	for i := 0; i < n; i++ {
		var ok bool
		// string(groupKey[i]) in a map index does not allocate (compiler
		// optimizes the conversion for lookups).
		if partialResults[i], ok = mapper[string(groupKey[i])]; ok {
			continue
		}
		partialResults[i] = make([]aggfuncs.PartialResult, partialResultSize)
		for j, af := range w.aggFuncs {
			partialResult, memDelta := af.AllocPartialResult()
			partialResults[i][j] = partialResult
			allMemDelta += memDelta // the memory usage of PartialResult
		}
		allMemDelta += int64(partialResultSize * 8)
		// Map will expand when count > bucketNum * loadFactor. The memory usage will double.
		if len(mapper)+1 > (1<<w.BInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
			w.memTracker.Consume(hack.DefBucketMemoryUsageForMapStrToSlice * (1 << w.BInMap))
			w.BInMap++
		}
		mapper[string(groupKey[i])] = partialResults[i]
		allMemDelta += int64(len(groupKey[i]))
	}
	failpoint.Inject("ConsumeRandomPanic", nil)
	w.memTracker.Consume(allMemDelta)
	return partialResults
}
// getPartialResultSliceLenConsiderByteAlign rounds the per-group slice length
// up to an even count — except for the single-function case — so the backing
// array stays 16-byte aligned.
func (w *baseHashAggWorker) getPartialResultSliceLenConsiderByteAlign() int {
	n := len(w.aggFuncs)
	switch {
	case n == 1:
		return 1
	case n%2 == 1:
		return n + 1
	default:
		return n
	}
}
// getPartialInput receives one batch of intermediate data from the partial
// workers, returning ok == false when the executor is finishing or the input
// channel has been closed.
func (w *HashAggFinalWorker) getPartialInput() (input *HashAggIntermData, ok bool) {
	select {
	case <-w.finishCh:
		return nil, false
	case input, ok = <-w.inputCh:
		if !ok {
			return nil, false
		}
	}
	return
}
// consumeIntermData drains this worker's share of intermediate data from the
// partial workers, merging every batch of partial results into the worker's
// own partialResultMap and recording each distinct group key in groupSet.
func (w *HashAggFinalWorker) consumeIntermData(sctx sessionctx.Context) (err error) {
	var (
		input            *HashAggIntermData
		ok               bool
		intermDataBuffer [][]aggfuncs.PartialResult
		groupKeys        []string
		sc               = sctx.GetSessionVars().StmtCtx
	)
	for {
		waitStart := time.Now()
		input, ok = w.getPartialInput()
		if w.stats != nil {
			w.stats.WaitTime += int64(time.Since(waitStart))
		}
		if !ok {
			return nil
		}
		execStart := time.Now()
		// Lazily allocate the reusable batch buffer on first input.
		if intermDataBuffer == nil {
			intermDataBuffer = make([][]aggfuncs.PartialResult, 0, w.maxChunkSize)
		}
		// Consume input in batches, size of every batch is less than w.maxChunkSize.
		for reachEnd := false; !reachEnd; {
			intermDataBuffer, groupKeys, reachEnd = input.getPartialResultBatch(sc, intermDataBuffer[:0], w.aggFuncs, w.maxChunkSize)
			groupKeysLen := len(groupKeys)
			// Rebuild w.groupKeys ([][]byte) from the batch's string keys.
			memSize := getGroupKeyMemUsage(w.groupKeys)
			w.groupKeys = w.groupKeys[:0]
			for i := 0; i < groupKeysLen; i++ {
				w.groupKeys = append(w.groupKeys, []byte(groupKeys[i]))
			}
			failpoint.Inject("ConsumeRandomPanic", nil)
			w.memTracker.Consume(getGroupKeyMemUsage(w.groupKeys) - memSize)
			finalPartialResults := w.getPartialResult(sc, w.groupKeys, w.partialResultMap)
			allMemDelta := int64(0)
			for i, groupKey := range groupKeys {
				if !w.groupSet.Exist(groupKey) {
					allMemDelta += w.groupSet.Insert(groupKey)
				}
				prs := intermDataBuffer[i]
				for j, af := range w.aggFuncs {
					memDelta, err := af.MergePartialResult(sctx, prs[j], finalPartialResults[i][j])
					if err != nil {
						return err
					}
					allMemDelta += memDelta
				}
			}
			w.memTracker.Consume(allMemDelta)
		}
		if w.stats != nil {
			w.stats.ExecTime += int64(time.Since(execStart))
			w.stats.TaskNum++
		}
	}
}
// loadFinalResult evaluates the final value of every aggregate function for
// every group this worker owns and streams full result chunks to outputCh,
// recycling chunks through finalResultHolderCh.
func (w *HashAggFinalWorker) loadFinalResult(sctx sessionctx.Context) {
	waitStart := time.Now()
	result, finished := w.receiveFinalResultHolder()
	if w.stats != nil {
		w.stats.WaitTime += int64(time.Since(waitStart))
	}
	if finished {
		return
	}
	execStart := time.Now()
	memSize := getGroupKeyMemUsage(w.groupKeys)
	w.groupKeys = w.groupKeys[:0]
	for groupKey := range w.groupSet.StringSet {
		w.groupKeys = append(w.groupKeys, []byte(groupKey))
	}
	failpoint.Inject("ConsumeRandomPanic", nil)
	w.memTracker.Consume(getGroupKeyMemUsage(w.groupKeys) - memSize)
	partialResults := w.getPartialResult(sctx.GetSessionVars().StmtCtx, w.groupKeys, w.partialResultMap)
	for i := 0; i < len(w.groupSet.StringSet); i++ {
		for j, af := range w.aggFuncs {
			if err := af.AppendFinalResult2Chunk(sctx, partialResults[i][j], result); err != nil {
				logutil.BgLogger().Error("HashAggFinalWorker failed to append final result to Chunk", zap.Error(err))
			}
		}
		// With no aggregate functions (e.g. plain DISTINCT) each group still
		// contributes one (virtual) row.
		if len(w.aggFuncs) == 0 {
			result.SetNumVirtualRows(result.NumRows() + 1)
		}
		if result.IsFull() {
			w.outputCh <- &AfFinalResult{chk: result, giveBackCh: w.finalResultHolderCh}
			result, finished = w.receiveFinalResultHolder()
			if finished {
				return
			}
		}
	}
	// Flush the last, possibly partial, chunk.
	w.outputCh <- &AfFinalResult{chk: result, giveBackCh: w.finalResultHolderCh}
	if w.stats != nil {
		w.stats.ExecTime += int64(time.Since(execStart))
	}
}
// receiveFinalResultHolder waits for a recycled result chunk to fill. The
// returned bool is true when the worker should stop (executor finishing, or
// the holder channel closed).
func (w *HashAggFinalWorker) receiveFinalResultHolder() (*chunk.Chunk, bool) {
	select {
	case <-w.finishCh:
		return nil, true
	case result, ok := <-w.finalResultHolderCh:
		return result, !ok
	}
}
// run is the main loop of a final worker: it merges all intermediate data
// routed to it, then evaluates and emits the final results.
func (w *HashAggFinalWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitGroup) {
	start := time.Now()
	defer func() {
		if r := recover(); r != nil {
			recoveryHashAgg(w.outputCh, r)
		}
		if w.stats != nil {
			w.stats.WorkerTime += int64(time.Since(start))
		}
		waitGroup.Done()
	}()
	if err := w.consumeIntermData(ctx); err != nil {
		w.outputCh <- &AfFinalResult{err: err}
	}
	w.loadFinalResult(ctx)
}
// Next implements the Executor Next interface.
func (e *HashAggExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	if !e.isUnparallelExec {
		return e.parallelExec(ctx, req)
	}
	return e.unparallelExec(ctx, req)
}
// fetchChildData is the single data-fetcher goroutine: it repeatedly takes a
// recycled chunk from inputCh, fills it from the child executor, and hands it
// to the owning partial worker, stopping on error, EOF, or finish signal.
func (e *HashAggExec) fetchChildData(ctx context.Context, waitGroup *sync.WaitGroup) {
	var (
		input *HashAggInput
		chk   *chunk.Chunk
		ok    bool
		err   error
	)
	defer func() {
		if r := recover(); r != nil {
			recoveryHashAgg(e.finalOutputCh, r)
		}
		// Closing the partial input channels tells partial workers no more
		// data is coming.
		for i := range e.partialInputChs {
			close(e.partialInputChs[i])
		}
		waitGroup.Done()
	}()
	for {
		select {
		case <-e.finishCh:
			return
		case input, ok = <-e.inputCh:
			if !ok {
				return
			}
			chk = input.chk
		}
		mSize := chk.MemoryUsage()
		err = Next(ctx, e.Children(0), chk)
		if err != nil {
			e.finalOutputCh <- &AfFinalResult{err: err}
			e.memTracker.Consume(-mSize)
			return
		}
		if chk.NumRows() == 0 {
			e.memTracker.Consume(-mSize)
			return
		}
		failpoint.Inject("ConsumeRandomPanic", nil)
		e.memTracker.Consume(chk.MemoryUsage() - mSize)
		input.giveBackCh <- chk
	}
}
// waitPartialWorkerAndCloseOutputChs waits until every partial worker exits,
// drains any recycled input chunks (releasing their tracked memory), and then
// closes the partial output channels so final workers can finish.
func (e *HashAggExec) waitPartialWorkerAndCloseOutputChs(waitGroup *sync.WaitGroup) {
	waitGroup.Wait()
	close(e.inputCh)
	// Chunks given back by workers but never re-fetched must be un-tracked.
	for input := range e.inputCh {
		e.memTracker.Consume(-input.chk.MemoryUsage())
	}
	for _, ch := range e.partialOutputChs {
		close(ch)
	}
}
// waitAllWorkersAndCloseFinalOutputCh blocks until every worker group has
// finished, then closes the final output channel so the main thread stops
// reading. It must run after ALL goroutines that may send on finalOutputCh.
func (e *HashAggExec) waitAllWorkersAndCloseFinalOutputCh(waitGroups ...*sync.WaitGroup) {
	for _, wg := range waitGroups {
		wg.Wait()
	}
	close(e.finalOutputCh)
}
// prepare4ParallelExec launches the data-fetcher, all partial workers, all
// final workers, and the coordinating goroutines that close channels when
// each stage completes.
func (e *HashAggExec) prepare4ParallelExec(ctx context.Context) {
	fetchChildWorkerWaitGroup := &sync.WaitGroup{}
	fetchChildWorkerWaitGroup.Add(1)
	go e.fetchChildData(ctx, fetchChildWorkerWaitGroup)

	// We get the pointers here instead of when we are all finished and adding the time because:
	// (1) If there is Apply in the plan tree, executors may be reused (Open()ed and Close()ed multiple times)
	// (2) we don't wait all goroutines of HashAgg to exit in HashAgg.Close()
	// So we can't write something like:
	//     atomic.AddInt64(&e.stats.PartialWallTime, int64(time.Since(partialStart)))
	// Because the next execution of HashAgg may have started when this goroutine haven't exited and then there will be data race.
	var partialWallTimePtr, finalWallTimePtr *int64
	if e.stats != nil {
		partialWallTimePtr = &e.stats.PartialWallTime
		finalWallTimePtr = &e.stats.FinalWallTime
	}
	partialWorkerWaitGroup := &sync.WaitGroup{}
	partialWorkerWaitGroup.Add(len(e.partialWorkers))
	partialStart := time.Now()
	for i := range e.partialWorkers {
		go e.partialWorkers[i].run(e.Ctx(), partialWorkerWaitGroup, len(e.finalWorkers))
	}
	go func() {
		e.waitPartialWorkerAndCloseOutputChs(partialWorkerWaitGroup)
		if partialWallTimePtr != nil {
			atomic.AddInt64(partialWallTimePtr, int64(time.Since(partialStart)))
		}
	}()
	finalWorkerWaitGroup := &sync.WaitGroup{}
	finalWorkerWaitGroup.Add(len(e.finalWorkers))
	finalStart := time.Now()
	for i := range e.finalWorkers {
		go e.finalWorkers[i].run(e.Ctx(), finalWorkerWaitGroup)
	}
	go func() {
		finalWorkerWaitGroup.Wait()
		if finalWallTimePtr != nil {
			atomic.AddInt64(finalWallTimePtr, int64(time.Since(finalStart)))
		}
	}()

	// All workers may send error message to e.finalOutputCh when they panic.
	// And e.finalOutputCh should be closed after all goroutines gone.
	go e.waitAllWorkersAndCloseFinalOutputCh(fetchChildWorkerWaitGroup, partialWorkerWaitGroup, finalWorkerWaitGroup)
}
// HashAggExec employs one input reader, M partial workers and N final workers to execute parallelly.
// The parallel execution flow is:
// 1. input reader reads data from child executor and send them to partial workers.
// 2. partial worker receives the input data, updates the partial results, and shuffle the partial results to the final workers.
// 3. final worker receives partial results from all the partial workers, evaluates the final results and sends the final results to the main thread.
func (e *HashAggExec) parallelExec(ctx context.Context, chk *chunk.Chunk) error {
	if !e.prepared {
		e.prepare4ParallelExec(ctx)
		e.prepared = true
	}

	failpoint.Inject("parallelHashAggError", func(val failpoint.Value) {
		if val, _ := val.(bool); val {
			failpoint.Return(errors.New("HashAggExec.parallelExec error"))
		}
	})

	if e.executed {
		return nil
	}
	for {
		result, ok := <-e.finalOutputCh
		if !ok {
			// All final workers are done; emit the default value when the
			// child produced no rows at all (e.g. COUNT over an empty table).
			e.executed = true
			if e.isChildReturnEmpty && e.defaultVal != nil {
				chk.Append(e.defaultVal, 0, 1)
			}
			return nil
		}
		if result.err != nil {
			return result.err
		}
		chk.SwapColumns(result.chk)
		// Return the emptied chunk to its final worker for reuse.
		result.chk.Reset()
		result.giveBackCh <- result.chk
		if chk.NumRows() > 0 {
			e.isChildReturnEmpty = false
			return nil
		}
	}
}
// unparallelExec executes hash aggregation algorithm in single thread.
func (e *HashAggExec) unparallelExec(ctx context.Context, chk *chunk.Chunk) error {
	chk.Reset()
	for {
		if e.prepared {
			// Since we return e.MaxChunkSize() rows every time, so we should not traverse
			// `groupSet` because of its randomness.
			for ; e.cursor4GroupKey < len(e.groupKeys); e.cursor4GroupKey++ {
				partialResults := e.getPartialResults(e.groupKeys[e.cursor4GroupKey])
				if len(e.PartialAggFuncs) == 0 {
					chk.SetNumVirtualRows(chk.NumRows() + 1)
				}
				for i, af := range e.PartialAggFuncs {
					if err := af.AppendFinalResult2Chunk(e.Ctx(), partialResults[i], chk); err != nil {
						return err
					}
				}
				if chk.IsFull() {
					e.cursor4GroupKey++
					return nil
				}
			}
			// All in-memory groups emitted; reset and re-process any spilled
			// data (resetSpillMode sets e.executed when nothing was spilled).
			e.resetSpillMode()
		}
		if e.executed {
			return nil
		}
		if err := e.execute(ctx); err != nil {
			return err
		}
		if (len(e.groupSet.StringSet) == 0) && len(e.GroupByItems) == 0 {
			// If no groupby and no data, we should add an empty group.
			// For example:
			// "select count(c) from t;" should return one row [0]
			// "select count(c) from t group by c1;" should return empty result set.
			e.memTracker.Consume(e.groupSet.Insert(""))
			e.groupKeys = append(e.groupKeys, "")
		}
		e.prepared = true
	}
}
// resetSpillMode leaves spill mode and prepares the executor for another
// aggregation pass over the data previously spilled to disk: the in-memory
// group set and partial results are dropped and the emit cursor is rewound.
func (e *HashAggExec) resetSpillMode() {
	e.cursor4GroupKey, e.groupKeys = 0, e.groupKeys[:0]
	var setSize int64
	e.groupSet, setSize = set.NewStringSetWithMemoryUsage()
	e.partialResultMap = make(aggPartialResultMapper)
	e.bInMap = 0
	e.prepared = false
	// NOTE: the comparison must happen BEFORE numOfSpilledChks is refreshed —
	// equality means the last pass spilled nothing new.
	e.executed = e.numOfSpilledChks == e.listInDisk.NumChunks() // No data is spilling again, all data have been processed.
	e.numOfSpilledChks = e.listInDisk.NumChunks()
	e.memTracker.ReplaceBytesUsed(setSize)
	atomic.StoreUint32(&e.inSpillMode, 0)
}
// execute fetches Chunks from src and update each aggregate function for each row in Chunk.
// When in spill mode, rows that would create new groups are diverted to disk
// instead of being aggregated in memory.
func (e *HashAggExec) execute(ctx context.Context) (err error) {
	defer func() {
		// Flush any partially-filled spill chunk so no deferred rows are lost.
		if e.tmpChkForSpill.NumRows() > 0 && err == nil {
			err = e.listInDisk.Add(e.tmpChkForSpill)
			e.tmpChkForSpill.Reset()
		}
	}()
	for {
		mSize := e.childResult.MemoryUsage()
		if err := e.getNextChunk(ctx); err != nil {
			return err
		}
		failpoint.Inject("ConsumeRandomPanic", nil)
		e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
		// (Removed a dead `if err != nil` check here: getNextChunk's error was
		// returned via a shadowed variable above, so the named `err` is always
		// nil at this point.)
		failpoint.Inject("unparallelHashAggError", func(val failpoint.Value) {
			if val, _ := val.(bool); val {
				failpoint.Return(errors.New("HashAggExec.unparallelExec error"))
			}
		})
		// no more data.
		if e.childResult.NumRows() == 0 {
			return nil
		}
		e.groupKeyBuffer, err = getGroupKey(e.Ctx(), e.childResult, e.groupKeyBuffer, e.GroupByItems)
		if err != nil {
			return err
		}
		allMemDelta := int64(0)
		sel := make([]int, 0, e.childResult.NumRows())
		var tmpBuf [1]chunk.Row
		for j := 0; j < e.childResult.NumRows(); j++ {
			groupKey := string(e.groupKeyBuffer[j]) // do memory copy here, because e.groupKeyBuffer may be reused.
			if !e.groupSet.Exist(groupKey) {
				// In spill mode we only keep updating existing groups; rows
				// belonging to new groups are deferred to disk via `sel`.
				if atomic.LoadUint32(&e.inSpillMode) == 1 && e.groupSet.Count() > 0 {
					sel = append(sel, j)
					continue
				}
				allMemDelta += e.groupSet.Insert(groupKey)
				e.groupKeys = append(e.groupKeys, groupKey)
			}
			partialResults := e.getPartialResults(groupKey)
			for i, af := range e.PartialAggFuncs {
				tmpBuf[0] = e.childResult.GetRow(j)
				memDelta, err := af.UpdatePartialResult(e.Ctx(), tmpBuf[:], partialResults[i])
				if err != nil {
					return err
				}
				allMemDelta += memDelta
			}
		}
		// spill unprocessed data when exceeded.
		if len(sel) > 0 {
			e.childResult.SetSel(sel)
			// len(sel) == cap(sel) means every row of the chunk is deferred,
			// so the whole chunk can be spilled as-is.
			err = e.spillUnprocessedData(len(sel) == cap(sel))
			if err != nil {
				return err
			}
		}
		failpoint.Inject("ConsumeRandomPanic", nil)
		e.memTracker.Consume(allMemDelta)
	}
}
// spillUnprocessedData writes the currently-selected rows of e.childResult to
// disk. When isFullChk is true the whole chunk is spilled directly; otherwise
// rows are staged row-by-row through e.tmpChkForSpill, which is flushed to
// disk whenever it fills up.
func (e *HashAggExec) spillUnprocessedData(isFullChk bool) error {
	if isFullChk {
		return e.listInDisk.Add(e.childResult)
	}
	for i, n := 0, e.childResult.NumRows(); i < n; i++ {
		e.tmpChkForSpill.AppendRow(e.childResult.GetRow(i))
		if !e.tmpChkForSpill.IsFull() {
			continue
		}
		if err := e.listInDisk.Add(e.tmpChkForSpill); err != nil {
			return err
		}
		e.tmpChkForSpill.Reset()
	}
	return nil
}
// getNextChunk fills e.childResult with the next batch of input: first from
// the child executor until it is drained, then from the chunks previously
// spilled to disk. When both sources are exhausted e.childResult stays empty,
// which the caller interprets as end of input.
func (e *HashAggExec) getNextChunk(ctx context.Context) (err error) {
	e.childResult.Reset()
	if !e.isChildDrained {
		if err := Next(ctx, e.Children(0), e.childResult); err != nil {
			return err
		}
		if e.childResult.NumRows() != 0 {
			return nil
		}
		e.isChildDrained = true
	}
	if e.offsetOfSpilledChks < e.numOfSpilledChks {
		// Note: this REPLACES e.childResult with the chunk read from disk.
		e.childResult, err = e.listInDisk.GetChunk(e.offsetOfSpilledChks)
		if err != nil {
			return err
		}
		e.offsetOfSpilledChks++
	}
	return nil
}
// getPartialResults returns the per-aggregate-function partial results for
// groupKey, allocating (and memory-accounting) them on first sight of the
// group.
func (e *HashAggExec) getPartialResults(groupKey string) []aggfuncs.PartialResult {
	partialResults, ok := e.partialResultMap[groupKey]
	allMemDelta := int64(0)
	if !ok {
		partialResults = make([]aggfuncs.PartialResult, 0, len(e.PartialAggFuncs))
		for _, af := range e.PartialAggFuncs {
			partialResult, memDelta := af.AllocPartialResult()
			partialResults = append(partialResults, partialResult)
			allMemDelta += memDelta
		}
		// Map will expand when count > bucketNum * loadFactor. The memory usage will doubled.
		if len(e.partialResultMap)+1 > (1<<e.bInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
			e.memTracker.Consume(hack.DefBucketMemoryUsageForMapStrToSlice * (1 << e.bInMap))
			e.bInMap++
		}
		e.partialResultMap[groupKey] = partialResults
		// Account for the key string now retained by the map.
		allMemDelta += int64(len(groupKey))
	}
	failpoint.Inject("ConsumeRandomPanic", nil)
	e.memTracker.Consume(allMemDelta)
	return partialResults
}
// initRuntimeStats builds the per-worker runtime statistics collector when
// runtime stats are enabled for this executor; otherwise it does nothing.
func (e *HashAggExec) initRuntimeStats() {
	if e.RuntimeStats() == nil {
		return
	}
	partialCon := e.Ctx().GetSessionVars().HashAggPartialConcurrency()
	finalCon := e.Ctx().GetSessionVars().HashAggFinalConcurrency()
	e.stats = &HashAggRuntimeStats{
		PartialConcurrency: partialCon,
		FinalConcurrency:   finalCon,
		PartialStats:       make([]*AggWorkerStat, 0, partialCon),
		FinalStats:         make([]*AggWorkerStat, 0, finalCon),
	}
}
// HashAggRuntimeStats record the HashAggExec runtime stat
type HashAggRuntimeStats struct {
	PartialConcurrency int   // number of partial aggregation workers
	PartialWallTime    int64 // wall time of the partial phase; read/written with sync/atomic
	FinalConcurrency   int   // number of final aggregation workers
	FinalWallTime      int64 // wall time of the final phase; read/written with sync/atomic
	PartialStats       []*AggWorkerStat // per-worker stats for the partial phase
	FinalStats         []*AggWorkerStat // per-worker stats for the final phase
}
// AggWorkerInfo contains the agg worker information.
type AggWorkerInfo struct {
	Concurrency int   // number of workers
	WallTime    int64 // elapsed wall time (nanoseconds, per time.Duration convention elsewhere in this file)
}
// AggWorkerStat record the AggWorker runtime stat
// All duration fields are nanoseconds (they are rendered via time.Duration).
type AggWorkerStat struct {
	TaskNum    int64 // number of tasks processed by the worker
	WaitTime   int64 // time spent waiting for input
	ExecTime   int64 // time spent executing aggregation
	WorkerTime int64 // total worker time
}
// Clone returns a deep copy of the worker statistics.
func (w *AggWorkerStat) Clone() *AggWorkerStat {
	// All fields are plain int64s, so a value copy is a full deep copy.
	c := *w
	return &c
}
// workerString appends a summary for one class of agg workers (partial or
// final) to buf: wall time, concurrency, aggregated task/wait/exec/worker
// totals, plus max and p95 worker time when per-worker stats exist.
// Note: it sorts workerStats in place as a side effect.
func (*HashAggRuntimeStats) workerString(buf *bytes.Buffer, prefix string, concurrency int, wallTime int64, workerStats []*AggWorkerStat) {
	var totalTime, totalWait, totalExec, totalTaskNum int64
	for _, w := range workerStats {
		totalTime += w.WorkerTime
		totalWait += w.WaitTime
		totalExec += w.ExecTime
		totalTaskNum += w.TaskNum
	}
	buf.WriteString(prefix)
	fmt.Fprintf(buf, "_worker:{wall_time:%s, concurrency:%d, task_num:%d, tot_wait:%s, tot_exec:%s, tot_time:%s",
		time.Duration(wallTime), concurrency, totalTaskNum, time.Duration(totalWait), time.Duration(totalExec), time.Duration(totalTime))
	n := len(workerStats)
	if n > 0 {
		// Sort ascending by worker time: the max is the last element and the
		// p95 sits at index n*19/20 (always < n).
		slices.SortFunc(workerStats, func(i, j *AggWorkerStat) int { return cmp.Compare(i.WorkerTime, j.WorkerTime) })
		fmt.Fprintf(buf, ", max:%v, p95:%v",
			time.Duration(workerStats[n-1].WorkerTime), time.Duration(workerStats[n*19/20].WorkerTime))
	}
	buf.WriteString("}")
}
// String implements the RuntimeStats interface.
// It renders the partial-phase summary followed by the final-phase summary.
func (e *HashAggRuntimeStats) String() string {
	var buf bytes.Buffer
	buf.Grow(64)
	e.workerString(&buf, "partial", e.PartialConcurrency, atomic.LoadInt64(&e.PartialWallTime), e.PartialStats)
	buf.WriteString(", ")
	e.workerString(&buf, "final", e.FinalConcurrency, atomic.LoadInt64(&e.FinalWallTime), e.FinalStats)
	return buf.String()
}
// Clone implements the RuntimeStats interface.
// It deep-copies the per-worker stats and takes atomic snapshots of the
// wall-time counters.
func (e *HashAggRuntimeStats) Clone() execdetails.RuntimeStats {
	partial := make([]*AggWorkerStat, 0, e.PartialConcurrency)
	for _, s := range e.PartialStats {
		partial = append(partial, s.Clone())
	}
	final := make([]*AggWorkerStat, 0, e.FinalConcurrency)
	for _, s := range e.FinalStats {
		final = append(final, s.Clone())
	}
	return &HashAggRuntimeStats{
		PartialConcurrency: e.PartialConcurrency,
		PartialWallTime:    atomic.LoadInt64(&e.PartialWallTime),
		FinalConcurrency:   e.FinalConcurrency,
		FinalWallTime:      atomic.LoadInt64(&e.FinalWallTime),
		PartialStats:       partial,
		FinalStats:         final,
	}
}
// Merge implements the RuntimeStats interface.
// Stats from a non-HashAgg source are silently ignored.
func (e *HashAggRuntimeStats) Merge(other execdetails.RuntimeStats) {
	rhs, ok := other.(*HashAggRuntimeStats)
	if !ok {
		return
	}
	atomic.AddInt64(&e.PartialWallTime, atomic.LoadInt64(&rhs.PartialWallTime))
	atomic.AddInt64(&e.FinalWallTime, atomic.LoadInt64(&rhs.FinalWallTime))
	e.PartialStats = append(e.PartialStats, rhs.PartialStats...)
	e.FinalStats = append(e.FinalStats, rhs.FinalStats...)
}
// Tp implements the RuntimeStats interface.
// It identifies this stats object as a hash-aggregation runtime stat.
func (*HashAggRuntimeStats) Tp() int {
	return execdetails.TpHashAggRuntimeStat
}
// StreamAggExec deals with all the aggregate functions.
// It assumes all the input data is sorted by group by key.
// When Next() is called, it will return a result for the same group.
type StreamAggExec struct {
	exec.BaseExecutor

	// executed is set once the input is exhausted or an error occurred.
	executed bool
	// isChildReturnEmpty indicates whether the child executor only returns an empty input.
	isChildReturnEmpty bool
	// defaultVal is the row returned for an empty input without group-by.
	defaultVal   *chunk.Chunk
	groupChecker *vecgroupchecker.VecGroupChecker
	inputIter    *chunk.Iterator4Chunk
	inputRow     chunk.Row
	aggFuncs     []aggfuncs.AggFunc
	// partialResults holds one in-progress result per aggregate function.
	partialResults []aggfuncs.PartialResult
	// groupRows buffers the rows of the group currently being accumulated.
	groupRows   []chunk.Row
	childResult *chunk.Chunk

	memTracker *memory.Tracker // track memory usage.
	// memUsageOfInitialPartialResult indicates the memory usage of all partial results after initialization.
	// All partial results will be reset after processing one group data, and the memory usage should also be reset.
	// We can't get memory delta from ResetPartialResult, so record the memory usage here.
	memUsageOfInitialPartialResult int64
}
// Open implements the Executor Open interface.
// It opens the child executor, allocates the partial results for every
// aggregate function, and sets up memory tracking.
func (e *StreamAggExec) Open(ctx context.Context) error {
	failpoint.Inject("mockStreamAggExecBaseExecutorOpenReturnedError", func(val failpoint.Value) {
		if val, _ := val.(bool); val {
			failpoint.Return(errors.New("mock StreamAggExec.baseExecutor.Open returned error"))
		}
	})
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	// If panic in Open, the children executor should be closed because they are open.
	defer closeBaseExecutor(&e.BaseExecutor)
	e.childResult = tryNewCacheChunk(e.Children(0))
	e.executed = false
	e.isChildReturnEmpty = true
	e.inputIter = chunk.NewIterator4Chunk(e.childResult)
	e.inputRow = e.inputIter.End()
	// Allocate one partial result per aggregate function and remember their
	// combined footprint so it can be re-charged after each group reset.
	e.partialResults = make([]aggfuncs.PartialResult, 0, len(e.aggFuncs))
	for _, aggFunc := range e.aggFuncs {
		partialResult, memDelta := aggFunc.AllocPartialResult()
		e.partialResults = append(e.partialResults, partialResult)
		e.memUsageOfInitialPartialResult += memDelta
	}

	if e.memTracker != nil {
		e.memTracker.Reset()
	} else {
		// bytesLimit <= 0 means no limit, for now we just track the memory footprint
		e.memTracker = memory.NewTracker(e.ID(), -1)
	}
	if e.Ctx().GetSessionVars().TrackAggregateMemoryUsage {
		e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
	}
	failpoint.Inject("ConsumeRandomPanic", nil)
	e.memTracker.Consume(e.childResult.MemoryUsage() + e.memUsageOfInitialPartialResult)
	return nil
}
// Close implements the Executor Close interface.
// It releases the memory charged in Open and closes the child executor.
func (e *StreamAggExec) Close() error {
	if e.childResult != nil {
		// Give back the bytes charged for the child chunk and the initial
		// partial results.
		e.memTracker.Consume(-e.childResult.MemoryUsage() - e.memUsageOfInitialPartialResult)
		e.childResult = nil
	}
	e.groupChecker.Reset()
	return e.BaseExecutor.Close()
}
// Next implements the Executor Next interface.
// It appends one aggregated group per iteration until the chunk is full or
// the input is exhausted; any error permanently stops the executor.
func (e *StreamAggExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
	req.Reset()
	for {
		if e.executed || req.IsFull() {
			return nil
		}
		if err = e.consumeOneGroup(ctx, req); err != nil {
			e.executed = true
			return err
		}
	}
}
// consumeOneGroup accumulates the rows of one complete group-by group (which
// may span several child chunks, since the input is sorted by group key),
// aggregates them, and appends the group's final result to chk.
func (e *StreamAggExec) consumeOneGroup(ctx context.Context, chk *chunk.Chunk) (err error) {
	if e.groupChecker.IsExhausted() {
		if err = e.consumeCurGroupRowsAndFetchChild(ctx, chk); err != nil {
			return err
		}
		if e.executed {
			return nil
		}
		_, err := e.groupChecker.SplitIntoGroups(e.childResult)
		if err != nil {
			return err
		}
	}
	begin, end := e.groupChecker.GetNextGroup()
	for i := begin; i < end; i++ {
		e.groupRows = append(e.groupRows, e.childResult.GetRow(i))
	}

	// If the group reaches the end of the chunk, it may continue in the next
	// chunk(s): keep fetching while the first group of each new chunk is the
	// same as the previous one.
	for meetLastGroup := end == e.childResult.NumRows(); meetLastGroup; {
		meetLastGroup = false
		if err = e.consumeCurGroupRowsAndFetchChild(ctx, chk); err != nil || e.executed {
			return err
		}

		isFirstGroupSameAsPrev, err := e.groupChecker.SplitIntoGroups(e.childResult)
		if err != nil {
			return err
		}

		if isFirstGroupSameAsPrev {
			begin, end = e.groupChecker.GetNextGroup()
			for i := begin; i < end; i++ {
				e.groupRows = append(e.groupRows, e.childResult.GetRow(i))
			}
			meetLastGroup = end == e.childResult.NumRows()
		}
	}

	err = e.consumeGroupRows()
	if err != nil {
		return err
	}

	return e.appendResult2Chunk(chk)
}
// consumeGroupRows feeds the buffered rows of the current group into every
// aggregate function's partial result, accounts the memory delta, and then
// clears the buffer. It is a no-op when no rows are buffered.
func (e *StreamAggExec) consumeGroupRows() error {
	if len(e.groupRows) == 0 {
		return nil
	}

	var total int64
	for i, fn := range e.aggFuncs {
		delta, err := fn.UpdatePartialResult(e.Ctx(), e.groupRows, e.partialResults[i])
		if err != nil {
			return err
		}
		total += delta
	}
	failpoint.Inject("ConsumeRandomPanic", nil)
	e.memTracker.Consume(total)
	e.groupRows = e.groupRows[:0]
	return nil
}
// consumeCurGroupRowsAndFetchChild aggregates the rows buffered for the
// current group and then fetches the next chunk from the child executor.
// When the child is exhausted it emits the pending result (or the default
// row) into chk and marks the executor as finished.
func (e *StreamAggExec) consumeCurGroupRowsAndFetchChild(ctx context.Context, chk *chunk.Chunk) (err error) {
	// Before fetching a new batch of input, we should consume the last group.
	err = e.consumeGroupRows()
	if err != nil {
		return err
	}

	mSize := e.childResult.MemoryUsage()
	err = Next(ctx, e.Children(0), e.childResult)
	failpoint.Inject("ConsumeRandomPanic", nil)
	e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
	if err != nil {
		return err
	}

	// No more data.
	if e.childResult.NumRows() == 0 {
		if !e.isChildReturnEmpty {
			// There is a pending group: flush its result.
			err = e.appendResult2Chunk(chk)
		} else if e.defaultVal != nil {
			// Empty input without group-by: emit the default row.
			chk.Append(e.defaultVal, 0, 1)
		}
		e.executed = true
		return err
	}
	// Reach here, "e.childrenResults[0].NumRows() > 0" is guaranteed.
	e.isChildReturnEmpty = false
	e.inputRow = e.inputIter.Begin()
	return nil
}
// appendResult2Chunk appends result of all the aggregation functions to the
// result chunk, and reset the evaluation context for each aggregation.
func (e *StreamAggExec) appendResult2Chunk(chk *chunk.Chunk) error {
	for i, aggFunc := range e.aggFuncs {
		err := aggFunc.AppendFinalResult2Chunk(e.Ctx(), e.partialResults[i], chk)
		if err != nil {
			return err
		}
		aggFunc.ResetPartialResult(e.partialResults[i])
	}
	failpoint.Inject("ConsumeRandomPanic", nil)
	// All partial results have been reset, so reset the memory usage.
	e.memTracker.ReplaceBytesUsed(e.childResult.MemoryUsage() + e.memUsageOfInitialPartialResult)
	if len(e.aggFuncs) == 0 {
		// No aggregate functions: the result is a virtual row per group.
		chk.SetNumVirtualRows(chk.NumRows() + 1)
	}
	return nil
}
// ActionSpill returns an AggSpillDiskAction for spilling intermediate data
// for hashAgg; the action is created once and cached on the executor.
func (e *HashAggExec) ActionSpill() *AggSpillDiskAction {
	if e.spillAction != nil {
		return e.spillAction
	}
	e.spillAction = &AggSpillDiskAction{e: e}
	return e.spillAction
}
// maxSpillTimes indicates how many times the data can spill at most.
// Beyond this budget, Action falls back to the next OOM action instead.
const maxSpillTimes = 10
// AggSpillDiskAction implements memory.ActionOnExceed for unparalleled HashAgg.
// If the memory quota of a query is exceeded, AggSpillDiskAction.Action is
// triggered.
type AggSpillDiskAction struct {
	memory.BaseOOMAction
	e *HashAggExec // the executor to put into spill mode
	// spillTimes counts how often spill mode has been triggered (capped by maxSpillTimes).
	spillTimes uint32
}
// Action set HashAggExec spill mode.
// It only triggers when the executor is not already spilling, the spill
// budget (maxSpillTimes) is not exhausted, and enough data has been consumed;
// otherwise it delegates to the fallback OOM action, if any.
func (a *AggSpillDiskAction) Action(t *memory.Tracker) {
	// Guarantee that processed data is at least 20% of the threshold, to avoid spilling too frequently.
	if atomic.LoadUint32(&a.e.inSpillMode) == 0 && a.spillTimes < maxSpillTimes && a.e.memTracker.BytesConsumed() >= t.GetBytesLimit()/5 {
		a.spillTimes++
		logutil.BgLogger().Info("memory exceeds quota, set aggregate mode to spill-mode",
			zap.Uint32("spillTimes", a.spillTimes),
			zap.Int64("consumed", t.BytesConsumed()),
			zap.Int64("quota", t.GetBytesLimit()))
		atomic.StoreUint32(&a.e.inSpillMode, 1)
		memory.QueryForceDisk.Add(1)
		return
	}
	if fallback := a.GetFallback(); fallback != nil {
		fallback.Action(t)
	}
}
// GetPriority get the priority of the Action.
// Spilling is preferred over more disruptive OOM actions of lower priority.
func (*AggSpillDiskAction) GetPriority() int64 {
	return memory.DefSpillPriority
}
|
package main
import "fmt"
// main demonstrates the different ways of declaring and assigning variables
// in Go; each form below is intentionally distinct.
func main() {
	// Explicit type, assignment in a separate statement.
	var name string
	name = "Mohd Zhuhry"
	fmt.Println(name)
	// Reassignment of an existing variable.
	name = "Muhammad Zhuhry"
	fmt.Println((name))
	// `var` with the type inferred from the initializer.
	var friendName = "Budi"
	fmt.Println(friendName)
	var age = 23
	fmt.Println(age)
	// Short variable declaration (only valid inside functions).
	country := "indonesia"
	fmt.Println(country)
	// Grouped declaration block.
	var (
		firstName = "Muhammad"
		lastName  = "Zhuhry"
	)
	fmt.Println(firstName, lastName)
	// Multiple assignment in a single statement.
	first, last := "budi", "bambang"
	fmt.Println(first, last)
}
|
package raftor
import (
"time"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"golang.org/x/net/context"
)
// Commit is used to send to the cluster to save either a snapshot or log entries.
type Commit struct {
	State    RaftState        // Raft state observed at this commit
	Entries  []raftpb.Entry   // log entries to apply
	Snapshot raftpb.Snapshot  // snapshot to apply, if any
	Messages []raftpb.Message // messages to forward to peers
	Context  context.Context
}
// RaftState describes the state of the Raft cluster for each commit
type RaftState struct {
	CommitID             uint64 // index of the committed entry
	Vote                 uint64
	Term                 uint64
	Lead                 uint64 // ID of the current leader
	LastLeadElectionTime time.Time
	RaftState            raft.StateType
}
// Applier applies either a snapshot or entries in a Commit object
type Applier interface {
	// Apply processes commit messages after being processed by Raft
	Apply(Commit)
}
|
package manifests
import (
"crypto/x509"
"encoding/pem"
"fmt"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
"github.com/openshift/installer/pkg/asset"
"github.com/openshift/installer/pkg/asset/installconfig"
)
var (
	// additionalTrustBundleConfigFileName is the path of the rendered
	// ConfigMap manifest within the manifests directory.
	additionalTrustBundleConfigFileName = filepath.Join(manifestDir, "user-ca-bundle-config.yaml")
)

const (
	// additionalTrustBundleConfigDataKey is the ConfigMap data key that holds the PEM bundle.
	additionalTrustBundleConfigDataKey = "ca-bundle.crt"
	// additionalTrustBundleConfigMapName is the name of the generated ConfigMap.
	additionalTrustBundleConfigMapName = "user-ca-bundle"
)

// AdditionalTrustBundleConfig generates the additional-trust-bundle-config.yaml files.
type AdditionalTrustBundleConfig struct {
	ConfigMap *corev1.ConfigMap // the generated ConfigMap, nil when no trust bundle was configured
	File      *asset.File       // the serialized manifest file
}

// Compile-time assertion that AdditionalTrustBundleConfig is a WritableAsset.
var _ asset.WritableAsset = (*AdditionalTrustBundleConfig)(nil)
// Name returns a human-friendly name for the asset.
func (*AdditionalTrustBundleConfig) Name() string {
	return "Additional Trust Bundle Config"
}
// Dependencies returns all of the dependencies directly needed to generate
// the asset: only the install config.
func (*AdditionalTrustBundleConfig) Dependencies() []asset.Asset {
	return []asset.Asset{&installconfig.InstallConfig{}}
}
// Generate generates the user-ca-bundle ConfigMap manifest from the install
// config's AdditionalTrustBundle. It is a no-op when no bundle is configured.
func (atbc *AdditionalTrustBundleConfig) Generate(dependencies asset.Parents) error {
	installConfig := &installconfig.InstallConfig{}
	dependencies.Get(installConfig)

	// Nothing to do when the user supplied no extra CA bundle.
	if installConfig.Config.AdditionalTrustBundle == "" {
		return nil
	}
	data, err := ParseCertificates(installConfig.Config.AdditionalTrustBundle)
	if err != nil {
		return err
	}
	cm := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			APIVersion: corev1.SchemeGroupVersion.String(),
			Kind:       "ConfigMap",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "openshift-config",
			Name:      additionalTrustBundleConfigMapName,
		},
		Data: data,
	}

	cmData, err := yaml.Marshal(cm)
	if err != nil {
		return errors.Wrapf(err, "failed to create %s manifest", atbc.Name())
	}
	atbc.ConfigMap = cm
	atbc.File = &asset.File{
		Filename: additionalTrustBundleConfigFileName,
		Data:     cmData,
	}
	return nil
}
// Files returns the files generated by the asset (empty when Generate was a
// no-op because no trust bundle was configured).
func (atbc *AdditionalTrustBundleConfig) Files() []*asset.File {
	if atbc.File == nil {
		return []*asset.File{}
	}
	return []*asset.File{atbc.File}
}
// Load loads the already-rendered files back from disk.
// This asset is always regenerated, so it never reports as loaded.
func (atbc *AdditionalTrustBundleConfig) Load(f asset.FileFetcher) (bool, error) {
	return false, nil
}
// ParseCertificates parses and verifies a PEM certificate bundle, returning
// it re-encoded under the ConfigMap data key. An empty or malformed bundle
// (including trailing non-PEM garbage) yields an error; non-v3 or non-CA
// certificates only produce warnings.
func ParseCertificates(certificates string) (map[string]string, error) {
	var bundle strings.Builder
	remaining := []byte(certificates)
	for {
		block, rest := pem.Decode(remaining)
		if block == nil {
			return nil, fmt.Errorf("unable to parse certificate, please check the additionalTrustBundle section of install-config.yaml")
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, err
		}
		switch {
		case cert.Version < 3:
			logrus.Warnf("Certificate %X from additionalTrustBundle is x509 v%d", cert.SerialNumber, cert.Version)
		case !cert.IsCA:
			logrus.Warnf("Certificate %X from additionalTrustBundle is x509 v%d but not a certificate authority", cert.SerialNumber, cert.Version)
		}
		bundle.Write(pem.EncodeToMemory(block))
		if len(rest) == 0 {
			break
		}
		remaining = rest
	}
	return map[string]string{additionalTrustBundleConfigDataKey: bundle.String()}, nil
}
|
package main
import (
"log"
"os"
"strings"
)
// usage is printed verbatim when the command line is missing or unrecognized.
const usage = `
usage:
tasks insert "my new task"
tasks list
tasks update <task-id> true|false
tasks purge
`
// main is the CLI entry point: it validates the arguments and dispatches on
// the first one (insert | list | update | purge). The database wiring is
// still scaffolding — see the TODOs below.
func main() {
	//create a new logger with no date/time prefix.
	//Use this to write responses back to the terminal.
	//Use logger.Fatalf() to log a fatal message and
	//exit with a non-zero status code.
	//use logger.Printf() to write other messages.
	logger := log.New(os.Stdout, "", 0)

	if len(os.Args) < 2 {
		logger.Fatal(usage)
	}

	//command will be one of:
	//insert | list | update | purge
	command := strings.ToLower(os.Args[1])

	//TODO: read the following required env vars.
	//if they are not defined, use logger.Fatalf()
	//to tell the user to define them and exit
	// - MYSQL_ADDR = network address of the MySQL server (127.0.0.1:3306)
	// - MYSQL_ROOT_PASSWORD = password for the root user account
	// - MYSQL_DATABASE = name of database containing our tasks table

	//TODO: connect to the MySQL server using the information
	//gathered from those environment variables.
	//see https://drstearns.github.io/tutorials/godb/#secconnectingfromagoprogram
	//PRO TIP: the mysql driver in particular has a config
	//struct you can use to build the DSN.
	//see https://godoc.org/github.com/go-sql-driver/mysql#Config
	//and https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN
	//(other drivers may not have something like that)

	//TODO: once connected, create a new tasks.MySQLStore
	//and use it to implement the various commands

	switch command {
	case "insert":
		//TODO: get the new task title from os.Args[2],
		//insert it, and log the new ID or any errors
	case "list":
		//TODO: get all the tasks and log them, one per line,
		//using the following format:
		//<ID>\t<Completed>\t<Title>
		//For example:
		//1	false	get milk
		//2	false	walk the cat
	case "update":
		//TODO: update the task's completed state
		//using os.Args[2] as the task ID
		//and os.Args[3] as the new completed value
		//log the task returned from your store's Update method
		//just like you did in the "list" command
	case "purge":
		//TODO: purge all completed tasks and log
		//how many tasks were deleted
	default:
		logger.Fatal(usage)
	}
}
|
package core
import "context"
// behavior is the contract for one link in the middleware chain.
type behavior interface {
	// AddNext appends next to the chain and returns it, enabling chained registration.
	AddNext(next behavior) behavior
	// Run executes this middleware for the given request.
	Run(ctx context.Context, request Request) Result
	// Next hands control to the following middleware, or to the final handler.
	Next() Result
	// setParameters injects the call context before Run/Next are invoked.
	setParameters(ctx context.Context, request Request, handler RequestHandler)
}
// Middleware is the base chain-management implementation of behavior;
// concrete middlewares embed it.
type Middleware struct {
	ctx     context.Context // call context, set via setParameters
	request Request         // request being processed
	handler RequestHandler  // final handler invoked at the end of the chain
	next    behavior        // next link, nil when this is the last one
}
// AddNext links next after this middleware and returns it so registrations
// can be chained fluently.
func (m *Middleware) AddNext(next behavior) behavior {
	m.next = next
	return next
}
// Next passes control to the following middleware in the chain, or to the
// final request handler when this is the last link. If the context has
// already been cancelled or has timed out, it short-circuits with that error.
func (m *Middleware) Next() Result {
	if err := m.ctx.Err(); err != nil {
		return Result{E: err}
	}
	// Idiom fix: early return instead of an `else` after a terminating branch.
	if m.next == nil {
		// End of the chain: dispatch to the actual handler.
		return m.handler(m.ctx, m.request)
	}
	m.next.setParameters(m.ctx, m.request, m.handler)
	return m.next.Run(m.ctx, m.request)
}
// setParameters stores the context, request and final handler so a later
// call to Next can forward them down the chain.
func (m *Middleware) setParameters(ctx context.Context, request Request, handler RequestHandler) {
	m.ctx = ctx
	m.request = request
	m.handler = handler
}
|
package file
import . "github.com/rainmyy/easyDB/library/strategy"
// ParserJsonContent parses JSON content into a tree structure.
// NOTE(review): currently an unimplemented stub — it always returns
// (nil, nil), so callers must tolerate a nil tree.
func ParserJsonContent(data []byte) ([]*TreeStruct, error) {
	return nil, nil
}
|
package problem0507
// checkPerfectNumber reports whether num is a perfect number, i.e. equal to
// the sum of its proper divisors (e.g. 6 = 1+2+3, 28 = 1+2+4+7+14).
// Divisors are enumerated in pairs (d, num/d) up to sqrt(num).
func checkPerfectNumber(num int) bool {
	// 1 has no proper divisors other than itself per this definition.
	if num == 1 {
		return false
	}
	total := 1 // 1 divides everything
	d := 2
	for d*d < num {
		if num%d == 0 {
			total += d + num/d
		}
		d++
	}
	// A perfect square contributes its root exactly once.
	if d*d == num {
		total += d
	}
	return total == num
}
|
// Copyright © 2020, 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package standard
import (
"context"
"time"
eth2client "github.com/attestantio/go-eth2-client"
"github.com/attestantio/vouch/services/accountmanager"
"github.com/attestantio/vouch/services/attestationaggregator"
"github.com/attestantio/vouch/services/attester"
"github.com/attestantio/vouch/services/beaconblockproposer"
"github.com/attestantio/vouch/services/beaconcommitteesubscriber"
"github.com/attestantio/vouch/services/chaintime"
"github.com/attestantio/vouch/services/metrics"
"github.com/attestantio/vouch/services/scheduler"
"github.com/attestantio/vouch/services/synccommitteeaggregator"
"github.com/attestantio/vouch/services/synccommitteemessenger"
"github.com/attestantio/vouch/services/synccommitteesubscriber"
"github.com/pkg/errors"
"github.com/rs/zerolog"
)
// parameters collects every option passed to the service constructor; zero
// values mean "not supplied" and are validated by parseAndCheckParameters.
type parameters struct {
	logLevel                     zerolog.Level
	monitor                      metrics.ControllerMonitor
	specProvider                 eth2client.SpecProvider
	forkScheduleProvider         eth2client.ForkScheduleProvider
	chainTimeService             chaintime.Service
	proposerDutiesProvider       eth2client.ProposerDutiesProvider
	attesterDutiesProvider       eth2client.AttesterDutiesProvider
	syncCommitteeDutiesProvider  eth2client.SyncCommitteeDutiesProvider
	syncCommitteesSubscriber     synccommitteesubscriber.Service
	validatingAccountsProvider   accountmanager.ValidatingAccountsProvider
	scheduler                    scheduler.Service
	eventsProvider               eth2client.EventsProvider
	attester                     attester.Service
	syncCommitteeMessenger       synccommitteemessenger.Service
	syncCommitteeAggregator      synccommitteeaggregator.Service
	beaconBlockProposer          beaconblockproposer.Service
	attestationAggregator        attestationaggregator.Service
	beaconCommitteeSubscriber    beaconcommitteesubscriber.Service
	accountsRefresher            accountmanager.Refresher
	maxAttestationDelay          time.Duration // defaulted from the chain spec when zero
	maxSyncCommitteeMessageDelay time.Duration // defaulted from the chain spec when zero
	reorgs                       bool
}
// Parameter is the interface for service parameters.
type Parameter interface {
	apply(*parameters)
}

// parameterFunc adapts a plain function to the Parameter interface.
type parameterFunc func(*parameters)

func (f parameterFunc) apply(p *parameters) {
	f(p)
}
// WithLogLevel sets the log level for the module.
func WithLogLevel(logLevel zerolog.Level) Parameter {
	return parameterFunc(func(p *parameters) {
		p.logLevel = logLevel
	})
}

// WithMonitor sets the monitor for the module.
func WithMonitor(monitor metrics.ControllerMonitor) Parameter {
	return parameterFunc(func(p *parameters) {
		p.monitor = monitor
	})
}

// WithSpecProvider sets the spec provider.
func WithSpecProvider(provider eth2client.SpecProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.specProvider = provider
	})
}

// WithForkScheduleProvider sets the fork schedule provider.
func WithForkScheduleProvider(provider eth2client.ForkScheduleProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.forkScheduleProvider = provider
	})
}

// WithChainTimeService sets the chain time service.
func WithChainTimeService(service chaintime.Service) Parameter {
	return parameterFunc(func(p *parameters) {
		p.chainTimeService = service
	})
}

// WithProposerDutiesProvider sets the proposer duties provider.
func WithProposerDutiesProvider(provider eth2client.ProposerDutiesProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.proposerDutiesProvider = provider
	})
}

// WithAttesterDutiesProvider sets the attester duties provider.
func WithAttesterDutiesProvider(provider eth2client.AttesterDutiesProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.attesterDutiesProvider = provider
	})
}

// WithSyncCommitteeDutiesProvider sets the sync committee duties provider.
func WithSyncCommitteeDutiesProvider(provider eth2client.SyncCommitteeDutiesProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.syncCommitteeDutiesProvider = provider
	})
}

// WithSyncCommitteeSubscriber sets the sync committee subscriber.
func WithSyncCommitteeSubscriber(subscriber synccommitteesubscriber.Service) Parameter {
	return parameterFunc(func(p *parameters) {
		p.syncCommitteesSubscriber = subscriber
	})
}

// WithEventsProvider sets the events provider.
func WithEventsProvider(provider eth2client.EventsProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.eventsProvider = provider
	})
}

// WithValidatingAccountsProvider sets the validating accounts provider.
func WithValidatingAccountsProvider(provider accountmanager.ValidatingAccountsProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.validatingAccountsProvider = provider
	})
}

// WithScheduler sets the scheduler.
func WithScheduler(scheduler scheduler.Service) Parameter {
	return parameterFunc(func(p *parameters) {
		p.scheduler = scheduler
	})
}

// WithAttester sets the attester.
func WithAttester(attester attester.Service) Parameter {
	return parameterFunc(func(p *parameters) {
		p.attester = attester
	})
}

// WithSyncCommitteeMessenger sets the sync committee messenger.
func WithSyncCommitteeMessenger(messenger synccommitteemessenger.Service) Parameter {
	return parameterFunc(func(p *parameters) {
		p.syncCommitteeMessenger = messenger
	})
}

// WithSyncCommitteeAggregator sets the sync committee aggregator.
func WithSyncCommitteeAggregator(aggregator synccommitteeaggregator.Service) Parameter {
	return parameterFunc(func(p *parameters) {
		p.syncCommitteeAggregator = aggregator
	})
}

// WithBeaconBlockProposer sets the beacon block proposer.
func WithBeaconBlockProposer(proposer beaconblockproposer.Service) Parameter {
	return parameterFunc(func(p *parameters) {
		p.beaconBlockProposer = proposer
	})
}

// WithAttestationAggregator sets the attestation aggregator.
func WithAttestationAggregator(aggregator attestationaggregator.Service) Parameter {
	return parameterFunc(func(p *parameters) {
		p.attestationAggregator = aggregator
	})
}

// WithBeaconCommitteeSubscriber sets the beacon committee subscriber.
func WithBeaconCommitteeSubscriber(subscriber beaconcommitteesubscriber.Service) Parameter {
	return parameterFunc(func(p *parameters) {
		p.beaconCommitteeSubscriber = subscriber
	})
}

// WithAccountsRefresher sets the account refresher.
func WithAccountsRefresher(refresher accountmanager.Refresher) Parameter {
	return parameterFunc(func(p *parameters) {
		p.accountsRefresher = refresher
	})
}

// WithMaxAttestationDelay sets the maximum delay before attesting.
func WithMaxAttestationDelay(delay time.Duration) Parameter {
	return parameterFunc(func(p *parameters) {
		p.maxAttestationDelay = delay
	})
}

// WithMaxSyncCommitteeMessageDelay sets the maximum delay before generating sync committee messages.
func WithMaxSyncCommitteeMessageDelay(delay time.Duration) Parameter {
	return parameterFunc(func(p *parameters) {
		p.maxSyncCommitteeMessageDelay = delay
	})
}

// WithReorgs sets or unsets reorgs.
func WithReorgs(reorgs bool) Parameter {
	return parameterFunc(func(p *parameters) {
		p.reorgs = reorgs
	})
}
// parseAndCheckParameters parses and checks parameters to ensure that mandatory parameters are present and correct.
func parseAndCheckParameters(params ...Parameter) (*parameters, error) {
parameters := parameters{
logLevel: zerolog.GlobalLevel(),
}
for _, p := range params {
if params != nil {
p.apply(¶meters)
}
}
if parameters.monitor == nil {
return nil, errors.New("no monitor specified")
}
if parameters.specProvider == nil {
return nil, errors.New("no spec provider specified")
}
if parameters.forkScheduleProvider == nil {
return nil, errors.New("no fork schedule provider specified")
}
if parameters.chainTimeService == nil {
return nil, errors.New("no chain time service specified")
}
if parameters.proposerDutiesProvider == nil {
return nil, errors.New("no proposer duties provider specified")
}
if parameters.attesterDutiesProvider == nil {
return nil, errors.New("no attester duties provider specified")
}
if parameters.eventsProvider == nil {
return nil, errors.New("no events provider specified")
}
if parameters.validatingAccountsProvider == nil {
return nil, errors.New("no validating accounts provider specified")
}
if parameters.scheduler == nil {
return nil, errors.New("no scheduler service specified")
}
if parameters.attester == nil {
return nil, errors.New("no attester specified")
}
if parameters.beaconBlockProposer == nil {
return nil, errors.New("no beacon block proposer specified")
}
if parameters.attestationAggregator == nil {
return nil, errors.New("no attestation aggregator specified")
}
if parameters.beaconCommitteeSubscriber == nil {
return nil, errors.New("no beacon committee subscriber specified")
}
if parameters.accountsRefresher == nil {
return nil, errors.New("no accounts refresher specified")
}
var spec map[string]interface{}
var err error
if parameters.maxAttestationDelay == 0 {
spec, err = parameters.specProvider.Spec(context.Background())
if err != nil {
return nil, errors.Wrap(err, "failed to obtain spec")
}
tmp, exists := spec["SECONDS_PER_SLOT"]
if !exists {
return nil, errors.New("SECONDS_PER_SLOT not found in spec")
}
slotDuration, ok := tmp.(time.Duration)
if !ok {
return nil, errors.New("SECONDS_PER_SLOT of unexpected type")
}
parameters.maxAttestationDelay = slotDuration / 3
}
if parameters.maxSyncCommitteeMessageDelay == 0 {
if spec == nil {
spec, err = parameters.specProvider.Spec(context.Background())
if err != nil {
return nil, errors.Wrap(err, "failed to obtain spec")
}
}
tmp, exists := spec["SECONDS_PER_SLOT"]
if !exists {
return nil, errors.New("SECONDS_PER_SLOT not found in spec")
}
slotDuration, ok := tmp.(time.Duration)
if !ok {
return nil, errors.New("SECONDS_PER_SLOT of unexpected type")
}
parameters.maxSyncCommitteeMessageDelay = slotDuration / 3
}
// Sync committee duties provider/messenger/aggregator/subscriber are optional so no checks here.
return ¶meters, nil
}
|
/*
Description
As part of an arithmetic competency program, your students will be given randomly generated lists of from 2 to 15 unique positive integers and asked to determine how many items in each list are twice some other item in the same list. You will need a program to help you with the grading. This program should be able to scan the lists and output the correct answer for each one. For example, given the list
1 4 3 2 9 7 18 22
your program should answer 3, as 2 is twice 1, 4 is twice 2, and 18 is twice 9.
Input
The input will consist of one or more lists of numbers. There will be one list of numbers per line. Each list will contain from 2 to 15 unique positive integers. No integer will be larger than 99. Each line will be terminated with the integer 0, which is not considered part of the list. A line with the single number -1 will mark the end of the file. The example input below shows 3 separate lists. Some lists may not contain any doubles.
Output
The output will consist of one line per input list, containing a count of the items that are double some other item.
Sample Input
1 4 3 2 9 7 18 22 0
2 4 8 10 0
7 5 11 13 1 3 0
-1
Sample Output
3
2
0
Source
Mid-Central USA 2003
*/
package main
// main runs the sample cases from the problem statement as assertions.
func main() {
	cases := []struct {
		list []int
		want int
	}{
		{[]int{1, 4, 3, 2, 9, 7, 18, 22}, 3},
		{[]int{2, 4, 8, 10}, 2},
		{[]int{7, 5, 11, 13, 1, 3}, 0},
	}
	for _, c := range cases {
		assert(doubles(c.list) == c.want)
	}
}
// assert panics when the given condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// doubles counts the items of a that are exactly twice some other item of a.
func doubles(a []int) int {
	present := make(map[int]bool, len(a))
	for _, v := range a {
		present[v] = true
	}
	count := 0
	for _, v := range a {
		if present[2*v] {
			count++
		}
	}
	return count
}
|
package main
import (
"fmt"
"html/template"
"log"
"net/http"
)
func HomeHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Please go to /cluster or /graph"))
}
func ClusterHandler(w http.ResponseWriter, r *http.Request) {
tmpl, err := template.ParseFiles("view/cluster.html")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data := map[string]interface{}{}
err = tmpl.Execute(w, data)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
func GraphHandler(w http.ResponseWriter, r *http.Request) {
tmpl, err := template.ParseFiles("view/graph.html")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data := map[string]interface{}{}
err = tmpl.Execute(w, data)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
// main registers the HTTP routes and serves on port 8080.
func main() {
	routes := map[string]http.HandlerFunc{
		"/":        HomeHandler,
		"/cluster": ClusterHandler,
		"/graph":   GraphHandler,
	}
	for path, handler := range routes {
		http.HandleFunc(path, handler)
	}
	fmt.Println("Listen and serve :8080")
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package msutil
import "github.com/mauricelam/genny/generic"
import x "github.com/dearcj/golangproj/network"
import "reflect"
// GenericData is a genny placeholder; `genny gen` replaces it with a concrete
// type. The reflect usage in CreateGenericData assumes the substituted type
// is a pointer type — TODO confirm against the generated instantiations.
type GenericData generic.Type

// Keep the network package imported even when a generated instantiation does
// not reference it directly.
var _ x.ServerData
// GenericDataMsg pairs a mutable data value with a pristine backup copy so
// the value can be restored after modification.
type GenericDataMsg struct {
	Changed bool         // set once WriteToMsg hands out the mutable data
	data    *GenericData // working copy handed to writers
	backup  *GenericData // pristine copy used by Reset
}
// WriteToMsg marks the message as changed and returns the mutable working
// data for the caller to modify in place.
func (n *GenericDataMsg) WriteToMsg() *GenericData {
	n.Changed = true
	return n.data
}
// Reset restores the working data from the backup by copying the pointed-to
// value, so existing aliases of n.data stay valid. Assumes both pointers are
// non-nil (they are set by CreateGenericDataMsg).
func (n *GenericDataMsg) Reset() {
	*n.data = *n.backup
}
// CreateGenericData allocates a fresh zero value of the concrete type behind
// GenericData. It relies on the genny-substituted GenericData being a pointer
// type: TypeOf(x).Elem() is then the pointee type and reflect.New yields a
// pointer to a new zero value of it — TODO confirm; with a non-pointer
// substitution TypeOf(x).Elem() would panic.
func CreateGenericData() *GenericData {
	var x GenericData
	g := reflect.New(reflect.TypeOf(x).Elem()).Interface()
	return g.(*GenericData)
}
// CreateGenericDataMsg builds a message whose working and backup values are
// independently allocated zero values.
func CreateGenericDataMsg() GenericDataMsg {
	return GenericDataMsg{
		backup: CreateGenericData(),
		data:   CreateGenericData()}
}
|
package main
import (
"fmt"
router "./http"
"net/http"
"os"
"path/filepath"
"./controllers"
"./service"
"./repos"
)
// Wire the application layers together: Firestore-backed repository, the
// post service on top of it, the controller on top of the service, and a
// chi-based HTTP router to expose it.
var httpRouter router.Router = router.NewChiRouter()
var postRepository repos.PostRepo = repos.NewFirestoreRepository()
var postService service.PostService = service.NewPostService(postRepository)
var postController controllers.PostController = controllers.NewPostController(postService)
// main sets up credentials, registers the post routes and starts the server.
func main() {
	setEnv()
	const port string = ":8000"
	httpRouter.GET("/", func(res http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(res, "Up and running so fast")
	})
	httpRouter.GET("/posts", postController.GetPosts)
	httpRouter.POST("/posts", postController.CreatePost)
	httpRouter.SERVE(port)
}
func setEnv(){
cwd, err := os.Getwd()
if err == nil{
fmt.Println("we are here")
envPath := filepath.Join(cwd, "config", "golang-rest-api-project-firebase-adminsdk-zjeqf-35ecb16f4b.json")
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", envPath)
fmt.Println("env set")
return
}
} |
package backend_service
import (
"2021/yunsongcailu/yunsong_server/backend/backend_dao"
"2021/yunsongcailu/yunsong_server/web/web_model"
)
// BackendArticleServer is the backend-facing service for article management.
type BackendArticleServer interface {
	// FindArticleAll returns all articles.
	FindArticleAll() (articleAll []web_model.ArticleModel, err error)
	// RemoveArticleById deletes the article with the given ID.
	RemoveArticleById(id int64) (err error)
	// RemoveArticleArrById deletes all articles with the given IDs.
	RemoveArticleArrById(idArr []int64) (err error)
	// AddArticle inserts a new article.
	AddArticle(article web_model.ArticleModel) (err error)
	// GetArticleById returns the article with the given ID.
	GetArticleById(id int64) (article web_model.ArticleModel, err error)
	// EditArticleById updates the article identified by the model's ID.
	EditArticleById(article web_model.ArticleModel) (err error)
	// EditArticleImageOrVideo updates the image or video path stored in the
	// given field of the article with the given ID.
	EditArticleImageOrVideo(id int64, field, path string) (err error)
}
// backendArticleServer is the default BackendArticleServer implementation;
// every method delegates to the package-level DAO.
type backendArticleServer struct{}

// NewBackendArticleServer returns a BackendArticleServer backed by the
// shared article DAO.
func NewBackendArticleServer() BackendArticleServer {
	return &backendArticleServer{}
}

// bad is the shared article DAO used by all service methods.
var bad = backend_dao.NewBackendArticleDao()
// FindArticleAll returns all articles via the DAO.
func (bas *backendArticleServer) FindArticleAll() (articleAll []web_model.ArticleModel, err error) {
	return bad.QueryArticleAll()
}
// RemoveArticleById deletes the article with the given ID via the DAO.
func (bas *backendArticleServer) RemoveArticleById(id int64) (err error) {
	return bad.DeleteArticleById(id)
}
// RemoveArticleArrById deletes the given articles one at a time, stopping at
// the first deletion error. Deletions are not transactional: articles removed
// before a failure stay removed.
func (bas *backendArticleServer) RemoveArticleArrById(idArr []int64) (err error) {
	for _, id := range idArr {
		if err = bad.DeleteArticleById(id); err != nil {
			return err
		}
	}
	return nil
}
// AddArticle inserts a new article via the DAO.
func (bas *backendArticleServer) AddArticle(article web_model.ArticleModel) (err error) {
	return bad.InsertArticle(article)
}
// GetArticleById returns the article with the given ID via the DAO.
func (bas *backendArticleServer) GetArticleById(id int64) (article web_model.ArticleModel, err error) {
	return bad.QueryArticleById(id)
}
// EditArticleById updates the article identified by the model's ID via the DAO.
func (bas *backendArticleServer) EditArticleById(article web_model.ArticleModel) (err error) {
	return bad.UpdateArticleById(article)
}
// EditArticleImageOrVideo updates the image or video path stored in the given
// field of the article with the given ID via the DAO.
func (bas *backendArticleServer) EditArticleImageOrVideo(id int64, field, path string) (err error) {
	return bad.UpdateArticleImageOrVideo(id, field, path)
}
package worker
import (
"context"
"crypto/sha1"
"fmt"
"io"
"path/filepath"
"time"
"golang.org/x/time/rate"
"github.com/apex/log"
"github.com/gocraft/work"
"github.com/gomodule/redigo/redis"
"gopkg.in/tomb.v2"
"git.scc.kit.edu/sdm/lsdf-checksum/internal/lengthsafe"
"git.scc.kit.edu/sdm/lsdf-checksum/internal/lifecycle"
"git.scc.kit.edu/sdm/lsdf-checksum/internal/ratedreader"
commonRedis "git.scc.kit.edu/sdm/lsdf-checksum/redis"
"git.scc.kit.edu/sdm/lsdf-checksum/workqueue"
)
// Config contains the configuration for a Worker.
type Config struct {
	// Concurrency is the number of jobs processed in parallel; it also
	// sizes the pool of read buffers.
	Concurrency int
	// MaxThroughput caps the worker-local rate limiter; values <= 0
	// disable the cap. Presumably bytes per second — confirm against
	// the ratedreader package.
	MaxThroughput int
	// FileReadSize is the size in bytes of each read buffer; it is also
	// used as the limiter's burst size.
	FileReadSize int

	Logger log.Interface `yaml:"-"`

	Redis       commonRedis.Config
	RedisPrefix string

	Workqueue workqueue.Config

	// PrefixTTL is how long the Prefixer caches resolved path prefixes.
	PrefixTTL time.Duration
	// Reaping intervals for the prefixer cache and workqueue state.
	PrefixerReapingInterval  time.Duration
	WorkqueueReapingInterval time.Duration
}
// DefaultConfig holds the defaults for Worker configuration: serial job
// processing, a 10 MiB throughput cap, 32 KiB read buffers and hourly
// reaping.
var DefaultConfig = &Config{
	Concurrency:              1,
	MaxThroughput:            10 * 1024 * 1024,
	FileReadSize:             32 * 1024,
	PrefixTTL:                30 * time.Minute,
	PrefixerReapingInterval:  time.Hour,
	WorkqueueReapingInterval: time.Hour,
}
// Worker consumes checksum jobs from the workqueue, hashes the referenced
// files and enqueues the results. Its lifecycle is managed through Start,
// SignalStop, Wait, Dead and Err.
type Worker struct {
	Config *Config

	ctx  context.Context
	tomb *tomb.Tomb

	pool       *redis.Pool
	workerPool *work.WorkerPool
	workqueues *WorkqueuesKeeper
	prefixer   *Prefixer

	// localLimiter throttles this worker's aggregate read throughput.
	localLimiter *rate.Limiter

	// Pools
	buffers chan []byte // reusable read buffers, one per concurrent job

	fieldLogger log.Interface
}
// New creates a Worker from the given configuration; call Start to run it.
func New(config *Config) *Worker {
	w := &Worker{Config: config}
	return w
}
// Start brings the worker up: it configures the local rate limiter, starts
// the prefixer and buffer pool synchronously, then creates the redis pool,
// workqueue keeper and gocraft worker pool inside the tomb so that a failure
// in any of them kills the whole worker.
func (w *Worker) Start(ctx context.Context) {
	w.fieldLogger = w.Config.Logger.WithFields(log.Fields{
		"component": "worker.Worker",
	})

	// MaxThroughput <= 0 means no throttling.
	limit := rate.Inf
	if w.Config.MaxThroughput > 0 {
		limit = rate.Limit(w.Config.MaxThroughput)
	}
	// The burst equals one full read, so a single read is never starved.
	w.localLimiter = rate.NewLimiter(limit, w.Config.FileReadSize)

	w.tomb, w.ctx = tomb.WithContext(ctx)

	w.prefixer = w.createPrefixer()
	w.prefixer.Start(w.tomb.Context(nil))

	w.initPools()

	w.tomb.Go(func() error {
		pool, err := w.createRedisPool()
		if err != nil {
			return err
		}
		w.pool = pool

		w.workqueues = w.createWorkqueuesKeeper(w.ctx, w.pool)
		w.workqueues.Start()

		// The worker pool runs until the tomb starts dying.
		w.tomb.Go(w.runWorkerPool)

		return nil
	})
}
// SignalStop asks the worker to shut down; use Wait to block until it has.
func (w *Worker) SignalStop() {
	w.tomb.Kill(lifecycle.ErrStopSignalled)
}
// Wait blocks until the worker has fully stopped and returns its final error.
func (w *Worker) Wait() error {
	return w.tomb.Wait()
}
// Dead returns a channel that is closed once the worker has stopped.
func (w *Worker) Dead() <-chan struct{} {
	return w.tomb.Dead()
}
// Err returns the reason the worker is stopping or has stopped, as reported
// by the underlying tomb.
func (w *Worker) Err() error {
	return w.tomb.Err()
}
// runWorkerPool starts the gocraft/work pool handling checksum jobs, then
// blocks until the tomb starts dying and shuts the pool down gracefully.
func (w *Worker) runWorkerPool() error {
	gocraftWorkNamespace := workqueue.GocraftWorkNamespace(w.Config.RedisPrefix)
	w.workerPool = work.NewWorkerPool(workerContext{}, uint(w.Config.Concurrency), gocraftWorkNamespace, w.pool)

	// Inject the owning Worker into each job's context before it runs.
	w.workerPool.Middleware(
		func(workerCtx *workerContext, _ *work.Job, next work.NextMiddlewareFunc) error {
			workerCtx.Worker = w
			return next()
		},
	)

	w.workerPool.Job(workqueue.ComputeChecksumJobName, (*workerContext).ComputeChecksum)

	w.fieldLogger.Info("Starting worker pool")
	w.workerPool.Start()

	<-w.tomb.Dying()

	w.fieldLogger.Info("Stopping worker pool")
	w.workerPool.Stop()
	w.fieldLogger.Info("Stopped worker pool")

	return nil
}
// createRedisPool builds a redis pool from the package defaults merged with
// the worker's redis configuration.
func (w *Worker) createRedisPool() (*redis.Pool, error) {
	config := commonRedis.DefaultConfig.
		Clone().
		Merge(&w.Config.Redis).
		Merge(&commonRedis.Config{})
	return commonRedis.CreatePool(config)
}
// createWorkqueuesKeeper builds the keeper that provides per-(filesystem,
// snapshot) workqueue contexts backed by the shared redis pool (see
// ComputeChecksum, which fetches contexts through it).
func (w *Worker) createWorkqueuesKeeper(ctx context.Context, pool *redis.Pool) *WorkqueuesKeeper {
	return NewWorkqueuesKeeper(WorkqueuesKeeperConfig{
		Context:         ctx,
		Pool:            pool,
		Prefix:          w.Config.RedisPrefix,
		FileReadSize:    w.Config.FileReadSize,
		Logger:          w.Config.Logger,
		Workqueue:       w.Config.Workqueue,
		ReapingInterval: w.Config.WorkqueueReapingInterval,
	})
}
// createPrefixer builds the Prefixer used to resolve and cache the path
// prefix for each work pack.
func (w *Worker) createPrefixer() *Prefixer {
	return NewPrefixer(&PrefixerConfig{
		TTL:             w.Config.PrefixTTL,
		ReapingInterval: w.Config.PrefixerReapingInterval,
		Logger:          w.Config.Logger,
	})
}
// initPools allocates one reusable read buffer per concurrent job and makes
// them available through the buffers channel.
func (w *Worker) initPools() {
	w.buffers = make(chan []byte, w.Config.Concurrency)
	for i := 0; i < w.Config.Concurrency; i++ {
		w.buffers <- make([]byte, w.Config.FileReadSize)
	}
}
// workerContext is the per-job context type used by gocraft/work; the
// middleware registered in runWorkerPool injects the owning Worker before
// each job runs.
type workerContext struct {
	Worker *Worker

	buffer []byte // scratch read buffer borrowed from Worker.buffers
}
// ComputeChecksum is the job handler for checksum jobs. It unmarshals the
// WorkPack from the job arguments, reads and hashes every file it lists, and
// enqueues a WriteBackPack with the results. Errors on individual files are
// logged and the file skipped; the job itself only fails on unmarshaling,
// workqueue, prefix or enqueue errors.
func (w *workerContext) ComputeChecksum(job *work.Job) error {
	var workPack workqueue.WorkPack

	fieldLogger := w.Worker.fieldLogger.WithFields(log.Fields{
		"job_name": job.Name,
		"job_id":   job.ID,
	})

	err := workPack.FromJobArgs(job.Args)
	if err != nil {
		fieldLogger.WithError(err).WithFields(log.Fields{
			"action": "failing-job",
			"args":   job.Args,
		}).Warn("Encountered error during WorkPack unmarshaling")
		return err
	}

	fieldLogger = fieldLogger.WithFields(log.Fields{
		"filesystem": workPack.FileSystemName,
		"snapshot":   workPack.SnapshotName,
	})

	fieldLogger.Debug("Starting processing job")

	// Look up the workqueue context (limiter + write-back enqueuer) for this
	// filesystem/snapshot pair.
	wqCtx, err := w.Worker.workqueues.Get(workPack.FileSystemName, workPack.SnapshotName)
	if err != nil {
		fieldLogger.WithError(err).WithFields(log.Fields{
			"action": "failing-job",
		}).Warn("Encountered error while getting workqueue instance")
		return err
	}

	prefix, err := w.Worker.prefixer.Prefix(&workPack)
	if err != nil {
		fieldLogger.WithError(err).WithFields(log.Fields{
			"action": "failing-job",
		}).Warn("Encountered error while determining path prefix")
		return err
	}

	var filesBytesRead int64
	var writeBackPack workqueue.WriteBackPack
	writeBackPack.Files = make([]workqueue.WriteBackPackFile, 0, len(workPack.Files))

	// gets expensive objects from pool and stores in w
	w.getFromPools()

	// Reads are throttled by both the worker-local limiter and the
	// per-workqueue limiter.
	limiters := [...]*rate.Limiter{
		w.Worker.localLimiter,
		wqCtx.Limiter,
	}

	for _, file := range workPack.Files {
		path := filepath.Join(prefix, file.Path)

		fieldLogger := fieldLogger.WithFields(log.Fields{
			"id":   file.ID,
			"path": path,
		})

		n, checksum, err := w.readFileAndComputeChecksum(path, limiters[:])
		if err != nil {
			// Per-file failure: log and continue with the remaining files.
			fieldLogger.WithError(err).WithFields(log.Fields{
				"action": "failing-job",
			}).Warn("Encountered error while computing checksum of file")
			continue
		}
		fieldLogger.WithFields(log.Fields{
			"bytes_read": n,
			"checksum":   checksum,
		}).Debug("Read file and computed checksum")

		filesBytesRead += n

		writeBackPack.Files = append(writeBackPack.Files, workqueue.WriteBackPackFile{
			ID:       file.ID,
			Checksum: checksum,
		})
	}

	w.returnToPools()

	fieldLogger.WithFields(log.Fields{
		"files_count":      len(writeBackPack.Files),
		"files_bytes_read": filesBytesRead,
	}).Debug("Enqueuing write back job for compute checksum job")

	_, err = wqCtx.WriteBackEnqueuer.Enqueue(&writeBackPack)
	if err != nil {
		fieldLogger.WithError(err).WithFields(log.Fields{
			"action":          "failing-job",
			"write_back_pack": &writeBackPack,
		}).Warn("Encountered error while enqueuing WriteBackPack")
		return err
	}

	return nil
}
// readFileAndComputeChecksum reads the file at path, throttled by the given
// rate limiters, and returns the number of bytes read along with the file's
// SHA-1 checksum.
func (w *workerContext) readFileAndComputeChecksum(
	path string, limiters []*rate.Limiter,
) (int64, []byte, error) {
	fileReader, err := lengthsafe.Open(path)
	if err != nil {
		return 0, nil, fmt.Errorf("opening file: %w", err)
	}
	// The deferred Close is a safety net for early returns; the explicit
	// Close below is the one whose error is surfaced.
	defer fileReader.Close()

	hasher := sha1.New()
	reader := ratedreader.NewMultiReader(fileReader, limiters)
	bytesRead, err := io.CopyBuffer(hasher, reader, w.buffer)
	if err != nil {
		return 0, nil, fmt.Errorf("reading file and computing checksum: %w", err)
	}

	if err := fileReader.Close(); err != nil {
		return 0, nil, fmt.Errorf("closing file: %w", err)
	}

	return bytesRead, hasher.Sum(nil), nil
}
// getFromPools borrows pooled objects (currently the read buffer) from the
// worker; it blocks until a buffer is available.
func (w *workerContext) getFromPools() {
	w.buffer = <-w.Worker.buffers
}
// returnToPools gives the borrowed objects back; it must be paired with a
// preceding getFromPools call.
func (w *workerContext) returnToPools() {
	w.Worker.buffers <- w.buffer
}
|
package arangodb
import (
"context"
common "github.com/Nubes3/common/models/arangodb"
arangoDriver "github.com/arangodb/go-driver"
"time"
)
// ContextExpiredTime is the database operation timeout, in seconds.
const ContextExpiredTime = 30

var (
	// userCol is the handle of the "users" collection; set by InitArangoRepo.
	userCol arangoDriver.Collection
)
// InitArangoRepo ensures the "users" collection exists and caches its handle
// in userCol. It panics on any database error, consistent with the existing
// CollectionExists handling.
func InitArangoRepo() {
	ctx, cancel := context.WithTimeout(context.Background(), ContextExpiredTime*time.Second)
	defer cancel()

	exist, err := common.ArangoDb.CollectionExists(ctx, "users")
	if err != nil {
		panic(err)
	}

	if exist {
		userCol, err = common.ArangoDb.Collection(ctx, "users")
	} else {
		userCol, err = common.ArangoDb.CreateCollection(ctx, "users", &arangoDriver.CreateCollectionOptions{})
	}
	// Bug fix: these errors were previously discarded with `_`, silently
	// leaving userCol nil on failure.
	if err != nil {
		panic(err)
	}
}
|
package main
import (
"context"
"log"
"os"
"github.com/joho/godotenv"
"github.com/rbonnat/blockchain-in-go/server"
)
// main loads the environment file, then starts the HTTP server on the
// configured port; any failure is fatal.
func main() {
	// Fetch environment variables
	if err := godotenv.Load(); err != nil {
		log.Fatal(err)
	}

	// Launch http server
	if err := server.Run(context.TODO(), os.Getenv("PORT")); err != nil {
		log.Fatal(err)
	}
}
|
/*
Copyright 2020 Humio https://humio.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"fmt"
"github.com/humio/humio-operator/pkg/helpers"
humiov1alpha1 "github.com/humio/humio-operator/api/v1alpha1"
"github.com/humio/humio-operator/pkg/kubernetes"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// constructNginxIngressAnnotations returns the nginx ingress annotations for
// the cluster: a fixed security/CORS baseline, TLS-dependent entries, and
// finally any ingress-specific annotations (which win on key conflicts).
func constructNginxIngressAnnotations(hc *humiov1alpha1.HumioCluster, hostname string, ingressSpecificAnnotations map[string]string) map[string]string {
	annotations := map[string]string{
		"nginx.ingress.kubernetes.io/configuration-snippet": `
more_set_headers "Expect-CT: max-age=604800, enforce";
more_set_headers "Referrer-Policy: no-referrer";
more_set_headers "X-Content-Type-Options: nosniff";
more_set_headers "X-Frame-Options: DENY";
more_set_headers "X-XSS-Protection: 1; mode=block";`,
		"nginx.ingress.kubernetes.io/cors-allow-credentials": "false",
		"nginx.ingress.kubernetes.io/cors-allow-headers":     "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization",
		"nginx.ingress.kubernetes.io/cors-allow-methods":     "GET, PUT, POST, DELETE, PATCH, OPTIONS",
		"nginx.ingress.kubernetes.io/cors-allow-origin":      fmt.Sprintf("https://%s", hostname),
		"nginx.ingress.kubernetes.io/enable-cors":            "true",
		"nginx.ingress.kubernetes.io/upstream-vhost":         hostname,
	}

	if ingressTLSOrDefault(hc) {
		annotations["nginx.ingress.kubernetes.io/force-ssl-redirect"] = "true"
	}

	if helpers.TLSEnabled(hc) {
		annotations["nginx.ingress.kubernetes.io/backend-protocol"] = "HTTPS"
		annotations["nginx.ingress.kubernetes.io/proxy-ssl-name"] = fmt.Sprintf("%s.%s", hc.Name, hc.Namespace)
		annotations["nginx.ingress.kubernetes.io/proxy-ssl-server-name"] = fmt.Sprintf("%s.%s", hc.Name, hc.Namespace)
		annotations["nginx.ingress.kubernetes.io/proxy-ssl-secret"] = fmt.Sprintf("%s/%s", hc.Namespace, hc.Name)
		annotations["nginx.ingress.kubernetes.io/proxy-ssl-verify"] = "on"
	}

	for k, v := range ingressSpecificAnnotations {
		annotations[k] = v
	}
	return annotations
}
// ConstructGeneralIngress returns the ingress carrying general traffic to
// Humio on the given hostname.
func ConstructGeneralIngress(hc *humiov1alpha1.HumioCluster, hostname string) *networkingv1.Ingress {
	annotations := map[string]string{
		"nginx.ingress.kubernetes.io/proxy-body-size":    "512m",
		"nginx.ingress.kubernetes.io/proxy-http-version": "1.1",
		"nginx.ingress.kubernetes.io/proxy-read-timeout": "25",
	}
	return constructIngress(
		hc,
		fmt.Sprintf("%s-general", hc.Name),
		hostname,
		[]string{humioPathOrDefault(hc)},
		HumioPort,
		certificateSecretNameOrDefault(hc),
		constructNginxIngressAnnotations(hc, hostname, annotations),
	)
}
// ConstructStreamingQueryIngress returns the ingress for long-running
// streaming query endpoints, with buffering disabled and a 4h read timeout.
func ConstructStreamingQueryIngress(hc *humiov1alpha1.HumioCluster, hostname string) *networkingv1.Ingress {
	annotations := map[string]string{
		"nginx.ingress.kubernetes.io/proxy-body-size":    "512m",
		"nginx.ingress.kubernetes.io/proxy-http-version": "1.1",
		"nginx.ingress.kubernetes.io/proxy-read-timeout": "4h",
		"nginx.ingress.kubernetes.io/use-regex":          "true",
		"nginx.ingress.kubernetes.io/proxy-buffering":    "off",
	}
	return constructIngress(
		hc,
		fmt.Sprintf("%s-streaming-query", hc.Name),
		hostname,
		[]string{fmt.Sprintf("%sapi/v./(dataspaces|repositories)/[^/]+/query$", humioPathOrDefault(hc))},
		HumioPort,
		certificateSecretNameOrDefault(hc),
		constructNginxIngressAnnotations(hc, hostname, annotations),
	)
}
// ConstructIngestIngress returns the ingress for Humio's ingest endpoints.
func ConstructIngestIngress(hc *humiov1alpha1.HumioCluster, hostname string) *networkingv1.Ingress {
	annotations := map[string]string{
		"nginx.ingress.kubernetes.io/proxy-body-size":    "512m",
		"nginx.ingress.kubernetes.io/proxy-http-version": "1.1",
		"nginx.ingress.kubernetes.io/proxy-read-timeout": "90",
		"nginx.ingress.kubernetes.io/use-regex":          "true",
	}
	return constructIngress(
		hc,
		fmt.Sprintf("%s-ingest", hc.Name),
		hostname,
		[]string{
			fmt.Sprintf("%sapi/v./(dataspaces|repositories)/[^/]+/(ingest|logplex)", humioPathOrDefault(hc)),
			fmt.Sprintf("%sapi/v1/ingest", humioPathOrDefault(hc)),
			fmt.Sprintf("%sservices/collector", humioPathOrDefault(hc)),
			fmt.Sprintf("%s_bulk", humioPathOrDefault(hc)),
		},
		HumioPort,
		certificateSecretNameOrDefault(hc),
		constructNginxIngressAnnotations(hc, hostname, annotations),
	)
}
// ConstructESIngestIngress returns the ingress for Elasticsearch-compatible
// ingest on its dedicated hostname and port.
func ConstructESIngestIngress(hc *humiov1alpha1.HumioCluster, esHostname string) *networkingv1.Ingress {
	annotations := map[string]string{
		"nginx.ingress.kubernetes.io/proxy-body-size":    "512m",
		"nginx.ingress.kubernetes.io/proxy-http-version": "1.1",
		"nginx.ingress.kubernetes.io/proxy-read-timeout": "90",
	}
	return constructIngress(
		hc,
		fmt.Sprintf("%s-es-ingest", hc.Name),
		esHostname,
		[]string{humioPathOrDefault(hc)},
		elasticPort,
		esCertificateSecretNameOrDefault(hc),
		constructNginxIngressAnnotations(hc, esHostname, annotations),
	)
}
// constructIngress builds an ingress routing the given paths on hostname to
// the cluster service on port, optionally with TLS, and finally overlays any
// annotations from the cluster spec (which win on key conflicts).
func constructIngress(hc *humiov1alpha1.HumioCluster, name string, hostname string, paths []string, port int, secretName string, annotations map[string]string) *networkingv1.Ingress {
	pathType := networkingv1.PathTypeImplementationSpecific
	serviceName := ConstructService(NewHumioNodeManagerFromHumioCluster(hc)).Name

	httpIngressPaths := make([]networkingv1.HTTPIngressPath, 0, len(paths))
	for _, path := range paths {
		httpIngressPaths = append(httpIngressPaths, networkingv1.HTTPIngressPath{
			Path:     path,
			PathType: &pathType,
			Backend: networkingv1.IngressBackend{
				Service: &networkingv1.IngressServiceBackend{
					Name: serviceName,
					Port: networkingv1.ServiceBackendPort{
						Number: int32(port),
					},
				},
			},
		})
	}

	ingress := networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Namespace:   hc.Namespace,
			Annotations: annotations,
			Labels:      kubernetes.MatchingLabelsForHumio(hc.Name),
		},
		Spec: networkingv1.IngressSpec{
			Rules: []networkingv1.IngressRule{
				{
					Host: hostname,
					IngressRuleValue: networkingv1.IngressRuleValue{
						HTTP: &networkingv1.HTTPIngressRuleValue{
							Paths: httpIngressPaths,
						},
					},
				},
			},
		},
	}

	if ingressTLSOrDefault(hc) {
		ingress.Spec.TLS = []networkingv1.IngressTLS{
			{
				Hosts:      []string{hostname},
				SecretName: secretName,
			},
		}
	}

	for k, v := range hc.Spec.Ingress.Annotations {
		ingress.ObjectMeta.Annotations[k] = v
	}
	return &ingress
}
|
package nebulatest
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
nebula "github.com/vesoft-inc/nebula-go"
"github.com/vesoft-inc/nebula-go/graph"
)
const (
	// Markers structuring a test file: a test header line, then the input
	// (nGQL) section, then the expected-output section.
	testPrefix = "=== test"
	inPrefix   = "--- in"
	outPrefix  = "--- out"
)
// Tester runs test files against a nebula graph client. err holds the error
// of the test currently being processed and is reset between tests.
type Tester struct {
	client *nebula.GraphClient
	err    error
}
// NewTester returns a Tester that executes statements through the given client.
func NewTester(client *nebula.GraphClient) *Tester {
	return &Tester{
		client: client,
	}
}
// Parse reads a test file and executes it. A file contains blocks of the form:
//
//	=== test <name>
//	--- in [wait=<duration>]
//	<nGQL statement lines>
//	--- out [type=...,order=...]
//	<expected output lines>
//
// Each input section is executed against the client when its "--- out"
// marker is reached, and the collected output section is diffed against the
// response; the result of each test is logged via printResult.
func (tester *Tester) Parse(filename string) {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Printf("Invalid file: %s", filename)
		return
	}
	scanner := bufio.NewScanner(strings.NewReader(string(b)))
	// TODO(yee): Use FSM to implement parse
	var inBuf, outBuf bytes.Buffer
	var testName string
	var response *graph.ExecutionResponse
	var differ Differ
	var wait time.Duration
	isInput, isOutput := false, false
	for scanner.Scan() {
		text := scanner.Text()
		if strings.HasPrefix(text, testPrefix) {
			// A new test header closes the previous test's output section:
			// run the diff, report, and reset per-test state.
			if isOutput {
				differ.Diff(outBuf.String())
				if tester.err == nil && differ.Error() != nil {
					tester.err = differ.Error()
				}
				tester.printResult(testName)
				outBuf.Reset()
				isOutput = false
				tester.err = nil
			}
			prefixLen := len(testPrefix)
			testName = strings.TrimLeft(strings.TrimSpace(text[prefixLen:]), ": ")
		} else if strings.HasPrefix(text, inPrefix) {
			isInput = true
			// Optional "wait=<duration>" option delays execution of the input.
			w := strings.TrimLeft(strings.TrimSpace(text[len(inPrefix):]), ": ")
			wait = tester.parseInputWait(w)
		} else if strings.HasPrefix(text, outPrefix) {
			isOutput = true
			// The out marker ends the input section: execute the buffered
			// statement now and set up the differ from the out options.
			if isInput {
				time.Sleep(wait)
				if response, err = tester.request(inBuf.String()); err != nil {
					log.Printf("Failed nGQL statment: %s", inBuf.String())
					tester.err = err
				} else {
					if d, err := tester.newDiffer(text, response); err != nil {
						tester.err = err
					} else {
						differ = d
					}
				}
				isInput = false
				inBuf.Reset()
			}
		} else {
			if isInput {
				// Skip comment lines inside the input section.
				if !strings.HasPrefix(text, "--") && !strings.HasPrefix(text, "#") && !strings.HasPrefix(text, "//") {
					// text = fmt.Sprintf("%q", text)
					text = strings.TrimRight(text, "\\ \"")
					text = strings.TrimLeft(text, "\"")
					inBuf.WriteString(text)
				}
			}
			if isOutput {
				if outBuf.Len() > 0 {
					outBuf.WriteString("\n")
				}
				outBuf.WriteString(text)
			}
		}
	}
	// Flush the final test, which has no following header to close it.
	if isOutput {
		differ.Diff(outBuf.String())
		if tester.err == nil && differ.Error() != nil {
			tester.err = differ.Error()
		}
		tester.printResult(testName)
		outBuf.Reset()
		tester.err = nil
	}
}
// request executes the given nGQL statement and returns its response,
// converting a non-success error code into an error.
func (tester *Tester) request(gql string) (*graph.ExecutionResponse, error) {
	resp, err := tester.client.Execute(strings.TrimSpace(gql))
	if err != nil {
		return nil, err
	}
	if code := resp.GetErrorCode(); code != graph.ErrorCode_SUCCEEDED {
		return nil, fmt.Errorf("ErrorCode: %v, ErrorMsg: %s", code, resp.GetErrorMsg())
	}
	return resp, nil
}
// newDiffer builds a Differ for the response, parsing options from the
// "--- out" marker line when present. Only the JSON parser is supported for
// now, so any requested type is overridden with "json".
func (tester *Tester) newDiffer(outText string, response *graph.ExecutionResponse) (Differ, error) {
	dType, order := "json", false
	if strings.Contains(outText, ",") {
		colon := strings.Index(outText, ":")
		dType, order = tester.getOptions(outText[colon+1:])
	}
	// TODO: Now only support JSON parser
	dType = "json"
	return NewDiffer(response, dType, order)
}
// getOptions parses a comma-separated list of "key=value" options,
// recognizing "type" and "order"; entries without "=" are skipped and
// unknown keys are fatal.
func (t *Tester) getOptions(config string) (dType string, order bool) {
	dType, order = "table", false
	for _, op := range strings.Split(config, ",") {
		if !strings.Contains(op, "=") {
			continue
		}
		kv := strings.Split(op, "=")
		key := strings.Trim(strings.ToLower(kv[0]), " ")
		value := strings.Trim(strings.ToLower(kv[1]), " ")
		switch key {
		case "type":
			dType = value
		case "order":
			b, err := strconv.ParseBool(value)
			if err != nil {
				log.Printf("Invalid order type: %s", kv[1])
				continue
			}
			order = b
		default:
			log.Fatalf("Unvalid key: %s", key)
		}
	}
	return dType, order
}
// parseInputWait parses an optional "wait=<duration>" option from the input
// marker line and returns the duration to sleep before executing the
// statement. Empty or malformed input yields zero.
func (t *Tester) parseInputWait(s string) time.Duration {
	// Idiom fix: the zero time.Duration is just 0; the original computed it
	// three times via time.ParseDuration("0s") with an ignored error.
	if len(s) == 0 {
		return 0
	}
	kv := strings.Split(s, "=")
	if len(kv) != 2 || strings.ToLower(kv[0]) != "wait" {
		log.Println("Invalid option format, like wait=10s")
		return 0
	}
	d, err := time.ParseDuration(strings.TrimSpace(kv[1]))
	if err != nil {
		log.Printf("Error wait format: %s", kv[1])
		return 0
	}
	return d
}
// printResult logs whether the named test passed, or failed with its error.
func (t *Tester) printResult(testName string) {
	if t.err == nil {
		log.Printf("Test (%s) passed.", testName)
		return
	}
	log.Printf("Test (%s) fails.\n%s", testName, t.err.Error())
}
|
package api
import (
"github.com/graphql-go/graphql"
"github.com/graphql-go/relay"
"golang.org/x/net/context"
)
// nodeDefinitions wires up relay's Node interface. Both resolvers are
// currently stubs returning nil; the commented-out code sketches the
// intended ID-based lookup and type resolution.
var nodeDefinitions = *relay.NewNodeDefinitions(relay.NodeDefinitionsConfig{
	IDFetcher: func(id string, info graphql.ResolveInfo, ctx context.Context) (interface{}, error) {
		/* c := NewContext(ctx) */
		// resolve id from global id
		/* resolvedID := relay.FromGlobalID(id) */
		// based on id and its type, return the object
		/* switch resolvedID.Type {
		case "Faction":
			return GetFaction(resolvedID.ID), nil
		case "Ship":
			return GetShip(resolvedID.ID), nil
		default:
			return nil, errors.New("Unknown node type")
		} */
		return nil, nil
	},
	TypeResolve: func(p graphql.ResolveTypeParams) *graphql.Object {
		// based on the type of the value, return GraphQLObjectType
		/* switch p.Value.(type) {
		case *Faction:
			return factionType
		default:
			return shipType
		} */
		return nil
	},
})
|
package urlutil
import (
"encoding/base64"
"fmt"
"net/url"
"strconv"
"time"
"github.com/pomerium/pomerium/pkg/cryptutil"
)
// SignedURL is a shared-key HMAC wrapped URL.
type SignedURL struct {
	uri    url.URL // copy of the wrapped URL; Sign/Validate mutate its query
	key    []byte  // shared HMAC key
	signed bool    // true once Sign has been run
	// mockable time for testing
	timeNow func() time.Time
}
// NewSignedURL creates a new copy of a URL that can be signed with a shared key.
//
// N.B. It is the user's responsibility to make sure the key is 256 bits and the url is not nil.
func NewSignedURL(key []byte, uri *url.URL) *SignedURL {
	return &SignedURL{uri: *uri, key: key, timeNow: time.Now} // uri is copied
}
// Sign creates a shared-key HMAC signed URL.
//
// The HMAC covers the URL string with issued and expiry already encoded into
// the query but WITHOUT the signature parameter; Validate strips the
// signature again before recomputing the HMAC, so the two-step encoding
// below is deliberate and order-sensitive.
func (su *SignedURL) Sign() *url.URL {
	now := su.timeNow()
	issued := newNumericDate(now)
	// Signed URLs expire five minutes after issuance.
	expiry := newNumericDate(now.Add(5 * time.Minute))
	params := su.uri.Query()
	params.Set(QueryHmacIssued, fmt.Sprint(issued))
	params.Set(QueryHmacExpiry, fmt.Sprint(expiry))
	// First encode: fix the query the HMAC is computed over.
	su.uri.RawQuery = params.Encode()
	params.Set(QueryHmacSignature, hmacURL(su.key, su.uri.String(), issued, expiry))
	// Second encode: append the signature itself.
	su.uri.RawQuery = params.Encode()
	su.signed = true
	return &su.uri
}
// String implements the stringer interface, signing the URL first if that
// has not already happened.
func (su *SignedURL) String() string {
	if !su.signed {
		su.Sign() // Sign records su.signed itself
	}
	return su.uri.String()
}
// Validate checks to see if a signed URL is valid: the signature parameter
// must HMAC-match the URL as it looked when signed, and the issued/expiry
// timestamps must be current within DefaultLeeway of clock skew.
func (su *SignedURL) Validate() error {
	now := su.timeNow()
	params := su.uri.Query()
	sig, err := base64.URLEncoding.DecodeString(params.Get(QueryHmacSignature))
	if err != nil {
		return fmt.Errorf("internal/urlutil: malformed signature %w", err)
	}
	// Strip the signature so the URL matches the form that was signed
	// (see Sign: the HMAC covers the URL with issued/expiry but without
	// the signature parameter).
	params.Del(QueryHmacSignature)
	su.uri.RawQuery = params.Encode()
	issued, err := newNumericDateFromString(params.Get(QueryHmacIssued))
	if err != nil {
		return err
	}
	expiry, err := newNumericDateFromString(params.Get(QueryHmacExpiry))
	if err != nil {
		return err
	}
	// Allow DefaultLeeway of clock skew in both directions.
	if expiry != nil && now.Add(-DefaultLeeway).After(expiry.Time()) {
		return ErrExpired
	}
	if issued != nil && now.Add(DefaultLeeway).Before(issued.Time()) {
		return ErrIssuedInTheFuture
	}
	validHMAC := cryptutil.CheckHMAC(
		[]byte(fmt.Sprint(su.uri.String(), issued, expiry)),
		sig,
		su.key)
	if !validHMAC {
		return fmt.Errorf("internal/urlutil: hmac failed")
	}
	return nil
}
// hmacURL takes a redirect url string and timestamp and returns the base64
// encoded HMAC result. All parts are concatenated with fmt.Sprint before
// hashing, so callers must pass them in a canonical order.
func hmacURL(key []byte, data ...interface{}) string {
	h := cryptutil.GenerateHMAC([]byte(fmt.Sprint(data...)), key)
	return base64.URLEncoding.EncodeToString(h)
}
// numericDate used because we don't need the precision of a typical time.Time.
type numericDate int64
func newNumericDate(t time.Time) *numericDate {
if t.IsZero() {
return nil
}
out := numericDate(t.Unix())
return &out
}
// newNumericDateFromString parses a decimal Unix timestamp, returning
// ErrNumericDateMalformed when the string is not a valid integer.
func newNumericDateFromString(s string) (*numericDate, error) {
	i, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return nil, ErrNumericDateMalformed
	}
	n := numericDate(i)
	return &n, nil
}
// Time converts the numericDate back to a time.Time; a nil receiver yields
// the zero time, mirroring newNumericDate's nil-for-zero mapping.
func (n *numericDate) Time() time.Time {
	if n == nil {
		return time.Time{}
	}
	return time.Unix(int64(*n), 0)
}
// String renders the timestamp as a decimal string.
// NOTE(review): unlike Time, there is no nil-receiver guard here — calling
// String directly on a nil *numericDate panics; callers appear to only reach
// it via fmt.Sprint with non-nil values — verify.
func (n *numericDate) String() string {
	return strconv.FormatInt(int64(*n), 10)
}
|
package handler
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
)
func UploadHandler(w http.ResponseWriter, r *http.Request){
if r.Method == "GET" {
// 返回上传html页面
data,err := ioutil.ReadFile("./static/view/index.html")
if err != nil{
io.WriteString(w,"internal server error"+err.Error())
return
}else {
io.WriteString(w, string(data))
return
}
} else if r.Method == "POST" {
// 接收文件流以及储存到本地目录
file, head, err := r.FormFile("file")
if err!=nil{
fmt.Printf("Fail to get data, err:%s\n",err.Error())
return
}
defer file.Close()
newFile, err:= os.Create("./"+head.Filename)
if err!= nil {
fmt.Printf("Fail to get data, err:%s\n",err.Error())
}
defer newFile.Close()
_, err = io.Copy(newFile, file)
if err != nil{
fmt.Printf("Failed to save data into file, err:%s\n",err.Error())
return
}
// 重定向
http.Redirect(w, r,"/file/upload/suc",http.StatusFound)
}
}
// UploadSucHandler:上传已完成
func UploadSucHandler(w http.ResponseWriter, r *http.Request){
io.WriteString(w,"upload finished!")
}
|
package platform
import (
"bytes"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testClient is a minimal platform client whose exported fields round-trip
// through the platform's save/restore machinery in the tests below.
type testClient struct {
	Num int
	Buf []byte
}

// Update satisfies the client interface; it does nothing.
func (tcl testClient) Update(_ *Context) error {
	return nil
}
// TestPlatform_state_reload saves platform and client state into a buffer,
// then verifies that a freshly constructed platform restores both from it.
func TestPlatform_state_reload(t *testing.T) {
	var save []byte

	// Phase 1: build a platform, mutate platform and client state, snapshot.
	require.NoError(t, func() error {
		p, err := New(nil, nil, Config{})
		require.NoError(t, err)
		var cl testClient
		p.TimingEnabled = true
		p.client = &cl
		cl.Num = 42
		cl.Buf = append(cl.Buf, "hello"...)
		var buf bytes.Buffer
		if err := p.writeState(&buf); err != nil {
			return err
		}
		save = buf.Bytes()
		return nil
	}(), "unexpected writeState error")

	// Phase 2: load the snapshot into a brand-new platform and compare.
	require.NoError(t, func() error {
		p, err := New(nil, nil, Config{})
		require.NoError(t, err)
		var cl testClient
		p.client = &cl
		if err := p.readState(bytes.NewReader(save)); err != nil {
			return err
		}
		assert.True(t, p.TimingEnabled)
		assert.Equal(t, 42, cl.Num)
		assert.Equal(t, "hello", string(cl.Buf))
		return nil
	}(), "unexpected readState error")
}
// TestPlatform_state_rewind verifies that snapshots taken at different
// points can be restored in any order (save1 -> save2 -> save1), each
// time restoring exactly the platform flags and client fields that were
// live when that snapshot was written.
func TestPlatform_state_rewind(t *testing.T) {
	require.NoError(t, func() error {
		p, err := New(nil, nil, Config{})
		require.NoError(t, err)
		var cl testClient
		p.TimingEnabled = true
		p.client = &cl
		cl.Num = 42
		cl.Buf = append(cl.Buf, "hello"...)
		var buf bytes.Buffer
		// Snapshot 1: TimingEnabled=true, Num=42, Buf="hello".
		if err := p.writeState(&buf); err != nil {
			return fmt.Errorf("failed first save: %v", err)
		}
		// Detach the bytes: buf is reset and reused for snapshot 2.
		save1 := append([]byte(nil), buf.Bytes()...)
		// Mutate state before taking the second snapshot.
		cl.Num++
		cl.Buf = append(cl.Buf, " world"...)
		p.TimingEnabled = false
		buf.Reset()
		// Snapshot 2: TimingEnabled=false, Num=43, Buf="hello world".
		if err := p.writeState(&buf); err != nil {
			return fmt.Errorf("failed second save: %v", err)
		}
		save2 := append([]byte(nil), buf.Bytes()...)
		// load save1
		if err := p.readState(bytes.NewReader(save1)); err != nil {
			return fmt.Errorf("failed first read: %v", err)
		}
		assert.False(t, p.LogTiming)
		assert.True(t, p.TimingEnabled)
		assert.Equal(t, 42, cl.Num)
		assert.Equal(t, "hello", string(cl.Buf))
		// load save2
		if err := p.readState(bytes.NewReader(save2)); err != nil {
			return fmt.Errorf("failed second read: %v", err)
		}
		assert.False(t, p.LogTiming)
		assert.False(t, p.TimingEnabled)
		assert.Equal(t, 43, cl.Num)
		assert.Equal(t, "hello world", string(cl.Buf))
		// load save1 again: rewinding to an older snapshot must work too.
		if err := p.readState(bytes.NewReader(save1)); err != nil {
			return fmt.Errorf("failed third read: %v", err)
		}
		assert.False(t, p.LogTiming)
		assert.True(t, p.TimingEnabled)
		assert.Equal(t, 42, cl.Num)
		assert.Equal(t, "hello", string(cl.Buf))
		return nil
	}(), "unexpected write/readState error")
}
|
package main
import "fmt"
// main demonstrates slice/array aliasing: slices share the array's
// backing storage until an append exceeds capacity and reallocates.
func main() {
	arr := [...]int{1, 2, 3, 4, 5}

	tail := arr[2:5] // view over arr[2..4]; len 3, cap 3
	fmt.Println("s1:", tail)
	fmt.Println("s1:", len(tail), cap(tail))

	head := arr[:3] // len 3, cap 5 — room to grow inside arr
	fmt.Println("s2:", len(head), cap(head))
	fmt.Println("s2:", head)
	///////////////////////////////////////
	fmt.Println("a:", arr)
	// Appending within capacity writes straight into arr.
	head = append(head, 7, 8)
	fmt.Println("a:", arr)
	///////////////////////////////////////
	grown := make([]int, 3, 6)
	fmt.Println("s3:", len(grown), cap(grown))
	fmt.Printf("%v => %p\n", grown, grown)
	grown = append(grown, 1, 2, 3) // fills capacity; same backing array
	fmt.Printf("%v => %p\n", grown, grown)
	grown = append(grown, 7, 8, 9) // exceeds capacity; reallocates
	fmt.Printf("%v => %p\n", grown, grown)
	///////////////////////////////////////
	copy(grown, head) // copies min(len(grown), len(head)) elements
	fmt.Println(grown)
}
//s1: [3 4 5]
//s1: 3 3
//s2: 3 5
//s2: [1 2 3]
//a: [1 2 3 4 5]
//a: [1 2 3 7 8]
//s3: 3 6
//[0 0 0] => 0xc420014210
//[0 0 0 1 2 3] => 0xc420014210
//[0 0 0 1 2 3 7 8 9] => 0xc420078060
//[1 2 3 7 8 3 7 8 9]
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package azurestack
import (
"context"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions"
)
// ListLocations returns the Azure regions available to the subscription.
func (az *AzureClient) ListLocations(ctx context.Context) (*[]subscriptions.Location, error) {
	// Use err, not "error": naming the variable after the builtin type
	// shadows it for the rest of the scope.
	list, err := az.subscriptionsClient.ListLocations(ctx, az.subscriptionID)
	if err != nil {
		return nil, err
	}
	return list.Value, nil
}
|
// +build plan9 solaris
package goselect
import (
"fmt"
"runtime"
"syscall"
)
// ErrUnsupported is returned by sysSelect on platforms (plan9, solaris)
// where select is not available through this package.
var ErrUnsupported = fmt.Errorf("Platform %s/%s unsupported", runtime.GOOS, runtime.GOARCH)

// sysSelect always fails on unsupported platforms.
// (Fixes the "Platofrm" typo in the error message.)
func sysSelect(n int, r, w, e *FDSet, timeout *syscall.Timeval) (int, error) {
	return 0, ErrUnsupported
}
|
package generic
import (
"context"
"github.com/loft-sh/vcluster/pkg/util/loghelper"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// forwardClusterController reconciles cluster-scoped resources from the
// virtual cluster into the host cluster (the "forward" direction).
type forwardClusterController struct {
	synced func() // called at reconcile start to ensure caches are synced
	target ClusterSyncer // sync/compare logic for the resource type being forwarded
	log loghelper.Logger
	localClient client.Client // client for the host (physical) cluster
	virtualClient client.Client // client for the virtual cluster
	scheme *runtime.Scheme
}
// GarbageCollect re-queues virtual cluster-scoped objects whose physical
// counterpart has drifted: it lists all virtual objects, fetches the
// matching physical object by name, and enqueues a reconcile request
// whenever the target reports an update is needed. Per-object failures
// are logged and skipped so one bad object cannot abort the pass.
func (r *forwardClusterController) GarbageCollect(queue workqueue.RateLimitingInterface) error {
	ctx := context.Background()
	// list all virtual objects first
	vList := r.target.NewList()
	err := r.virtualClient.List(ctx, vList)
	if err != nil {
		return err
	}
	// check if physical object exists
	vItems, err := meta.ExtractList(vList)
	if err != nil {
		return err
	}
	for _, vObj := range vItems {
		// Accessor error deliberately ignored — assumes every list item
		// implements the metadata accessor interface (TODO confirm).
		vAccessor, _ := meta.Accessor(vObj)
		pObj := r.target.New()
		// Cluster-scoped lookup: name only, no namespace.
		err = r.localClient.Get(ctx, types.NamespacedName{
			Name: vAccessor.GetName(),
		}, pObj)
		if kerrors.IsNotFound(err) {
			// we ignore this case as we only update cluster resources on host, but never create them ourselves
			continue
		} else if err != nil {
			// Log and continue with the next object instead of failing the
			// whole garbage collection.
			r.log.Infof("cannot get physical object %s: %v", vAccessor.GetName(), err)
			continue
		}
		updateNeeded, err := r.target.ForwardUpdateNeeded(pObj, vObj.(client.Object))
		if err != nil {
			r.log.Infof("error in update needed for virtual object %s: %v", vAccessor.GetName(), err)
			continue
		}
		if updateNeeded {
			// Resync by enqueueing a reconcile request for this name.
			r.log.Debugf("resync virtual object %s", vAccessor.GetName())
			queue.Add(reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name: vAccessor.GetName(),
				},
			})
		}
	}
	return nil
}
// Reconcile syncs one cluster-scoped object from the virtual cluster to
// the host: if both virtual and physical objects exist it forwards an
// update, and if the virtual object is gone it forwards the delete.
// Existence checks treat NotFound as "does not exist" and propagate any
// other Get error.
func (r *forwardClusterController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// make sure the caches are synced
	r.synced()

	// check if we should skip reconcile
	if lifecycle, ok := r.target.(ForwardLifecycle); ok {
		skip, err := lifecycle.ForwardStart(ctx, req)
		// ForwardEnd must run even when we skip or fail below.
		defer lifecycle.ForwardEnd()
		if skip || err != nil {
			return ctrl.Result{}, err
		}
	}

	// get virtual object (idiom fix: !IsNotFound instead of "== false")
	vObj := r.target.New()
	vExists := true
	if err := r.virtualClient.Get(ctx, req.NamespacedName, vObj); err != nil {
		if !kerrors.IsNotFound(err) {
			return ctrl.Result{}, err
		}
		vExists = false
	}

	// get physical object
	pObj := r.target.New()
	pExists := true
	if err := r.localClient.Get(ctx, req.NamespacedName, pObj); err != nil {
		if !kerrors.IsNotFound(err) {
			return ctrl.Result{}, err
		}
		pExists = false
	}

	if vExists && pExists {
		return r.target.ForwardUpdate(ctx, pObj, vObj, r.log)
	} else if !vExists {
		// Virtual object was deleted; tell the target to clean up.
		return ctrl.Result{}, r.target.ForwardOnDelete(ctx, req)
	}
	// vExists && !pExists: nothing to do — host objects are never created here.
	return ctrl.Result{}, nil
}
|
package main
import "fmt"
// NewMap returns a map holding name under the "name" key, or nil when
// name is empty.
func NewMap(name string) map[string]string {
	if name == "" {
		return nil
	}
	// Early return above removes the redundant else branch.
	return map[string]string{
		"name": name,
	}
}
// main demonstrates that nil is a valid value for reference-like types
// (interfaces, functions, maps, slices, pointers, channels): NewMap("")
// returns a nil map, which we detect and report.
func main() {
	result := NewMap("")
	if result != nil {
		fmt.Println(result)
		return
	}
	fmt.Println("Data kosong")
}
|
package main
import (
"fmt"
"time"
"runtime"
)
// main runs the CPU inspection demos. The goroutine flood demo stays
// disabled because it spams stdout indefinitely.
func main() {
	// tryGoRoutine()
	printCpuNum()
	cpuLimit()
}
// tryGoRoutine launches 1000 goroutines that each print their id in an
// endless loop, then sleeps one millisecond so some of them get
// scheduled before the function returns.
func tryGoRoutine() {
	for id := 0; id < 1000; id++ {
		go func(id int) {
			for {
				fmt.Printf("go routine %d\n", id)
			}
		}(id)
	}
	time.Sleep(time.Millisecond)
}
// printCpuNum prints the number of logical CPUs visible to the process.
func printCpuNum() {
	fmt.Printf("%d\n", runtime.NumCPU())
}
// cpuLimit caps GOMAXPROCS at one less than the CPU count and prints
// the value returned by GOMAXPROCS, which is the previous setting.
func cpuLimit() {
	previous := runtime.GOMAXPROCS(runtime.NumCPU() - 1)
	fmt.Println("runtime cpus: ", previous)
}
|
package main
import (
"fmt"
"log"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
hello "github.com/sanguohot/medichain/contracts/hello" // for demo
)
func main() {
client, err := ethclient.Dial("http://10.6.250.56:8545")
if err != nil {
log.Fatal(err)
return
}
// 0xb349Eba018bFA9d89Da90829629D39668F6653A2
// 0xca21167a870cf8b9618d259af454c6d00b30b028
// 0xB818715eb048286A608B5E9851877AD7A30a41A0
address := common.HexToAddress("0xb349Eba018bFA9d89Da90829629D39668F6653A2")
instance, err := hello.NewHello(address, client)
if err != nil {
log.Fatal(err)
return
}
result, err := instance.Speak(nil)
if err != nil {
log.Fatal(err)
return
}
fmt.Println(result) // "1.0"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.