text stringlengths 11 4.05M |
|---|
package raft
// Message type identifiers carried in Header.Type. Each request is
// immediately followed by its response constant, so a response value is
// always request+1.
const (
	HeartBeatRequest = iota
	HeartBeatResponse
	VoteRequest
	VoteResponse
	ClientRequst // NOTE(review): typo of "ClientRequest"; exported, so renaming would break callers — fix in a coordinated change.
	ClientResponse
	NodeInfoRequest
	NodeInfoResponse
)
// Connection/node kinds, usable as bit flags.
// The original used octal literals (01, 02, 04), which is error-prone;
// 1 << iota makes the flag intent explicit while keeping the same values.
const (
	UnknowType = 1 << iota // 1: not yet identified (NOTE(review): name is a typo of "UnknownType"; kept for compatibility)
	UsrClient              // 2: user client connection
	RaftNode               // 4: raft peer connection
)
// Header is the fixed-size message preamble: Length is presumably the
// payload size in bytes and Type one of the message-type constants above
// — confirm against the encoder/decoder.
type Header struct {
	Length uint32
	Type   uint32
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tablestore
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/timer/api"
)
// timerExt is the JSON document stored in the TIMER_EXT column, holding
// optional record fields that have no dedicated column. All fields are
// omitted from the JSON when empty.
type timerExt struct {
	Tags   []string          `json:"tags,omitempty"`
	Manual *manualRequestObj `json:"manual,omitempty"`
	Event  *eventExtObj      `json:"event,omitempty"`
}
// CreateTimerTableSQL returns a SQL to create timer table.
// The table name is backquoted via indentString, so dbName/tableName may
// contain reserved words but must not contain backticks.
func CreateTimerTableSQL(dbName, tableName string) string {
	return fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s (
ID BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
NAMESPACE VARCHAR(256) NOT NULL,
TIMER_KEY VARCHAR(256) NOT NULL,
TIMER_DATA BLOB,
TIMEZONE VARCHAR(64) NOT NULL,
SCHED_POLICY_TYPE VARCHAR(32) NOT NULL,
SCHED_POLICY_EXPR VARCHAR(256) NOT NULL,
HOOK_CLASS VARCHAR(64) NOT NULL,
WATERMARK TIMESTAMP DEFAULT NULL,
ENABLE TINYINT(2) NOT NULL,
TIMER_EXT JSON NOT NULL,
EVENT_STATUS VARCHAR(32) NOT NULL,
EVENT_ID VARCHAR(64) NOT NULL,
EVENT_DATA BLOB,
EVENT_START TIMESTAMP DEFAULT NULL,
SUMMARY_DATA BLOB,
CREATE_TIME TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
UPDATE_TIME TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
VERSION BIGINT(64) UNSIGNED NOT NULL,
PRIMARY KEY (ID),
UNIQUE KEY timer_key(NAMESPACE, TIMER_KEY),
KEY hook_class(HOOK_CLASS)
)`, indentString(dbName, tableName))
}
// indentString renders a fully-qualified, backquoted table reference of
// the form `db`.`table`.
func indentString(dbName, tableName string) string {
	quote := func(ident string) string {
		return fmt.Sprintf("`%s`", ident)
	}
	return quote(dbName) + "." + quote(tableName)
}
// buildInsertTimerSQL builds the INSERT statement (with %? placeholders)
// and its ordered argument list for persisting a new timer record.
// The argument order must match the column list exactly.
func buildInsertTimerSQL(dbName, tableName string, record *api.TimerRecord) (string, []any, error) {
	// Zero times are inserted as NULL; non-zero times are sent as unix
	// seconds and converted server-side with FROM_UNIXTIME.
	var watermark, eventStart any
	watermarkFormat, eventStartFormat := "%?", "%?"
	if !record.Watermark.IsZero() {
		watermark = record.Watermark.Unix()
		watermarkFormat = "FROM_UNIXTIME(%?)"
	}
	if !record.EventStart.IsZero() {
		eventStart = record.EventStart.Unix()
		eventStartFormat = "FROM_UNIXTIME(%?)"
	}
	// An unset event status defaults to idle.
	eventStatus := record.EventStatus
	if eventStatus == "" {
		eventStatus = api.SchedEventIdle
	}
	// Optional fields are packed into the TIMER_EXT JSON column.
	ext := &timerExt{
		Tags:   record.Tags,
		Manual: newManualRequestObj(record.ManualRequest),
		Event:  newEventExtObj(record.EventExtra),
	}
	extJSON, err := json.Marshal(ext)
	if err != nil {
		return "", nil, err
	}
	// %% escapes a literal % for Sprintf, so the final text contains %?
	// placeholders. JSON_MERGE_PATCH('{}', %?) normalizes the JSON value
	// server-side. VERSION starts at 1 for a new record (no arg needed).
	sql := fmt.Sprintf("INSERT INTO %s ("+
		"NAMESPACE, "+
		"TIMER_KEY, "+
		"TIMER_DATA, "+
		"TIMEZONE, "+
		"SCHED_POLICY_TYPE, "+
		"SCHED_POLICY_EXPR, "+
		"HOOK_CLASS, "+
		"WATERMARK, "+
		"ENABLE, "+
		"TIMER_EXT, "+
		"EVENT_ID, "+
		"EVENT_STATUS, "+
		"EVENT_START, "+
		"EVENT_DATA, "+
		"SUMMARY_DATA, "+
		"VERSION) "+
		"VALUES (%%?, %%?, %%?, %%?, %%?, %%?, %%?, %s, %%?, JSON_MERGE_PATCH('{}', %%?), %%?, %%?, %s, %%?, %%?, 1)",
		indentString(dbName, tableName),
		watermarkFormat,
		eventStartFormat,
	)
	return sql, []any{
		record.Namespace,
		record.Key,
		record.Data,
		record.TimeZone,
		string(record.SchedPolicyType),
		record.SchedPolicyExpr,
		record.HookClass,
		watermark,
		record.Enable,
		json.RawMessage(extJSON),
		record.EventID,
		string(eventStatus),
		eventStart,
		record.EventData,
		record.SummaryData,
	}, nil
}
// buildSelectTimerSQL builds the SELECT statement and placeholder
// arguments for fetching timer rows matching cond. A nil cond selects
// every row (WHERE 1).
func buildSelectTimerSQL(dbName, tableName string, cond api.Cond) (string, []any, error) {
	criteria, args, err := buildCondCriteria(cond, make([]any, 0, 8))
	if err != nil {
		return "", nil, err
	}
	sql := fmt.Sprintf("SELECT "+
		"ID, "+
		"NAMESPACE, "+
		"TIMER_KEY, "+
		"TIMER_DATA, "+
		"TIMEZONE, "+
		"SCHED_POLICY_TYPE, "+
		"SCHED_POLICY_EXPR, "+
		"HOOK_CLASS, "+
		"WATERMARK, "+
		"ENABLE, "+
		"TIMER_EXT, "+
		"EVENT_STATUS, "+
		"EVENT_ID, "+
		"EVENT_DATA, "+
		"EVENT_START, "+
		"SUMMARY_DATA, "+
		"CREATE_TIME, "+
		"UPDATE_TIME, "+
		"VERSION "+
		"FROM %s WHERE %s",
		indentString(dbName, tableName),
		criteria,
	)
	return sql, args, nil
}
// buildCondCriteria renders cond as a SQL criteria fragment, appending
// any placeholder values to args. A nil cond matches everything and is
// rendered as the constant "1".
func buildCondCriteria(cond api.Cond, args []any) (criteria string, _ []any, err error) {
	if cond == nil {
		return "1", args, nil
	}
	switch c := cond.(type) {
	case *api.TimerCond:
		if criteria, args, err = buildTimerCondCriteria(c, args); err != nil {
			return "", nil, err
		}
		return criteria, args, nil
	case *api.Operator:
		return buildOperatorCriteria(c, args)
	}
	return "", nil, errors.Errorf("unsupported condition type: %T", cond)
}
// buildTimerCondCriteria renders a *api.TimerCond as an AND-joined SQL
// fragment, appending placeholder values to args. Returns the constant
// "1" (match all) when no condition field is set.
func buildTimerCondCriteria(cond *api.TimerCond, args []any) (string, []any, error) {
	// Capacity heuristic: reuse the spare capacity the caller allocated
	// in args as a guess for the number of criteria items.
	items := make([]string, 0, cap(args)-len(args))
	if val, ok := cond.ID.Get(); ok {
		items = append(items, "ID = %?")
		args = append(args, val)
	}
	if val, ok := cond.Namespace.Get(); ok {
		items = append(items, "NAMESPACE = %?")
		args = append(args, val)
	}
	if val, ok := cond.Key.Get(); ok {
		if cond.KeyPrefix {
			// NOTE(review): val goes into a LIKE pattern without escaping
			// '%' or '_' — confirm timer keys cannot contain wildcards.
			items = append(items, "TIMER_KEY LIKE %?")
			args = append(args, val+"%")
		} else {
			items = append(items, "TIMER_KEY = %?")
			args = append(args, val)
		}
	}
	if vals, ok := cond.Tags.Get(); ok && len(vals) > 0 {
		bs, err := json.Marshal(vals)
		if err != nil {
			return "", nil, err
		}
		// Row matches only if its tags array exists and contains every
		// requested tag.
		items = append(items,
			"JSON_EXTRACT(TIMER_EXT, '$.tags') IS NOT NULL",
			"JSON_CONTAINS((TIMER_EXT->'$.tags'), %?)",
		)
		args = append(args, json.RawMessage(bs))
	}
	if len(items) == 0 {
		return "1", args, nil
	}
	return strings.Join(items, " AND "), args, nil
}
// buildOperatorCriteria renders an AND/OR (optionally NOT-ed) tree of
// child conditions as a SQL fragment, appending placeholder values to
// args. Constant children ("1"/"0") are never parenthesized.
func buildOperatorCriteria(op *api.Operator, args []any) (string, []any, error) {
	if len(op.Children) == 0 {
		return "", nil, errors.New("children should not be empty")
	}
	var opStr string
	switch op.Op {
	case api.OperatorAnd:
		opStr = "AND"
	case api.OperatorOr:
		opStr = "OR"
	default:
		return "", nil, errors.Errorf("unsupported operator: %v", op.Op)
	}
	criteriaList := make([]string, 0, len(op.Children))
	for _, child := range op.Children {
		var criteria string
		var err error
		criteria, args, err = buildCondCriteria(child, args)
		if err != nil {
			return "", nil, err
		}
		// Parenthesize non-constant children only when there is more than
		// one, to keep operator precedence explicit.
		if len(op.Children) > 1 && criteria != "1" && criteria != "0" {
			criteria = fmt.Sprintf("(%s)", criteria)
		}
		criteriaList = append(criteriaList, criteria)
	}
	criteria := strings.Join(criteriaList, " "+opStr+" ")
	if op.Not {
		// Fold NOT over constant true/false; otherwise emit !(...).
		switch criteria {
		case "0":
			criteria = "1"
		case "1":
			criteria = "0"
		default:
			criteria = fmt.Sprintf("!(%s)", criteria)
		}
	}
	return criteria, args, nil
}
// buildUpdateTimerSQL builds the UPDATE statement and arguments for
// applying update to the timer row with the given ID. The timerID is the
// final placeholder argument.
func buildUpdateTimerSQL(dbName, tblName string, timerID string, update *api.TimerUpdate) (string, []any, error) {
	criteria, args, err := buildUpdateCriteria(update, make([]any, 0, 6))
	if err != nil {
		return "", nil, err
	}
	sql := fmt.Sprintf("UPDATE %s SET %s WHERE ID = %%?", indentString(dbName, tblName), criteria)
	return sql, append(args, timerID), nil
}
// manualRequestObj is the JSON form of api.ManualRequest stored inside
// the TIMER_EXT column. Pointer fields distinguish "absent" from zero so
// JSON_MERGE_PATCH semantics work correctly.
type manualRequestObj struct {
	RequestID       *string `json:"request_id"`
	RequestTimeUnix *int64  `json:"request_time_unix"`
	TimeoutSec      *int64  `json:"timeout_sec"`
	Processed       *bool   `json:"processed"`
	EventID         *string `json:"event_id"`
}
// newManualRequestObj converts an api.ManualRequest to its JSON storage
// form. It returns nil for the zero value so the "manual" key is omitted
// entirely; zero-valued fields are likewise left nil (omitted).
func newManualRequestObj(manual api.ManualRequest) *manualRequestObj {
	var empty api.ManualRequest
	if manual == empty {
		return nil
	}
	obj := &manualRequestObj{}
	if v := manual.ManualRequestID; v != "" {
		obj.RequestID = &v
	}
	if v := manual.ManualRequestTime; !v.IsZero() {
		unix := v.Unix()
		obj.RequestTimeUnix = &unix
	}
	if v := manual.ManualTimeout; v != 0 {
		// Durations are persisted as whole seconds (truncated).
		sec := int64(v / time.Second)
		obj.TimeoutSec = &sec
	}
	if v := manual.ManualProcessed; v {
		processed := true
		obj.Processed = &processed
	}
	if v := manual.ManualEventID; v != "" {
		obj.EventID = &v
	}
	return obj
}
// ToManualRequest converts the stored JSON object back to an
// api.ManualRequest. A nil receiver yields the zero value; nil fields
// leave the corresponding target field at its zero value.
func (o *manualRequestObj) ToManualRequest() (r api.ManualRequest) {
	if o == nil {
		return
	}
	if v := o.RequestID; v != nil {
		r.ManualRequestID = *v
	}
	if v := o.RequestTimeUnix; v != nil {
		r.ManualRequestTime = time.Unix(*v, 0)
	}
	if v := o.TimeoutSec; v != nil {
		r.ManualTimeout = time.Duration(*v) * time.Second
	}
	if v := o.Processed; v != nil {
		r.ManualProcessed = *v
	}
	if v := o.EventID; v != nil {
		r.ManualEventID = *v
	}
	return r
}
// eventExtObj is the JSON form of api.EventExtra stored inside the
// TIMER_EXT column; pointers distinguish "absent" from zero.
type eventExtObj struct {
	ManualRequestID *string `json:"manual_request_id"`
	WatermarkUnix   *int64  `json:"watermark_unix"`
}
// newEventExtObj converts an api.EventExtra to its JSON storage form,
// returning nil for the zero value so the "event" key is omitted.
func newEventExtObj(e api.EventExtra) *eventExtObj {
	var empty api.EventExtra
	if e == empty {
		return nil
	}
	obj := &eventExtObj{}
	if v := e.EventManualRequestID; v != "" {
		obj.ManualRequestID = &v
	}
	if v := e.EventWatermark; !v.IsZero() {
		unix := v.Unix()
		obj.WatermarkUnix = &unix
	}
	return obj
}
// ToEventExtra converts the stored JSON object back to an
// api.EventExtra. A nil receiver yields the zero value.
func (o *eventExtObj) ToEventExtra() (e api.EventExtra) {
	if o == nil {
		return
	}
	if v := o.ManualRequestID; v != nil {
		e.EventManualRequestID = *v
	}
	if v := o.WatermarkUnix; v != nil {
		// Use the captured v: the original redundantly re-dereferenced
		// o.WatermarkUnix here, inconsistent with ToManualRequest.
		e.EventWatermark = time.Unix(*v, 0)
	}
	return
}
// buildUpdateCriteria renders the SET clause for an UPDATE statement from
// update, appending placeholder values to args. Columns stored inside the
// TIMER_EXT JSON document are merged via JSON_MERGE_PATCH; VERSION is
// always incremented.
func buildUpdateCriteria(update *api.TimerUpdate, args []any) (string, []any, error) {
	// Capacity heuristic based on the caller's pre-allocated spare room.
	updateFields := make([]string, 0, cap(args)-len(args))
	if val, ok := update.Enable.Get(); ok {
		updateFields = append(updateFields, "ENABLE = %?")
		args = append(args, val)
	}
	// extFields collects keys destined for the TIMER_EXT JSON column.
	extFields := make(map[string]any)
	if val, ok := update.Tags.Get(); ok {
		if len(val) == 0 {
			// nil marshals to JSON null, which JSON_MERGE_PATCH treats as
			// "delete this key" — clearing the tags.
			val = nil
		}
		extFields["tags"] = val
	}
	if val, ok := update.ManualRequest.Get(); ok {
		extFields["manual"] = newManualRequestObj(val)
	}
	if val, ok := update.EventExtra.Get(); ok {
		extFields["event"] = newEventExtObj(val)
	}
	if val, ok := update.TimeZone.Get(); ok {
		updateFields = append(updateFields, "TIMEZONE = %?")
		args = append(args, val)
	}
	if val, ok := update.SchedPolicyType.Get(); ok {
		updateFields = append(updateFields, "SCHED_POLICY_TYPE = %?")
		args = append(args, string(val))
	}
	if val, ok := update.SchedPolicyExpr.Get(); ok {
		updateFields = append(updateFields, "SCHED_POLICY_EXPR = %?")
		args = append(args, val)
	}
	if val, ok := update.EventStatus.Get(); ok {
		updateFields = append(updateFields, "EVENT_STATUS = %?")
		args = append(args, string(val))
	}
	if val, ok := update.EventID.Get(); ok {
		updateFields = append(updateFields, "EVENT_ID = %?")
		args = append(args, val)
	}
	if val, ok := update.EventData.Get(); ok {
		updateFields = append(updateFields, "EVENT_DATA = %?")
		args = append(args, val)
	}
	// Zero times clear the column to NULL; non-zero times are sent as
	// unix seconds and converted with FROM_UNIXTIME.
	if val, ok := update.EventStart.Get(); ok {
		if val.IsZero() {
			updateFields = append(updateFields, "EVENT_START = NULL")
		} else {
			updateFields = append(updateFields, "EVENT_START = FROM_UNIXTIME(%?)")
			args = append(args, val.Unix())
		}
	}
	if val, ok := update.Watermark.Get(); ok {
		if val.IsZero() {
			updateFields = append(updateFields, "WATERMARK = NULL")
		} else {
			updateFields = append(updateFields, "WATERMARK = FROM_UNIXTIME(%?)")
			args = append(args, val.Unix())
		}
	}
	if val, ok := update.SummaryData.Get(); ok {
		updateFields = append(updateFields, "SUMMARY_DATA = %?")
		args = append(args, val)
	}
	if len(extFields) > 0 {
		jsonBytes, err := json.Marshal(extFields)
		if err != nil {
			return "", nil, err
		}
		// Merge the changed keys into the existing JSON document.
		updateFields = append(updateFields, "TIMER_EXT = JSON_MERGE_PATCH(TIMER_EXT, %?)")
		args = append(args, json.RawMessage(jsonBytes))
	}
	updateFields = append(updateFields, "VERSION = VERSION + 1")
	return strings.Join(updateFields, ", "), args, nil
}
// buildDeleteTimerSQL returns the DELETE statement (with a %? placeholder)
// and its arguments for removing the timer row with the given ID.
func buildDeleteTimerSQL(dbName, tblName string, timerID string) (string, []any) {
	table := indentString(dbName, tblName)
	sql := fmt.Sprintf("DELETE FROM %s WHERE ID = %%?", table)
	return sql, []any{timerID}
}
|
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.
package collect
import (
"expvar"
"log"
"runtime"
"strings"
"sync"
"time"
"opentsp.org/contrib/collect-netscaler/nitro"
"opentsp.org/internal/tsdb"
)
// statCycleMillis exposes, via expvar, the cumulative milliseconds spent
// running collection cycles.
var (
	statCycleMillis = expvar.NewInt("collect.CycleMillis")
)
// collector represents a collector for a given subsystem. Concrete
// collectors must additionally implement statsCollector or
// configCollector (collect panics otherwise).
type collector interface {
	Subsystem() string // "lbvserver", "ssl", etc.
}
// statsCollector is a collector that calls a member of the "stat" family of
// functions in the Nitro API and emits points from the response.
type statsCollector interface {
	CollectStats(emitFn, *nitro.ResponseStat)
}
// configCollector is a collector that calls a member of the "config" family of
// functions in the Nitro API and emits points from the response.
type configCollector interface {
	CollectConfig(emitFn, *nitro.ResponseConfig)
}
// emitFn queues a data point for emission: a series name (possibly with
// "k=v" tag tokens, see newEmitter) and a numeric value.
type emitFn func(string, interface{})
// Client is the shared Nitro API client used by collect. It must be set
// before Loop runs — presumably by the package's initialization code;
// confirm the setup site.
var Client *nitro.Client
// collect emits data points based on the provided collector. It fetches
// the collector's subsystem via the Nitro "stat" or "config" API
// (depending on which interface c implements). Fetch errors are logged
// and the cycle skipped; panics inside the collector are contained by
// logPanics rather than crashing the process.
func collect(emit emitFn, c collector) {
	switch cc := c.(type) {
	default:
		// Programmer error: every registered collector must implement
		// statsCollector or configCollector.
		log.Panicf("unsupported collector type: %T", c)
	case statsCollector:
		resp, err := Client.Stat.Get(c.Subsystem())
		if err != nil {
			log.Print(err)
			return
		}
		logPanics(func() {
			cc.CollectStats(emit, resp)
		})
	case configCollector:
		resp, err := Client.Config.Get(c.Subsystem())
		if err != nil {
			log.Print(err)
			return
		}
		logPanics(func() {
			cc.CollectConfig(emit, resp)
		})
	}
}
// Loop loops indefinitely, running all collectors at the provided interval,
// and writing to w the resulting data points. The cumulative cycle time is
// published via statCycleMillis.
func Loop(w chan *tsdb.Point, interval time.Duration) {
	tick := tsdb.Tick(interval)
	t := time.Now()
	for ; ; t = <-tick {
		start := time.Now()
		emit := newEmitter(w, t)
		var wg sync.WaitGroup
		for _, c := range collectors {
			c := c
			// Add must precede the goroutine launch: the original called
			// Add after `go`, so Done could run (and Wait return) before
			// the counter was incremented — a race. Done is deferred so a
			// panic escaping collect cannot deadlock Wait.
			wg.Add(1)
			go func() {
				defer wg.Done()
				collect(emit, c)
			}()
		}
		wg.Wait()
		statCycleMillis.Add(time.Since(start).Nanoseconds() / 1e6)
	}
}
// newEmitter returns a function that emits data points for the provided
// time instant. The emitted function accepts ints/floats directly, or
// *uint64/*float64 where a nil pointer means "no data" and is skipped;
// a nil interface value or an unsupported type panics.
func newEmitter(w chan *tsdb.Point, timestamp time.Time) emitFn {
	return func(series string, value interface{}) {
		var v interface{}
		if value == nil {
			panic("zero value")
		}
		switch u := value.(type) {
		case uint64, int64, int, float64:
			v = u
		case *uint64:
			if u != nil {
				v = *u
			} else {
				return
			}
		case *float64:
			if u != nil {
				v = *u
			} else {
				return
			}
		default:
			log.Panicf("unsupported type: %T", value)
		}
		// All series are namespaced under "netscaler.".
		series = "netscaler." + series
		// The series string may embed tags as "k=v" tokens; '=' is turned
		// into a space so Fields yields [metric, k, v, ...] — presumably
		// consumed by tsdb.NewPoint as alternating tag keys/values;
		// confirm against the tsdb package.
		id := strings.Fields(strings.Replace(series, "=", " ", -1))
		p, err := tsdb.NewPoint(timestamp, v, id[0], id[1:]...)
		if err != nil {
			panic(err)
		}
		w <- p
	}
}
// collectors is a list of all registered collectors, populated via
// register at startup.
var collectors []collector
// register appends c to the package-wide collector list. Not safe for
// concurrent use — presumably called only from init functions; confirm.
func register(c collector) {
	collectors = append(collectors, c)
}
// registerStatFunc registers a plain function as a stats collector for
// the given subsystem.
func registerStatFunc(subsystem string, fn func(emitFn, *nitro.ResponseStat)) {
	register(statFunc{
		subsystem: subsystem,
		fn:        fn,
	})
}
// registerConfigFunc registers a plain function as a config collector for
// the given subsystem.
func registerConfigFunc(subsystem string, fn func(emitFn, *nitro.ResponseConfig)) {
	register(configFunc{
		subsystem: subsystem,
		fn:        fn,
	})
}
// statFunc is an adapter to allow the use of ordinary functions as stats
// collectors (it implements both collector and statsCollector).
type statFunc struct {
	subsystem string
	fn        func(emitFn, *nitro.ResponseStat)
}
// Subsystem returns the Nitro subsystem this adapter collects.
func (sf statFunc) Subsystem() string {
	return sf.subsystem
}
// CollectStats forwards to the wrapped function.
func (sf statFunc) CollectStats(emit emitFn, r *nitro.ResponseStat) {
	sf.fn(emit, r)
}
// configFunc is an adapter to allow the use of ordinary functions as config
// collectors (it implements both collector and configCollector).
type configFunc struct {
	subsystem string
	fn        func(emitFn, *nitro.ResponseConfig)
}
// Subsystem returns the Nitro subsystem this adapter collects.
func (cf configFunc) Subsystem() string {
	return cf.subsystem
}
// CollectConfig forwards to the wrapped function.
func (cf configFunc) CollectConfig(emit emitFn, r *nitro.ResponseConfig) {
	cf.fn(emit, r)
}
// logPanics invokes fn, converting any panic into a logged message with a
// stack trace instead of crashing the process.
func logPanics(fn func()) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		stack := make([]byte, 4096)
		n := runtime.Stack(stack, false)
		log.Printf("handler panic: %v\n%s", r, stack[:n])
	}()
	fn()
}
|
/*
Given a string, return a "rotated left 2" version where the first 2 chars are moved to the end. The string length will be at least 2.
*/
package main
import (
"fmt"
)
// left2 returns s rotated left by two characters: the first two bytes are
// moved to the end. Strings shorter than two bytes are returned as-is.
func left2(s string) string {
	if len(s) < 2 {
		return s
	}
	head, tail := s[:2], s[2:]
	return tail + head
}
// main runs a small self-test of left2, printing "OK" only when every
// case passes (and "NOT OK" otherwise).
func main() {
	cases := []struct {
		in, want string
	}{
		{"There", "ereTh"},
		{"te", "te"},
		{"tee", "ete"},
		{"t", "t"},
	}
	passed := 0
	for _, c := range cases {
		if left2(c.in) == c.want {
			passed++
		}
	}
	if passed == len(cases) {
		fmt.Println("OK")
	} else {
		fmt.Println("NOT OK")
	}
}
|
package main
import "fmt"
import "math"
// constante is a typed string constant used by the demo below.
const constante string = "gopher"
// main demonstrates typed and untyped constants.
func main() {
	fmt.Println(constante) // dropped the stray semicolon (non-gofmt)

	// n and d are untyped constants; 3e20 / n is computed at arbitrary
	// precision at compile time.
	const n = 500
	const d = 3e20 / n
	fmt.Println(d)

	// An untyped constant takes the type its context demands.
	fmt.Println(int64(n))
	fmt.Println(math.Sin(n))
}
|
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)
// Event-type label values recorded by CacheRecorder.
const (
	// CacheEventTypeMiss is the event type for cache misses.
	CacheEventTypeMiss = "cache_miss"
	// CacheEventTypeHit is the event type for cache hits.
	CacheEventTypeHit = "cache_hit"
)
// CacheRecorder is a recorder for cache events.
type CacheRecorder struct {
	// cacheEventsCounter counts cache events, labeled by event_type,
	// name and namespace.
	cacheEventsCounter *prometheus.CounterVec
}
// NewCacheRecorder returns a new CacheRecorder.
// The configured labels are: event_type, name, namespace.
// The event_type values used by this package are:
//   - CacheEventTypeMiss ("cache_miss")
//   - CacheEventTypeHit ("cache_hit")
//
// (the counter itself accepts any event string; the original comment
// listed "miss"/"hit"/"update", which does not match the constants).
//
// The name is the name of the reconciled resource.
// The namespace is the namespace of the reconciled resource.
func NewCacheRecorder() *CacheRecorder {
	return &CacheRecorder{
		cacheEventsCounter: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "gotk_cache_events_total",
				Help: "Total number of cache retrieval events for a Gitops Toolkit resource reconciliation.",
			},
			[]string{"event_type", "name", "namespace"},
		),
	}
}
// Collectors returns the metrics.Collector objects for the CacheRecorder,
// suitable for registration in a prometheus registry.
func (r *CacheRecorder) Collectors() []prometheus.Collector {
	return []prometheus.Collector{
		r.cacheEventsCounter,
	}
}
// IncCacheEvents increments by 1 the cache event count for the given
// event type, name and namespace. (Comment fixed: it previously named a
// non-existent IncCacheEventCount method.)
func (r *CacheRecorder) IncCacheEvents(event, name, namespace string) {
	r.cacheEventsCounter.WithLabelValues(event, name, namespace).Inc()
}
// MustMakeMetrics creates a new CacheRecorder and registers its metrics
// collectors in the controller-runtime metrics registry, panicking on a
// registration conflict (MustRegister semantics).
func MustMakeMetrics() *CacheRecorder {
	r := NewCacheRecorder()
	metrics.Registry.MustRegister(r.Collectors()...)
	return r
}
|
package dushengchen
/**
Submission:
https://leetcode.com/submissions/detail/740152233/
Runtime: 40 ms, faster than 11.76% of Go online submissions for Binary Tree Maximum Path Sum.
Memory Usage: 7.9 MB, less than 36.63% of Go online submissions for Binary Tree Maximum Path Sum.
*/
// maxPathSum returns the maximum path sum in the binary tree: the larger
// of the best path ending at the root and the best path found anywhere
// inside a subtree.
func maxPathSum(root *TreeNode) int {
	pass, max := _maxPathSum(root)
	return MaxInt(pass, max)
}
// _maxPathSum returns two values for the subtree rooted at root:
//  1. the maximum sum of a downward path that passes through (ends at) root;
//  2. the maximum path sum found anywhere inside the subtree.
//
// (Comment translated from Russian.) -1001 acts as negative infinity,
// which assumes node values are >= -1000 (the LeetCode constraint) —
// TODO confirm.
func _maxPathSum(root *TreeNode) (int, int) {
	if root == nil {
		return -1001, -1001
	}
	lpass, lmax := _maxPathSum(root.Left)
	rpass, rmax := _maxPathSum(root.Right)
	// Best downward path through root: extend the better child path, or
	// restart at root alone.
	passMax := MaxInt(lpass+root.Val, rpass+root.Val, root.Val)
	// A complete path may also bend through root, joining both children.
	return passMax, MaxInt(lmax, rmax, passMax, lpass+root.Val+rpass)
}
|
package client
import (
apischema "github.com/giantswarm/api-schema"
)
// UpdatePassword updates the password for the given user to newPass.
// oldPass must contain the current password, otherwise an error is
// returned.
func (client *Client) UpdatePassword(userID, oldPass, newPass string) error {
	payload := map[string]string{
		"old_password": oldPass,
		"new_password": newPass,
	}
	resp, err := client.postSchemaJSON("/user/"+userID+"/password/update", payload)
	if err != nil {
		return Mask(err)
	}
	// The server signals success with a "resource updated" status code.
	if err := resp.EnsureStatusCodes(apischema.STATUS_CODE_RESOURCE_UPDATED); err != nil {
		return Mask(err)
	}
	return nil
}
// ResetPassword resets the password of the given user to the new password.
// No verification of the old password is done.
func (client *Client) ResetPassword(userID, newPass string) error {
	payload := map[string]string{
		"new_password": newPass,
	}
	resp, err := client.postSchemaJSON("/user/"+userID+"/password/reset", payload)
	if err != nil {
		return Mask(err)
	}
	// The server signals success with a "resource updated" status code.
	if err := resp.EnsureStatusCodes(apischema.STATUS_CODE_RESOURCE_UPDATED); err != nil {
		return Mask(err)
	}
	return nil
}
|
package spells
import (
"os"
"fmt"
"log"
"path"
"sync"
"io/ioutil"
"database/sql"
"gopkg.in/kyokomi/emoji.v1"
"github.com/deepdeeppink/tgbot/db"
"github.com/deepdeeppink/tgbot/mux"
"github.com/deepdeeppink/tgbot/cfg"
"github.com/deepdeeppink/tgbot/errs"
"github.com/deepdeeppink/tgbot/state"
"github.com/deepdeeppink/tgbot/reports"
)
// Package-level singletons and configuration.
var (
	E        = errs.E()         // error sink: non-nil errors are sent here (presumably logged/handled by errs — confirm)
	config   = cfg.GetConfig()  // global bot configuration
	instance *SpellBook         // lazily-initialized singleton, guarded by once
	once     sync.Once
	tgbpath  = os.Getenv("tgbpath") // root directory containing the sql/ code tree
)
// Locations of per-spell SQL sources: <tgbpath>/sql/<spell>/{read,cast}.sql.
const (
	codeDir  = "sql"
	readFile = "read.sql"
	castFile = "cast.sql"
)
// Cache maps keys to result channels. NOTE(review): appears unused within
// this file — confirm other files use it before removing.
type Cache map[string](chan interface{})
// Spell is a bot command: Read renders its textual output for a state,
// Cast performs its side effect, Cache precomputes results for every
// parameter combination, and IsFile reports whether the result is a file
// reference rather than text (see FileSpell).
type Spell interface {
	Read(*state.State) string
	Cast(*state.State)
	Cache()
	IsFile() bool
}
// SqlSpell is a spell backed by two SQL templates: reader produces the
// text shown to the user, caster performs the mutation. Both are used as
// fmt format strings filled with state values selected by params.
type SqlSpell struct {
	reader string
	caster string
	conn   *sql.DB
	params []string
	mutex  *mux.Mux // deduplicates concurrent identical casts (see Cast)
}
// FileSpell is a spell whose result is a report file rather than text.
type FileSpell struct {
	params []string
}
// SpellBook maps spell names to their implementations.
type SpellBook map[string]Spell
// GetSpellBook returns the process-wide spell book, building it exactly
// once on first use.
func GetSpellBook() *SpellBook {
	once.Do(func() {
		instance = newSpellBook()
	})
	return instance
}
// newSpellBook builds a SpellBook from the configured spells.
// NOTE(review): spell types other than "file" and "sql" are silently
// ignored — confirm that is intended.
func newSpellBook() *SpellBook {
	b := make(SpellBook)
	for name, v := range config.Spells {
		switch v.Type {
		case "file":
			b[name] = NewFileSpell(v.Params...)
		case "sql":
			b[name] = NewSqlSpell(name, v.Params...)
		}
	}
	return &b
}
// NewSqlSpell loads the read/cast SQL templates for codeName from
// <tgbpath>/sql/<codeName>/ and wires up the shared DB connection.
// File read errors are reported via the E channel; on error the
// corresponding template is the empty string.
func NewSqlSpell(codeName string, params ...string) *SqlSpell {
	conn := db.GetConn()
	fname := path.Join(tgbpath, codeDir, codeName, readFile)
	reader, err := ioutil.ReadFile(fname); E <- err
	fname = path.Join(tgbpath, codeDir, codeName, castFile)
	caster, err := ioutil.ReadFile(fname); E <- err
	m := mux.NewMux()
	return &SqlSpell{reader: string(reader), caster: string(caster), conn: conn, params: params, mutex: m}
}
// Read renders the spell's read query for the given state and returns the
// single scanned string, with emoji codes expanded.
func (s *SqlSpell) Read(t *state.State) string {
	var result string
	// TODO: rewrite using text formatting with variable substitution via @
	// (translated from Russian).
	// NOTE(review): state values are interpolated into the SQL text with
	// Sprintf rather than bound as parameters — confirm inputs are trusted.
	code := fmt.Sprintf(s.reader, t.GetValues(s.params...)...)
	stmt, err := s.conn.Prepare(code); E <- err
	defer stmt.Close()
	E <- stmt.QueryRow().Scan(&result)
	return emoji.Sprint(result)
}
// Cast executes the spell's cast statement for the sub-state selected by
// s.params. Concurrent identical casts are coalesced: only the first
// caller runs the statement; later callers block on the channel until the
// first frees it (see mux.Mux — confirm exact semantics).
func (s *SqlSpell) Cast(t *state.State) {
	tt := t.Pure(s.params...)
	ch, first := s.mutex.Clc(tt)
	if first {
		s.cast(&tt)
		s.mutex.Free(tt)
		return
	}
	<-ch
}
// cast fills the caster template with state values and executes it.
// Errors are reported via the E channel.
func (s *SqlSpell) cast(t *state.State) {
	params := t.GetValues(s.params...)
	code := fmt.Sprintf(s.caster, params...)
	stmt, err := s.conn.Prepare(code); E <- err
	defer stmt.Close()
	_, err = stmt.Exec(); E <- err
}
// IsFile reports that SQL spells produce text, not files.
func (s *SqlSpell) IsFile() bool {
	return false
}
// Cache pre-executes the cast for every combination of configured
// parameter values.
func (s *SqlSpell) Cache() {
	for combo := range combineParams((*s).params) {
		s.Cast(state.GetState(combo...))
	}
}
// NewFileSpell creates a file-backed spell over the given state parameters.
func NewFileSpell(params ...string) *FileSpell {
	return &FileSpell{params: params}
}
// Read returns the report file reference for the sub-state selected by
// s.params.
func (s *FileSpell) Read(t *state.State) string {
	substate := t.Pure(s.params...)
	return reports.GetReportFile(&substate)
}
// Cast is a no-op: file spells currently have no side effect.
func (s *FileSpell) Cast(t *state.State) {
	// Temporary no cast on file spells
}
// IsFile reports that file spells produce file references.
func (s *FileSpell) IsFile() bool {
	return true
}
// Cache regenerates the report file for every combination of configured
// parameter values, logging each combination as it completes.
func (s *FileSpell) Cache() {
	for combo := range combineParams((*s).params) {
		reports.UpdateFile(state.GetState(combo...))
		log.Println(combo, " cached")
	}
}
// combineParams streams the cartesian product of the configured values
// for the given parameter names over the returned channel, closing it
// when exhausted.
func combineParams(names []string) <-chan []string {
	o := make(chan []string)
	go func(o chan []string) {
		defer close(o)
		_combine(o, []string{}, names)
	}(o)
	return o
}
func _combine(o chan []string, combo []string, names []string) {
if len(names) == 0 {
o <- combo
return
}
var newCombo []string
n := names[0]
for _, v := range config.Params[n] {
newCombo = append(combo, v)
_combine(o, newCombo, names[1:])
}
} |
package oauthcli
import (
"context"
"fmt"
"os"
"golang.org/x/oauth2"
"golang.org/x/oauth2/github"
)
// Setup returns an oauth2.Config configured to talk to GitHub. The client
// ID and secret are read from the GITHUB_CLIENT and GITHUB_SECRET
// environment variables; the requested scopes are "repo" and "user".
func Setup() *oauth2.Config {
	return &oauth2.Config{
		ClientID:     os.Getenv("GITHUB_CLIENT"),
		ClientSecret: os.Getenv("GITHUB_SECRET"),
		Scopes:       []string{"repo", "user"},
		Endpoint:     github.Endpoint,
	}
}
// GetToken retrieves a GitHub oauth2 token interactively: it prints the
// authorization URL, reads the resulting code from stdin, and exchanges
// it for a token.
func GetToken(ctx context.Context, conf *oauth2.Config) (*oauth2.Token, error) {
	url := conf.AuthCodeURL("state")
	fmt.Printf("Type the following url into your browser and follow the directions on screen: %v\n", url)
	fmt.Println("Paste the code returned in the redirect URL and hit Enter:")
	var code string
	if _, err := fmt.Scan(&code); err != nil {
		return nil, err
	}
	return conf.Exchange(ctx, code)
}
|
package bgp
// BGPRPC is the root of a BGP RPC XML reply (presumably a JunOS
// "show bgp neighbor"-style response — confirm the exact RPC).
type BGPRPC struct {
	Information struct {
		Peers []BGPPeer `xml:"bgp-peer"`
	} `xml:"bgp-information"`
}
// BGPPeer describes one BGP neighbor as reported in the XML reply.
type BGPPeer struct {
	IP             string `xml:"peer-address"`
	ASN            string `xml:"peer-as"`
	State          string `xml:"peer-state"`
	Group          string `xml:"peer-group"`
	Description    string `xml:"description"`
	Flaps          int64  `xml:"flap-count"`
	InputMessages  int64  `xml:"input-messages"`
	OutputMessages int64  `xml:"output-messages"`
	RIBs           []RIB  `xml:"bgp-rib"`
}
// RIB holds per-routing-table prefix counters for a peer.
// NOTE(review): RejectedPrefixes maps to "suppressed-prefix-count" —
// confirm the naming is intentional.
type RIB struct {
	Name               string `xml:"name"`
	ActivePrefixes     int64  `xml:"active-prefix-count"`
	ReceivedPrefixes   int64  `xml:"received-prefix-count"`
	AcceptedPrefixes   int64  `xml:"accepted-prefix-count"`
	RejectedPrefixes   int64  `xml:"suppressed-prefix-count"`
	AdvertisedPrefixes int64  `xml:"advertised-prefix-count"`
}
|
package common
import "time"
// Adapter abstracts a task backend used by the scrum tool.
type Adapter interface {
	Initialize(Configuration)            // Initialize the adapter
	List(string) []*Task                 // Return the adapter Task list to print the scrum
	Move(Task, string) error             // Move the task into the list with the given name
	NextScrum()                          // Hook that is called when a scrum is done
	Add(description string, list string) // Add a new task in the list
	LastScrumDate() time.Time            // Get the last scrum date
}
|
package cmds
import (
"io"
"gx/ipfs/Qmf7G7FikwUsm48Jm4Yw4VBGNZuyRaAMzpWDJcW8V71uV2/go-ipfs-cmdkit"
)
// ResponseEmitter encodes and sends the command code's output to the client.
// It is all a command can write to.
type ResponseEmitter interface {
	// closes http conn or channel
	io.Closer
	// SetLength sets the length of the output.
	// (Fixed: the previous comment about err was copy-pasted from SetError.)
	SetLength(length uint64)
	// SetError sets the response error.
	// err is an interface{} so we don't have to manually convert to error.
	SetError(err interface{}, code cmdsutil.ErrorType) error
	// Gets Stdout and Stderr, for writing to console without using SetOutput
	// TODO I'm not sure we really need that, but lets see
	//Stdout() io.Writer
	//Stderr() io.Writer
	// Tee makes this ResponseEmitter forward all calls to SetError, SetLength and
	// Emit to the passed ResponseEmitter
	//Tee(ResponseEmitter)
	// Emit sends a value.
	// If value is an io.Reader we just copy that to the connection;
	// other values are marshalled.
	Emit(value interface{}) error
}
// EncodingEmitter is a ResponseEmitter whose wire encoding can be swapped
// by supplying an Encoder constructor.
type EncodingEmitter interface {
	ResponseEmitter
	SetEncoder(func(io.Writer) Encoder)
}
// Header exposes the response head (see the Head type elsewhere in this
// package).
type Header interface {
	Head() Head
}
// Copy streams every value from res to re, forwarding the length first,
// until the response is exhausted (io.EOF), at which point re is closed.
func Copy(re ResponseEmitter, res Response) error {
	re.SetLength(res.Length())
	for {
		v, err := res.Next()
		if err == io.EOF {
			re.Close()
			return nil
		}
		if err == ErrRcvdError {
			// Forward the remote error to the emitter.
			// NOTE(review): control then falls through to the err != nil
			// check below and returns ErrRcvdError — confirm whether
			// returning (rather than continuing the loop) is intended.
			re.SetError(res.Error().Message, res.Error().Code)
		}
		if err != nil {
			return err
		}
		err = re.Emit(v)
		if err != nil {
			return err
		}
	}
}
|
// Copyright (c) 2020 twihike. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package structconv
import (
"reflect"
"testing"
)
// TestDecodeStringMap checks decoding of a flat string map into a nested
// struct, covering default naming, tag renaming ("alt_key"), required
// fields, omitted fields ("-"), and nested struct/pointer fields.
func TestDecodeStringMap(t *testing.T) {
	type testNestedStringMap1 struct {
		N1 int
	}
	type testNestedStringMap2 struct {
		N2 int
	}
	type testStringMap struct {
		String  string
		Bool    bool
		Int     int
		Float64 float64
		Default string
		Rename  string `strmap:"alt_key"`
		Required string `strmap:",required"`
		Omitted string `strmap:"-"`
		Nest11  testNestedStringMap1
		Nest12  *testNestedStringMap1
		Nest2   [][][]*testNestedStringMap2
	}
	tests := []struct {
		name string
		in   map[string]string
		want testStringMap
	}{
		{
			"normal",
			map[string]string{
				"String":  "str",
				"Bool":    "true",
				"Int":     "1",
				"Float64": "0.3",
				"Default": "d",
				"alt_key": "alt",
				"Required": "r",
				"Omitted": "-",
				"N1":      "1",
				"N2":      "2",
			},
			// Note: "Omitted" stays empty (tag "-") and Nest2 stays nil
			// since no source key reaches it.
			testStringMap{
				"str",
				true,
				1,
				0.3,
				"d",
				"alt",
				"r",
				"",
				testNestedStringMap1{N1: 1},
				&testNestedStringMap1{N1: 1},
				nil,
			},
		},
	}
	for _, tt := range tests {
		tt := tt // capture for the parallel subtest
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			var got testStringMap
			err := DecodeStringMap(tt.in, &got, nil)
			if err != nil {
				t.Error(err)
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("\nwant = %+v\ngot = %+v", tt.want, got)
			}
		})
	}
}
|
/*
On a crime scene, there are many pieces of evidence that point to a particular person having the murder weapon and motive to kill poor old Tom (although not poor at all).
Create a function that takes phrases/words as clues and forms a sentence formatted to have the murder's name, verb, "Tom as", Reason.
Sn Weapon Verb to be used Reason
1 Poison Poisoned Only X could have such high-grade poison.
2 Knife Butchered Only X was in the kitchen.
3 Bat Swatted Only X is a baseball player.
4 Gun Shot Only X had a gun.
5 Briefcase Smacked Only X was after Tom's money.
Examples
sherlock("Mr.Red", "Knife") ➞ "Mr. Red butchered Tom as only he was in the kitchen."
sherlock("Ms.Blue", "Poison") ➞ "Ms. Blue poisoned Tom as only she could have such high-grade poison."
sherlock("Mr.Red", "Bat") ➞ "Mr. Red swatted Tom as only he was a baseball player."
Notes
Ms/Mr gives you the gender of the murderer.
Feel free to remove the dictionary if you want.
*/
package main
import (
"fmt"
"strings"
)
// main exercises sherlock with the documented examples; any mismatch
// panics via assert.
func main() {
	cases := [][3]string{
		{"Mr.Red", "Knife", "Mr.Red butchered Tom as only he was in the kitchen."},
		{"Ms.Blue", "Poison", "Ms.Blue poisoned Tom as only she could have such high-grade poison."},
		{"Mr.Red", "Bat", "Mr.Red swatted Tom as only he is a baseball player."},
		{"Ms.Purple", "Gun", "Ms.Purple shot Tom as only she had a gun."},
		{"Mr.Brown", "Briefcase", "Mr.Brown smacked Tom as only he was after Tom's money."},
	}
	for _, c := range cases {
		assert(sherlock(c[0], c[1]) == c[2])
	}
}
// assert panics with "assertion failed" unless x is true.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// sherlock builds the accusation sentence for murderer p and weapon w,
// choosing the pronoun from the "Mr."/"Ms." prefix and the verb/reason
// from the weapon. It returns "" for an unknown prefix or weapon.
func sherlock(p, w string) (r string) {
	var pronoun string
	switch {
	case strings.HasPrefix(p, "Mr."):
		pronoun = "he"
	case strings.HasPrefix(p, "Ms."):
		pronoun = "she"
	default:
		return ""
	}
	// Each weapon maps to {verb, reason}; lookup is case-insensitive.
	details := map[string][2]string{
		"poison":    {"poisoned", "could have such high-grade poison"},
		"knife":     {"butchered", "was in the kitchen"},
		"bat":       {"swatted", "is a baseball player"},
		"gun":       {"shot", "had a gun"},
		"briefcase": {"smacked", "was after Tom's money"},
	}
	d, ok := details[strings.ToLower(w)]
	if !ok {
		return ""
	}
	return fmt.Sprintf("%s %s Tom as only %s %s.", p, d[0], pronoun, d[1])
}
|
package printer
import (
"fmt"
"io"
"strings"
)
// SwiftPrinter implements the Printer interface for Swift programs.
type SwiftPrinter struct {
	Printer
	level    int       // current indentation depth
	sameline bool      // when set, the next PrintLevel call skips indentation
	w        io.Writer // output destination
}
// Reset restores the printer to its initial state (no indent, new line).
func (p *SwiftPrinter) Reset() {
	p.level = 0
	p.sameline = false
}
// PushContext is a no-op for Swift output.
func (p *SwiftPrinter) PushContext(c ContextType) {
}
// PopContext is a no-op for Swift output.
func (p *SwiftPrinter) PopContext() {
}
// SetWriter sets the destination for all subsequent output.
func (p *SwiftPrinter) SetWriter(w io.Writer) {
	p.w = w
}
// UpdateLevel adjusts the indentation level by delta (see UP/DOWN).
func (p *SwiftPrinter) UpdateLevel(delta int) {
	p.level += delta
}
// SameLine suppresses indentation for the next indented print.
func (p *SwiftPrinter) SameLine() {
	p.sameline = true
}
// IsSameLine reports whether same-line mode is currently pending.
func (p *SwiftPrinter) IsSameLine() bool {
	return p.sameline
}
// Chop trims trailing COMMA-set characters from line.
func (p *SwiftPrinter) Chop(line string) string {
	return strings.TrimRight(line, COMMA)
}
// indent returns the current indentation prefix, or the empty string
// (exactly once) after SameLine has been requested.
func (p *SwiftPrinter) indent() string {
	if p.sameline {
		p.sameline = false
		return ""
	}
	return strings.Repeat("  ", p.level)
}
// Print writes values space-separated, with no indentation or terminator.
func (p *SwiftPrinter) Print(values ...string) {
	fmt.Fprint(p.w, strings.Join(values, " "))
}
// PrintLevel writes an indented, space-separated sequence ended by term.
func (p *SwiftPrinter) PrintLevel(term string, values ...string) {
	fmt.Fprint(p.w, p.indent(), strings.Join(values, " "), term)
}
// PrintBlockStart opens a block and increases the indent: const/var
// groups open with "(", everything else with "{".
func (p *SwiftPrinter) PrintBlockStart(b BlockType, empty bool) {
	var open string
	switch b {
	case CONST, VAR:
		open = "("
	default:
		open = "{"
	}
	p.PrintLevel(NL, open)
	p.UpdateLevel(UP)
}
// PrintBlockEnd closes a block opened by PrintBlockStart, decreasing the
// indent first so the closer aligns with the opener.
func (p *SwiftPrinter) PrintBlockEnd(b BlockType) {
	var close string
	switch b {
	case CONST, VAR:
		close = ")"
	default:
		close = "}"
	}
	p.UpdateLevel(DOWN)
	p.PrintLevel(NONE, close)
}
// PrintPackage emits a "package name" line.
// NOTE(review): "package" is not a Swift declaration — confirm intended.
func (p *SwiftPrinter) PrintPackage(name string) {
	p.PrintLevel(NL, "package", name)
}
// PrintImport prints an import declaration (the Go keyword is retained).
func (p *SwiftPrinter) PrintImport(name, path string) {
	p.PrintLevel(NL, "import", name, path)
}
// PrintType prints a type declaration (the Go keyword is retained).
func (p *SwiftPrinter) PrintType(name, typedef string) {
	p.PrintLevel(NL, "type", name, typedef)
}
// PrintValue prints a value declaration, mapping Go's "const" to Swift's
// "let" and an implicit declaration to "var". The type annotation and
// initializer are appended only when present. ntuple/vtuple are unused.
func (p *SwiftPrinter) PrintValue(vtype, typedef, names, values string, ntuple, vtuple bool) {
	switch vtype {
	case "const":
		vtype = "let"
	case "":
		vtype = "var"
	}
	p.PrintLevel(NONE, vtype, names)
	if len(typedef) > 0 {
		p.Print(": ", typedef)
	}
	if len(values) > 0 {
		p.Print(" =", values)
	}
	p.Print("\n")
}
// PrintStmt prints a statement line; when a statement keyword is given it
// precedes the expression.
func (p *SwiftPrinter) PrintStmt(stmt, expr string) {
	if len(stmt) == 0 {
		p.PrintLevel(NL, expr)
		return
	}
	p.PrintLevel(NL, stmt, expr)
}
// PrintReturn prints a return statement; the tuple flag is unused.
func (p *SwiftPrinter) PrintReturn(expr string, tuple bool) {
	p.PrintStmt("return", expr)
}
// PrintFunc prints a function header: optional parenthesized receiver,
// name, parameter list, and a "->" return clause. Named or multiple
// results are additionally parenthesized.
func (p *SwiftPrinter) PrintFunc(receiver, name, params, results string) {
	p.PrintLevel(NONE, "func ")
	if len(receiver) > 0 {
		fmt.Fprintf(p.w, "(%s) ", receiver)
	}
	fmt.Fprintf(p.w, "%s(%s) ", name, params)
	if len(results) > 0 {
		if strings.ContainsAny(results, " ,") {
			// name type or multiple types
			fmt.Fprintf(p.w, "-> (%s) ", results)
		} else {
			fmt.Fprintf(p.w, "-> %s ", results)
		}
	}
}
// PrintFor prints a for-statement header; the init and post clauses (and
// their separating semicolons) are emitted only when present.
func (p *SwiftPrinter) PrintFor(init, cond, post string) {
	p.PrintLevel(NONE, "for ")
	if len(init) > 0 {
		p.Print(init)
	}
	if len(init) > 0 || len(post) > 0 {
		p.Print("; ")
	}
	p.Print(cond)
	if len(post) > 0 {
		p.Print(";", post)
	}
	p.Print("")
}
// PrintRange prints a for-in header over expr, binding key and, when
// given, value.
func (p *SwiftPrinter) PrintRange(key, value, expr string) {
	p.PrintLevel(NONE, "for", key)
	if len(value) > 0 {
		p.Print(",", value)
	}
	p.Print(" in", expr)
}
// PrintSwitch prints a switch header with an optional init statement.
func (p *SwiftPrinter) PrintSwitch(init, expr string) {
	p.PrintLevel(NONE, "switch ")
	if len(init) > 0 {
		p.Print(init + "; ")
	}
	p.Print(expr)
}
// PrintCase prints a case label, or "default:" when expr is empty.
func (p *SwiftPrinter) PrintCase(expr string) {
	if len(expr) > 0 {
		p.PrintLevel(COLON, "case", expr)
	} else {
		p.PrintLevel(NL, "default:")
	}
}
// PrintEndCase is a no-op for this printer.
func (p *SwiftPrinter) PrintEndCase() {
	// nothing to do
}
// PrintIf prints an if header with an optional init statement.
func (p *SwiftPrinter) PrintIf(init, cond string) {
	p.PrintLevel(NONE, "if ")
	if len(init) > 0 {
		p.Print(init + "; ")
	}
	p.Print(cond, "")
}
// PrintElse prints the else keyword between branch blocks.
func (p *SwiftPrinter) PrintElse() {
	p.Print(" else ")
}
// PrintEmpty prints an empty statement (indentation plus SEMI).
func (p *SwiftPrinter) PrintEmpty() {
	p.PrintLevel(SEMI, "")
}
// PrintAssignment prints "lhs op rhs"; the tuple flags are unused.
func (p *SwiftPrinter) PrintAssignment(lhs, op, rhs string, ltuple, rtuple bool) {
	p.PrintLevel(NL, lhs, op, rhs)
}
// PrintSend prints a channel send (ch <- value) terminated by SEMI.
// NOTE(review): this keeps Go's send syntax verbatim — confirm the
// intended Swift translation.
func (p *SwiftPrinter) PrintSend(ch, value string) {
	p.PrintLevel(SEMI, ch, "<-", value)
}
// FormatIdent maps a Go identifier to its Swift equivalent. Go's
// predeclared string/bool/numeric type names are translated to the
// corresponding Swift standard-library types; anything else is returned
// unchanged. int16/uint16 are now covered alongside the other widths.
func (p *SwiftPrinter) FormatIdent(id, itype string) (ret string) {
	switch id {
	//case IOTA:
	//	ret = strconv.Itoa(p.ctx.iota)
	//	p.ctx.iota += 1
	case "string":
		ret = "String"
	case "int":
		ret = "Int"
	case "int8":
		ret = "Int8"
	case "int16":
		ret = "Int16"
	case "int32":
		ret = "Int32"
	case "int64":
		ret = "Int64"
	case "uint":
		ret = "UInt"
	case "uint8":
		ret = "UInt8"
	case "uint16":
		ret = "UInt16"
	case "uint32":
		ret = "UInt32"
	case "uint64":
		ret = "UInt64"
	case "float32":
		ret = "Float"
	case "float64":
		ret = "Double"
	case "bool":
		ret = "Bool"
	default:
		ret = id
	}
	return
}
// FormatLiteral returns the literal unchanged.
func (p *SwiftPrinter) FormatLiteral(lit string) string {
	return lit
}
// FormatCompositeLit renders a composite literal: Array/Slice/Dictionary
// types become a bracketed element list (or an empty constructor call
// when there are no elements); anything else keeps Go's braced form.
func (p *SwiftPrinter) FormatCompositeLit(typedef, elt string) string {
	if strings.HasPrefix(typedef, "Array<") || strings.HasPrefix(typedef, "Slice<") || strings.HasPrefix(typedef, "Dictionary<") {
		if len(elt) > 0 {
			return fmt.Sprintf("[ %s ]", elt)
		} else {
			return fmt.Sprintf("%s()", typedef)
		}
	} else {
		return fmt.Sprintf("%s{%s}", typedef, elt)
	}
}
// FormatEllipsis prefixes expr with "...".
func (p *SwiftPrinter) FormatEllipsis(expr string) string {
	return "..." + expr
}
// FormatStar prefixes expr with "*".
func (p *SwiftPrinter) FormatStar(expr string) string {
	return "*" + expr
}
// FormatParen wraps expr in parentheses.
func (p *SwiftPrinter) FormatParen(expr string) string {
	return "(" + expr + ")"
}
// FormatUnary renders a unary expression: operator immediately followed
// by its operand.
func (p *SwiftPrinter) FormatUnary(op, operand string) string {
	return op + operand
}
// FormatBinary renders a binary expression with single spaces around the
// operator.
func (p *SwiftPrinter) FormatBinary(lhs, op, rhs string) string {
	return lhs + " " + op + " " + rhs
}
// FormatPair renders a name/value pair according to its field role:
// methods and fields are indented, NL-terminated lines; parameters are
// "name: value" plus a COMMA; anything else is the raw pair plus a COMMA.
func (p *SwiftPrinter) FormatPair(v Pair, t FieldType) string {
	switch t {
	case METHOD:
		return p.indent() + v.Name() + v.Value() + NL
	case FIELD:
		return p.indent() + v.String() + NL
	case PARAM:
		return v.Name() + ": " + v.Value() + COMMA
	default:
		return v.String() + COMMA
	}
}
// FormatArray renders Array<elt> when an explicit length is given and
// Slice<elt> otherwise.
func (p *SwiftPrinter) FormatArray(l, elt string) string {
	if len(l) > 0 {
		return "Array<" + elt + ">"
	}
	return "Slice<" + elt + ">"
}
// FormatArrayIndex renders an array subscript; ctype is unused.
func (p *SwiftPrinter) FormatArrayIndex(array, index, ctype string) string {
	return array + "[" + index + "]"
}
// FormatMapIndex renders a map subscript; ctype and check are unused.
func (p *SwiftPrinter) FormatMapIndex(array, index, ctype string, check bool) string {
	return array + "[" + index + "]"
}
// FormatSlice renders a slice expression, appending the capacity bound
// only when max is given.
func (p *SwiftPrinter) FormatSlice(slice, low, high, max string) string {
	if max != "" {
		return slice + "[" + low + ":" + high + ":" + max + "]"
	}
	return slice + "[" + low + ":" + high + "]"
}
// FormatMap renders a map type as Dictionary<key, elt>.
func (p *SwiftPrinter) FormatMap(key, elt string) string {
	return "Dictionary<" + key + ", " + elt + ">"
}
// FormatKeyValue renders "key: value"; isMap is unused.
func (p *SwiftPrinter) FormatKeyValue(key, value string, isMap bool) string {
	return key + ": " + value
}
// FormatStruct renders a struct type; an empty field list collapses to
// "struct{}". The name parameter is unused.
func (p *SwiftPrinter) FormatStruct(name, fields string) string {
	if len(fields) == 0 {
		return "struct{}"
	}
	return "struct{\n" + fields + "}"
}
// FormatInterface renders an interface type; an empty method list
// collapses to "interface{}". The name parameter is unused.
func (p *SwiftPrinter) FormatInterface(name, methods string) string {
	if len(methods) == 0 {
		return "interface{}"
	}
	return "interface{\n" + methods + "}"
}
// FormatChan renders a channel type as "chdir mtype".
func (p *SwiftPrinter) FormatChan(chdir, mtype string) string {
	return chdir + " " + mtype
}
// FormatCall renders a call expression; isFuncLit is unused.
func (p *SwiftPrinter) FormatCall(fun, args string, isFuncLit bool) string {
	return fun + "(" + args + ")"
}
// FormatFuncType renders a function type; the "func" keyword is included
// only when withFunc is set, and named or multiple results are
// parenthesized.
func (p *SwiftPrinter) FormatFuncType(params, results string, withFunc bool) string {
	prefix := ""
	if withFunc {
		prefix = "func"
	}
	if len(results) == 0 {
		// no results
		return fmt.Sprintf("%s(%s)", prefix, params)
	}
	if strings.ContainsAny(results, ", ") {
		// name type or multiple types
		return fmt.Sprintf("%s(%s) (%s)", prefix, params, results)
	}
	// just type
	return fmt.Sprintf("%s(%s) %s", prefix, params, results)
}
// FormatFuncLit renders a function literal from its type and body.
func (p *SwiftPrinter) FormatFuncLit(ftype, body string) string {
	return "func" + ftype + " " + body
}
// FormatSelector renders "pname.sel"; isObject is unused.
func (p *SwiftPrinter) FormatSelector(pname, sel string, isObject bool) string {
	return pname + "." + sel
}
// FormatTypeAssert renders a Go-style type assertion "orig.(assert)".
func (p *SwiftPrinter) FormatTypeAssert(orig, assert string) string {
	return orig + ".(" + assert + ")"
}
|
package event
import (
"log"
)
// Router fans incoming events out to a fixed set of handlers.
type Router struct {
	handlers []Handler
}
// NewEventRouter builds a Router over the given handlers.
func NewEventRouter(handlers []Handler) *Router {
	return &Router{handlers: handlers}
}
// Route offers event to every registered handler. If no handler reports
// having handled it, the miss is logged with the event for diagnosis
// (the previous message carried no context at all).
func (er *Router) Route(event Event) {
	handled := false
	for _, h := range er.handlers {
		if h.Handle(event) {
			handled = true
		}
	}
	if !handled {
		log.Printf("no handler handled event: %v", event)
	}
}
|
// Package run provides an easy way to execute external commands. It's not a
// big package (less than 100 lines of code), but comes handy when one is using
// an external command inside a Go program. Because it gets stdin and returns
// stdout and stderr in []byte.
//
// stdout, stderr, err := Run("hello", "tr", "eo", "EO")
// // string(stdout) is now "hEllO"
package run
import (
"io/ioutil"
"os/exec"
)
// Run executes command with the given args, feeding it stdin, and returns
// its stdout and stderr.
//
// Stdin is written, and both output pipes are drained, concurrently: the
// previous sequential version could deadlock against a command that
// produces output before consuming all of its input, or that fills the
// stderr pipe while stdout was being read (os/exec requires all pipes to
// be serviced before Wait).
func Run(stdin []byte, command string, args ...string) (stdout, stderr []byte, err error) {
	// Setup command and its stdin, stdout, and stderr.
	cmd := exec.Command(command, args...)
	stdinPipe, err := cmd.StdinPipe()
	if err != nil {
		return
	}
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		return
	}
	// Start command execution. After calling Start stdin, stdout, and
	// stderr will be writable and readable.
	if err = cmd.Start(); err != nil {
		return
	}
	// Feed stdin from its own goroutine so a command that writes before
	// reading everything cannot deadlock us.
	stdinErr := make(chan error, 1)
	go func() {
		_, werr := stdinPipe.Write(stdin)
		if cerr := stdinPipe.Close(); werr == nil {
			werr = cerr
		}
		stdinErr <- werr
	}()
	// Drain stderr concurrently with stdout so neither pipe's buffer can
	// fill and block the child.
	stderrErr := make(chan error, 1)
	go func() {
		var rerr error
		stderr, rerr = ioutil.ReadAll(stderrPipe)
		stderrErr <- rerr
	}()
	stdout, err = ioutil.ReadAll(stdoutPipe)
	// Collect the helper goroutines' errors (this also guarantees both
	// pipes are fully consumed before Wait is called).
	if rerr := <-stderrErr; err == nil {
		err = rerr
	}
	if werr := <-stdinErr; err == nil {
		err = werr
	}
	// Wait for the command to finish.
	if werr := cmd.Wait(); err == nil {
		err = werr
	}
	return
}
|
package command
import (
"fmt"
"termsnippet/util"
"github.com/ajpen/termsnippet/core"
"github.com/atotto/clipboard"
"gopkg.in/urfave/cli.v1"
)
// init registers the "new" snippet command with the command registry.
func init() {
	InstallCommand(newSnippetCommand())
}
// newSnippetCommand builds the "new" CLI command, which creates a code
// snippet titled by the first argument. The body comes from the clipboard
// when -c/--clip is set, otherwise from an interactively opened editor.
// Error strings are lowercased per Go convention (staticcheck ST1005).
func newSnippetCommand() cli.Command {
	cmd := cli.Command{
		Name:        "new",
		Description: "Create a new code snippet",
		Flags: []cli.Flag{
			cli.BoolFlag{
				Name:  "clip, c",
				Usage: "If set, the contents of the clipboard will be used as the snippet body",
			},
			cli.StringFlag{
				Name:  "description, d",
				Usage: "code snippet description",
				Value: "",
			},
		},
		ArgsUsage: "Title (required) - Sets the title of the code snippet",
		Action: func(c *cli.Context) error {
			if c.NArg() <= 0 {
				return fmt.Errorf("snippet title argument missing")
			}
			title := c.Args()[0]
			var body string
			var err error
			if c.Bool("clip") {
				body, err = clipboard.ReadAll()
				if err != nil {
					return fmt.Errorf("unable to read from clipboard: %v", err)
				}
			} else {
				body, err = util.OpenInEditor("")
				if err != nil {
					return fmt.Errorf("unable to read from text editor: %v", err)
				}
			}
			desc := c.String("description")
			if err := core.AddSnippet(title, desc, body); err != nil {
				return fmt.Errorf("unable to save snippet: %v", err)
			}
			return nil
		},
	}
	return cmd
}
|
package logic
import (
"context"
"golang.org/x/crypto/bcrypt"
"software/car_port/model"
"software/car_port/pb_gen"
"software/common"
)
// UserLogic bundles user-management operations with the request context
// they were authorized under (see NewUserLogic).
type UserLogic struct {
	ctx context.Context
}
// NewUserLogic returns a UserLogic after verifying the caller holds admin
// permission; on failure the permission error is returned instead.
func NewUserLogic(ctx context.Context) (*UserLogic, common.BgErr) {
	if err := common.AuthPermission(ctx, common.PermissionAdmin); !err.Is(common.Success) {
		return nil, err
	}
	return &UserLogic{ctx: ctx}, common.Success
}
// UpdateUser persists changes to the user's cellphone, nickname and
// status; when password is non-empty it is bcrypt-hashed and updated too.
// Returns ParamErr for a negative ID and DbErr on storage failures.
func (logic UserLogic) UpdateUser(user *pb_gen.User, password string) common.BgErr {
	if user.Id < 0 {
		return common.ParamErr
	}
	mUser := model.User{
		Id:        user.Id,
		Cellphone: user.Cellphone,
		Nickname:  user.NickName,
		Status:    int(user.Status),
	}
	if password != "" {
		// Hash before storing — the plaintext password is never persisted.
		encodePassword, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
		if err != nil {
			return common.CustomErr(common.InternalErr, err)
		}
		mUser.Password = string(encodePassword)
	}
	db, err := model.NewDbConnection()
	if err != nil {
		return common.CustomErr(common.DbErr, err)
	}
	err = model.UpdateUser(db, mUser)
	if err != nil {
		return common.CustomErr(common.DbErr, err)
	}
	return common.Success
}
// GetUser fetches a single user: by ID when userId is non-zero, otherwise
// by cellphone. A negative userId yields ParamErr; lookup failures yield
// DbErr.
func (logic UserLogic) GetUser(userId int64, cellphone string) (*pb_gen.User, common.BgErr) {
	if userId < 0 {
		return nil, common.ParamErr
	}
	db, err := model.NewDbConnection()
	if err != nil {
		return nil, common.CustomErr(common.DbErr, err)
	}
	user := model.User{}
	if userId != 0 {
		user, err = model.GetUser(db, userId)
	} else {
		user, err = model.GetUserByCellphone(db, cellphone)
	}
	if err != nil {
		return nil, common.CustomErr(common.DbErr, err)
	}
	return packUser(user), common.Success
}
// MGetUser returns one page of users plus the total row count.
// count is the page size; num appears to be a zero-based page index
// (offset = num * count) — confirm against callers.
func (logic UserLogic) MGetUser(count int32, num int32) ([]*pb_gen.User, int32, common.BgErr) {
	db, err := model.NewDbConnection()
	if err != nil {
		return nil, 0, common.CustomErr(common.DbErr, err)
	}
	offset := num * count
	users, tableCount, err := model.MGetUser(db, offset, count)
	if err != nil {
		return nil, 0, common.CustomErr(common.DbErr, err)
	}
	var userList []*pb_gen.User
	for _, value := range users {
		userList = append(userList, packUser(value))
	}
	return userList, tableCount, common.Success
}
// packUser converts a storage-layer User into its protobuf representation.
func packUser(user model.User) *pb_gen.User {
	return &pb_gen.User{
		Id:        user.Id,
		Cellphone: user.Cellphone,
		NickName:  user.Nickname,
		BindCode:  user.BindCode,
		Status:    pb_gen.UserStatus(user.Status),
	}
}
|
/*
* @lc app=leetcode.cn id=1688 lang=golang
*
* [1688] 比赛中的配对次数
*/
// @lc code=start
package main
// numberOfMatches returns the number of matches played in a single-
// elimination tournament with n teams. Every match eliminates exactly one
// team and play continues until one team remains, so the closed form is
// n-1 — no recursion needed.
func numberOfMatches(n int) int {
	if n < 2 {
		return 0
	}
	return n - 1
}
// func main() {
// fmt.Println(numberOfMatches(14))
// }
// @lc code=end
|
package main
import (
"flag"
"net/http"
"sub_account_service/number_server/config"
"sub_account_service/number_server/models"
"sub_account_service/number_server/routers/query"
)
// main parses flags, initializes the data layer, and serves the query
// HTTP API on the configured address. ListenAndServe only returns on
// failure, in which case the process panics with the error.
func main() {
	flag.Parse()
	models.Setup()
	router := query.InitRouter()
	err := http.ListenAndServe(config.Opts().Query_Server_Http, router)
	if err != nil {
		panic(err)
	}
}
|
package model
import (
"log"
)
// User is a registered account. Name must be unique, and all three
// fields are required on form binding; Email is additionally validated
// as an address.
type User struct {
	BaseModel
	Name string `form:"name" gorm:"unique;not null" binding:"required"`
	// The binding rules are combined into a single tag value: a struct
	// tag must not repeat a key — `binding` appeared twice here before,
	// and tag parsers only honor the first occurrence, silently dropping
	// the "required" rule.
	Email    string `form:"email" gorm:"not null" binding:"required,email"`
	Password string `form:"password" gorm:"not null" binding:"required"`
}
// init is currently a no-op.
func init() {
}
// Save inserts the user and returns the new row ID.
// NOTE(review): a failed insert calls log.Panicln, so the error return
// value is never actually delivered to callers on failure — confirm this
// is intended.
func (user *User) Save() (id int64, err error) {
	err = db.Create(user).Error
	if err != nil {
		log.Panicln("user insert error", err.Error())
		return
	}
	id = int64(user.ID)
	return
}
|
package company
import (
"fmt"
"net/http"
"github.com/gin-gonic/gin"
"github.com/rezwanul-haque/ID-Service/src/domain/companies"
"github.com/rezwanul-haque/ID-Service/src/services"
"github.com/rezwanul-haque/ID-Service/src/utils/consts"
"github.com/rezwanul-haque/ID-Service/src/utils/errors"
"github.com/rezwanul-haque/ID-Service/src/utils/helpers"
)
// Create handles company creation: it authenticates the request via the
// SecretKey header, binds the JSON body to a Company, and persists it.
// Fixes the "secrectkey" typo in the error message.
// NOTE(review): the response echoes the client-supplied header value and
// uses an internal-server-error status for an auth failure — consider a
// 401 without the echoed value.
func Create(c *gin.Context) {
	secretKeyHeader := c.GetHeader("SecretKey")
	if helpers.IsInvalid(secretKeyHeader) || secretKeyHeader != consts.SecretKey {
		keyErr := errors.NewInternalServerError(fmt.Sprintf("secretkey: '%s' is missing or invalid", secretKeyHeader))
		c.JSON(keyErr.Status, keyErr)
		return
	}
	var company companies.Company
	if err := c.ShouldBindJSON(&company); err != nil {
		restErr := errors.NewBadRequestError("invalid json body")
		c.JSON(restErr.Status, restErr)
		return
	}
	result, saveErr := services.CompanyService.CreateCompany(company)
	if saveErr != nil {
		c.JSON(saveErr.Status, saveErr)
		return
	}
	c.JSON(http.StatusCreated, result)
}
// CreateWithAdminUser is like Create but also provisions the company's
// admin user from the bound payload. Fixes the "secrectkey" typo in the
// error message (see the matching check in Create).
func CreateWithAdminUser(c *gin.Context) {
	secretKeyHeader := c.GetHeader("SecretKey")
	if helpers.IsInvalid(secretKeyHeader) || secretKeyHeader != consts.SecretKey {
		keyErr := errors.NewInternalServerError(fmt.Sprintf("secretkey: '%s' is missing or invalid", secretKeyHeader))
		c.JSON(keyErr.Status, keyErr)
		return
	}
	var company companies.CreateCompanyResponse
	if err := c.ShouldBindJSON(&company); err != nil {
		restErr := errors.NewBadRequestError("invalid json body")
		c.JSON(restErr.Status, restErr)
		return
	}
	result, saveErr := services.CompanyService.CreateCompanyWithAdminUser(company)
	if saveErr != nil {
		c.JSON(saveErr.Status, saveErr)
		return
	}
	c.JSON(http.StatusCreated, result)
}
|
package example
import (
"fmt"
"github.com/xfstart07/gosms/luosimao"
)
// main demonstrates sending an SMS through the luosimao service and
// printing the gateway's response.
func main() {
	service := luosimao.New("apikey")
	result, err := service.Send("you mobile", "你的验证码: 1231")
	if err != nil {
		// Print the actual error (the old code printed the literal "err")
		// and stop: result is not usable after a failure.
		fmt.Println("err:", err)
		return
	}
	fmt.Println(result.Code)
	fmt.Println(result.Message)
}
|
package main
import (
"fmt"
"regexp"
)
var (
	// LinkPattern matches an optional http/https scheme, a dotted host,
	// a 2-6 character TLD, and an optional path.
	LinkPattern = `(https?:\/\/)?([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w\.-]*)*\/?`
	// text is the sample input the demo scans for links.
	text = `https://crawler.club是爬虫的主页哈哈`
)
// main extracts every link-like substring from text and prints the matches.
func main() {
	re := regexp.MustCompile(LinkPattern)
	matches := re.FindAllString(text, -1)
	fmt.Println(matches)
}
|
//go:build windows
// +build windows

package nio
import "syscall"
// Handle is an alias for the platform file/socket handle type.
type Handle = syscall.Handle
|
// Copyright (c) 2020 Hirotsuna Mizuno. All rights reserved.
// Use of this source code is governed by the MIT license that can be found in
// the LICENSE file.
package speedio_test
import (
"io/ioutil"
"testing"
"time"
"github.com/tunabay/go-speedio"
)
// TestMeterWriter_test1 writes 123000/8 bytes per second (in 1/5-second
// ticks) until five seconds' worth has been written, logging the measured
// bitrate twice a second, and checks that every write succeeds and
// reports the full buffer length.
func TestMeterWriter_test1(t *testing.T) {
	t.Parallel()
	w := speedio.NewMeterWriter(ioutil.Discard)
	done := make(chan struct{})
	// Background sampler: report the current bitrate every half second
	// until the writer loop below finishes.
	go func() {
		ticker := time.NewTicker(time.Second / 2)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				t.Log("bitrate:", w.BitRate())
			case <-done:
				return
			}
		}
	}()
	// Each write carries 1/5 second's worth of bytes at 123000 bit/s.
	buf := make([]byte, 123000/8/5)
	sum := 0
	w.Start()
	ticker := time.NewTicker(time.Second / 5)
	for {
		<-ticker.C
		n, err := w.Write(buf)
		if 0 < n {
			sum += n
			// Stop once five seconds' worth of data has gone through.
			if 123000/8*5 <= sum {
				break
			}
		}
		if err != nil {
			t.Error(err)
			break
		}
		if n != len(buf) {
			t.Errorf("unexpected len: want: %d, got: %d", len(buf), n)
		}
	}
	ticker.Stop()
	if err := w.Close(); err != nil {
		t.Error(err)
	}
	close(done)
	bc, et, br := w.Total()
	t.Logf("total(n=%d): %v, %v, %v", sum, bc, et, br)
}
|
/*
* JALANKAN PERINTAH GO BUILD DI FOLDER INI UNTUK MENGCOMPILE PROGRAM
*/
package main
import (
"fmt"
"contoh_mvc/controllers"
"net/http"
)
// main wires the siswa (student) CRUD routes and static asset serving,
// then starts the web server on port 8080.
func main() {
	siswaController := controllers.NewSiswaController()
	// routes
	http.HandleFunc("/siswa", siswaController.ListSiswa)
	http.HandleFunc("/siswa/edit", siswaController.EditSiswa)
	http.HandleFunc("/siswa/add", siswaController.AddSiswa)
	http.HandleFunc("/siswa/delete", siswaController.DeleteSiswa)
	// static asset routes
	http.Handle("/assets/",
		http.StripPrefix("/assets/",
			http.FileServer(http.Dir("assets"))))
	fmt.Println("starting web server at http://localhost:8080/")
	// ListenAndServe only returns on failure; report the error instead of
	// silently discarding it as the old code did.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
const (
	// ascii_offset is the byte value of '0', for converting an ASCII
	// digit byte to its numeric value.
	// NOTE(review): currently unused in this file.
	ascii_offset byte = 48
)
// checkErr aborts the program with a logged message when err is non-nil.
func checkErr(err error) {
	if err == nil {
		return
	}
	log.Fatalf("Error: %v", err)
}
// main counts the lines of input.txt whose space-separated words are all
// distinct, and prints that count once at the end (the previous version
// printed every word and the running total on every line — debug output).
func main() {
	f, err := os.Open("input.txt")
	checkErr(err)
	defer f.Close()
	scanner := bufio.NewScanner(f)
	sum := 0
	for scanner.Scan() {
		seen := make(map[string]bool)
		hasDupe := false
		for _, val := range strings.Split(scanner.Text(), " ") {
			if seen[val] {
				hasDupe = true
				break
			}
			seen[val] = true
		}
		if !hasDupe {
			sum++
		}
	}
	checkErr(scanner.Err())
	fmt.Println(sum)
}
|
package main
import (
"fmt"
"log"
"net/http"
"strconv"
"sync"
)
// counter holds the number of /increment hits; mutex guards it because
// the HTTP server invokes handlers concurrently.
var counter int
var mutex = &sync.Mutex{}

// incrementCount atomically bumps the counter and writes its new value to
// the response.
func incrementCount(w http.ResponseWriter, r *http.Request) {
	mutex.Lock()
	defer mutex.Unlock()
	counter++
	// Fprint, not Fprintf: the counter string must not be interpreted as
	// a format string (go vet flags non-constant format strings).
	fmt.Fprint(w, strconv.Itoa(counter))
}
// main serves static files from ./static and the /increment counter
// endpoint on port 8081; ListenAndServe only returns on failure.
func main() {
	http.Handle("/", http.FileServer(http.Dir("./static")))
	http.HandleFunc("/increment", incrementCount)
	log.Fatal(http.ListenAndServe(":8081", nil))
}
|
package saml
import (
"encoding/xml"
"testing"
"time"
"github.com/pkg/errors"
"github.com/sergi/go-diff/diffmatchpatch"
"github.com/stretchr/testify/assert"
)
// testSP is a fixture ServiceProvider with a static RSA private key and
// self-signed certificate, used by the metadata and AuthnRequest tests
// below. Test-only material — never use these keys outside tests.
var testSP = &ServiceProvider{
	PrivkeyPEM: `-----BEGIN PRIVATE KEY-----
MIIJKwIBAAKCAgEA8eAiAD/qbOh+PBCOYWFjuVbweHUAb/958G0hF+3ciWCqBDzO
YUO8Gij+S9YBSZhEMT8tCI+yMm4wXmDBHA3nAs6meiZ97KK6sU51IFPuVIuq+1lY
/fkf4Wm5dpP+cnZi6lg0cAFvp7S95czck3Yp2gqxW2zRG82d/KL9p4rGeaCuetER
FHTIF0ohyzDZbiEacHheny9cbxk1fNllNUgythQA4JWMf4G+IHjCqTrcro27gqet
WYIQDLD3Gkh4eUwaY6cd3KhJQGWJxswmX3S7GOMTM3l4Cfoner63MV5H/ZzoEwld
VFnZgNv8taEqhpcryTDPDIhDFShdztKjPi/65ZI0qH2glLLvntPQqVH5y7jPejW2
mMO/9BlzrE33Ilvv9WDiyFtWd4e6lrnUr8ehGJcP8MSYDd1p6sS9yJ2P9euuK6L4
nuyNdDuZi7r8hIOspKdfhHucnkWtB6I3/c0ClUEY7Vuo6Lp2G/8Y1CsBLl2Z0IeP
IvXTiS/ID8bup0nWIlUjU+VASZTyQimDFjKswxlaEF8zX/7o9jyvnv80mb8bu7t4
UpBDWGTXH17u499ONx1sIHBI+I+/Ln1wKRD0/k1o5phqmYaiq7JGfDdmipEOcLc8
q223jsPM3k4raZsvJoKmmi5vNiZl/6UbSfB11x3aynQZ0YdYtPRqI9oa3PMCAwEA
AQKCAgEA1rVfeVlDf+niJO+NdGQ/YgcK7+LswH7If+RfvB4p5skoIxrXGQBHufEp
y6fs/Kdt4UlzcGYeiSXHSgAZbA3rQ1Kt9UC2B5lsoHhFAK2AowxYe0aU+N5srmxr
dhdph7IPnHcwFT0xIG4RJCz2oPADtspHJiEyfrvHwrvs7w0BonZAbEWqI76G4CWu
WfDDEj/QeIZheG1SYEzAblOMw/+TBI49OR+H2KgTXj/UjOTzgP/Ps+uktg/+r0Vo
FKzAROyJgGyY1YeNftyjsRUH+zRj4XOxV8A8Dp9A7HTfqbNHtJnUaRGnB3m62ehu
K80lMtR+ydnJ8hYDdoSewTm6LznoPKfYmHEdO9TyilcKGxmqqDosy5jlOK9tk76H
vMoq5jjQcPE1VRRKUP9sZbX1FJkaqXJvEUETn/EDClSz/4z5PsftnaPW7q0heTaJ
mr1fKfWcDUjX7Nfm6Ndu1zlBGO5wJNa/KGjqLWcz5jk++60SLx1qYyfjg30mmD0Y
lnGUzOoqBfCGjD/4W+X7kh/KrWFO5xIdUFmPV1tnac/Eu90e9zvbg2U5dPfFxbIv
Raq484S0ZOu/II+C8TRVwd1MmVjMrysM4p7Yo9PPRmdrudamAifGO7rOMaBqvMTZ
Q2+u9YWOxzc/kVrSgLy02/RbjcjsgBjHMy2NGG/p7Z3XdG0nOAECggEBAP/SKCew
8bIbZQ3A6FT/IwKYQm1zi6JyQAko38hhWVyCMOTVYczs4uMUoGlgaSxr7COO3ZIx
kIccZ3C1Q4lzqO9vxqrJfJikuP0znWvBUGij08LoG5BbQaKCnbvOKQSNnGAxF/Bu
kHiWsIgeDd2d1YxTQsq+PX0WLX6xzfGk7OQo8kgFRDjmY5NBI7WsZ8K22mSM+Do2
7Xpj0sQH0bTGgscyFJovA5knc0p1TZ6XHs4mPtFcFLoGTLpiJhgWwet3XE4hmGkb
LLXa+xmlKv+0aJd9PV5yy42R1xRlcYi6T+/vD7Y4m0Y6swnEq3oX1jGz3B6kLGvx
dUkiFgRASxvzfTECggEBAPILehjeQncwvmDd93d2SRPnxfq9e2/9tP79711rU/cF
YaA2xbRLSEdw2kgrSaRLISDhGA3odN4s3VhD2rByJUVFjSpdPVigLys8LITEdwqs
beDsVLaBAa3DagFXuhnOuguN/3ybaIPop4jXDnyj/1T3IcsmUglPte/08GXaxfLP
5d2/ut/ebDWwlyaLtdkGNOHXpP+r99JBPkPJQ7UMjorXEZUMpKUtpg3B4/FaK98h
319WHfQP+Znow3Q2PZUhOUSTkQUWPia7vwAMeKKTC2JLw4kvWMVEkXS6d/88RadJ
48weQdGEX49Mcz9T/QTohhkXTdE9LKk4vnJA3Bof42MCggEBAPnRQZNZEP3sEKf4
rSlrqcW76Iq33jE5vtzzBG3K2xgirxqYYhRbdElq+CdPlgViMsyalDdSnZ/DliQT
byuIPf3sOqbHchwiJ+BjiiQTOLGm4oGgZmJ3K0ZGpUAkWBvxKjcpWgZaAk0wYp3a
M5IqssKBAGW6l5NSmAT0H3gNpaQ9dDPuqKukGLNRVkzwWrdkFytAGpvGzevKFaWH
OTt+63EYr9PNe4cRZl3c5XqsetG3uXp7oGX1BvKwLCE0ABUwj3xhBFQHzIaenL1z
dOUWSVk+XTKhibPrKozpb5Ck2LEm1EIPT1qqsfIlE4t8QZhx2tA1ZIfY2L8dQUDP
hEl8YtECggEBALdTZgkL3r+0FZ38wQCkgLko5VUCy0mJmdtArlnNfu0sEENu+NOT
6YzitxHOZ5qepNroYnW2ST42MPg3fJ4D9qK/CSq7MEm+GbvfecCdpaRZ8WdY2Nja
YVEhH1sg/P2rDFLQHGBevQyb3LjSWlChTjUmcVwTDDOtQhobJTGgQCmmuW35WCtT
phYMSI+TZgqYntknoftcXvnLHMPu3u90MUqLlu+Tgejx6EGnR2R4bZ41Y6Ar88f1
iZG/MLsGkiIv5dZBBqgZrr1bmlEJIR3Rzd+HjvSK/et8AcetrFGPbxpD5tulVyi/
4DkDKI8gqBVdoKPEeNYwRXFuXyDea1cGLA0CggEBAIYTd5kVatU/PhwF27AB5sQi
cIs/D7f6yzh5bTTZmdZzz6xBIRMASkALrHGgcrhtAaHF0BOpUquhDy0YAq3WcRjQ
KefAX3F3tiCeG2WluhZoSDarWCEYGP9SrfNtHyZLnqr33ROrpqTNLXLm2MDTSGzs
YhCD/gT9+TrfcQl6iOtiZ+l4AWIf/LgnPsNgfbOzSgHP0V686cM4hBjDKI8esDyW
sCuRa6MrX1SwANgDeYnmhOIA4vbsTREprojHSMGOiNZdPgUbnqfYBWfbzLgginO5
gF0pEyxohPoiogV4S0MUhTLUgQrLdmnQ4zr7L4ac75LRue7XlAztLI/3arawnrg=
-----END PRIVATE KEY-----`,
	PubkeyPEM: `-----BEGIN CERTIFICATE-----
MIIFqTCCA5GgAwIBAgIJANnmNJJ15Nh+MA0GCSqGSIb3DQEBCwUAMGsxCzAJBgNV
BAYTAkNBMRAwDgYDVQQIDAdPbnRhcmlvMRAwDgYDVQQHDAdUb3JvbnRvMRAwDgYD
VQQKDAdQcmVzc2x5MQwwCgYDVQQLDANPcmcxGDAWBgNVBAMMD3d3dy5wcmVzc2x5
LmNvbTAeFw0xNzA4MjYwMDA4MThaFw0yNzA4MjQwMDA4MThaMGsxCzAJBgNVBAYT
AkNBMRAwDgYDVQQIDAdPbnRhcmlvMRAwDgYDVQQHDAdUb3JvbnRvMRAwDgYDVQQK
DAdQcmVzc2x5MQwwCgYDVQQLDANPcmcxGDAWBgNVBAMMD3d3dy5wcmVzc2x5LmNv
bTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAPHgIgA/6mzofjwQjmFh
Y7lW8Hh1AG//efBtIRft3IlgqgQ8zmFDvBoo/kvWAUmYRDE/LQiPsjJuMF5gwRwN
5wLOpnomfeyiurFOdSBT7lSLqvtZWP35H+FpuXaT/nJ2YupYNHABb6e0veXM3JN2
KdoKsVts0RvNnfyi/aeKxnmgrnrRERR0yBdKIcsw2W4hGnB4Xp8vXG8ZNXzZZTVI
MrYUAOCVjH+BviB4wqk63K6Nu4KnrVmCEAyw9xpIeHlMGmOnHdyoSUBlicbMJl90
uxjjEzN5eAn6J3q+tzFeR/2c6BMJXVRZ2YDb/LWhKoaXK8kwzwyIQxUoXc7Soz4v
+uWSNKh9oJSy757T0KlR+cu4z3o1tpjDv/QZc6xN9yJb7/Vg4shbVneHupa51K/H
oRiXD/DEmA3daerEvcidj/Xrriui+J7sjXQ7mYu6/ISDrKSnX4R7nJ5FrQeiN/3N
ApVBGO1bqOi6dhv/GNQrAS5dmdCHjyL104kvyA/G7qdJ1iJVI1PlQEmU8kIpgxYy
rMMZWhBfM1/+6PY8r57/NJm/G7u7eFKQQ1hk1x9e7uPfTjcdbCBwSPiPvy59cCkQ
9P5NaOaYapmGoquyRnw3ZoqRDnC3PKttt47DzN5OK2mbLyaCppoubzYmZf+lG0nw
ddcd2sp0GdGHWLT0aiPaGtzzAgMBAAGjUDBOMB0GA1UdDgQWBBSjhCS8oXZKkctM
4QyAzLyFSJuaLTAfBgNVHSMEGDAWgBSjhCS8oXZKkctM4QyAzLyFSJuaLTAMBgNV
HRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQDjxydOEhvcpLM3Xoz28dlw4CsU
9qev6Lokv5K4fj7qMFi6zkjSVrzQ8C0T2WfuU8eReTXhCwUbT+Vq2X5+S3zplmRh
HmbKbclkj0C2LfQpqdqs6JGke9PsQOxkhzcIF4CDqMSrN6q60UeRPxQ8HM0tkh7E
IXp83NINHOULDJgGl9yGGpiV00r0iPDh+y6rGEZMoKw1WOUghLkmMLemd8tELXDO
Rgaofsjz14y3le7JiWkaKA6EbmJReSDrmjuqp0O2cs3bqUsHlLQ20VtrmPS1Lw6j
ABujC6NA0CxwwIY5MRRRnXjTrc31CRlBRhM9f9YpEeZuCy3k7UuK6zeP0cAY3Jtt
78SMLxzemJu4RRNqFypTwue1uBlDC+zO6Cpjh+D54laptRfFIg/bZ91zR3KOESAs
vEfVG9CShRxHocy6Q+6oy852Ry6T8blVP6/SOlvB9A++cMoO/idDQ4yGIKicM98z
cenf72Hn3I1h5BiGNM8TBkZQ1OvZ/ItrtQvMAA0x4tbHI4YU0Z8SvKsDoxmCnnby
npL/7HCzPNd56hQq0EyHGtowZmqP9bZ7geyCnAHd449vL/drGSGyvElN6QsQChvZ
zQUwDSgIrjoMPWcFNGu2pzSnQWWU7BB+DpX3jb7kHC/mLFj3M2Fxv7bCK51HWI6h
3/+aZDnC9gbMWMgwWA==
-----END CERTIFICATE-----`,
	MetadataURL:      "http://localhost:1235/saml/service.xml",
	ACSURL:           "http://localhost:1235/saml/acs",
	IdPSSOServiceURL: testIdP.SSOURL,
}
// TestGenerateSPMetadata marshals the fixture SP's metadata and compares
// it byte-for-byte against the expected SPSSODescriptor XML (signing and
// encryption key descriptors built from testSP's certificate, plus the
// ACS endpoint). The validUntil attribute is recomputed from Now() so the
// golden string stays time-independent.
func TestGenerateSPMetadata(t *testing.T) {
	tearUp()
	metadata, err := testSP.Metadata()
	assert.NoError(t, err)
	assert.NotNil(t, metadata)
	out, err := xml.MarshalIndent(metadata, "", "\t")
	assert.NoError(t, err)
	expectedOutput := `<EntityDescriptor xmlns="urn:oasis:names:tc:SAML:2.0:metadata" validUntil="` + Now().Add(defaultValidDuration).Format(time.RFC3339Nano) + `" entityID="http://localhost:1235/saml/service.xml">
	<SPSSODescriptor xmlns="urn:oasis:names:tc:SAML:2.0:metadata" AuthnRequestsSigned="false" WantAssertionsSigned="true" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
		<KeyDescriptor use="signing">
			<KeyInfo xmlns="http://www.w3.org/2000/09/xmldsig#">
				<X509Data>
					<X509Certificate>MIIFqTCCA5GgAwIBAgIJANnmNJJ15Nh+MA0GCSqGSIb3DQEBCwUAMGsxCzAJBgNVBAYTAkNBMRAwDgYDVQQIDAdPbnRhcmlvMRAwDgYDVQQHDAdUb3JvbnRvMRAwDgYDVQQKDAdQcmVzc2x5MQwwCgYDVQQLDANPcmcxGDAWBgNVBAMMD3d3dy5wcmVzc2x5LmNvbTAeFw0xNzA4MjYwMDA4MThaFw0yNzA4MjQwMDA4MThaMGsxCzAJBgNVBAYTAkNBMRAwDgYDVQQIDAdPbnRhcmlvMRAwDgYDVQQHDAdUb3JvbnRvMRAwDgYDVQQKDAdQcmVzc2x5MQwwCgYDVQQLDANPcmcxGDAWBgNVBAMMD3d3dy5wcmVzc2x5LmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAPHgIgA/6mzofjwQjmFhY7lW8Hh1AG//efBtIRft3IlgqgQ8zmFDvBoo/kvWAUmYRDE/LQiPsjJuMF5gwRwN5wLOpnomfeyiurFOdSBT7lSLqvtZWP35H+FpuXaT/nJ2YupYNHABb6e0veXM3JN2KdoKsVts0RvNnfyi/aeKxnmgrnrRERR0yBdKIcsw2W4hGnB4Xp8vXG8ZNXzZZTVIMrYUAOCVjH+BviB4wqk63K6Nu4KnrVmCEAyw9xpIeHlMGmOnHdyoSUBlicbMJl90uxjjEzN5eAn6J3q+tzFeR/2c6BMJXVRZ2YDb/LWhKoaXK8kwzwyIQxUoXc7Soz4v+uWSNKh9oJSy757T0KlR+cu4z3o1tpjDv/QZc6xN9yJb7/Vg4shbVneHupa51K/HoRiXD/DEmA3daerEvcidj/Xrriui+J7sjXQ7mYu6/ISDrKSnX4R7nJ5FrQeiN/3NApVBGO1bqOi6dhv/GNQrAS5dmdCHjyL104kvyA/G7qdJ1iJVI1PlQEmU8kIpgxYyrMMZWhBfM1/+6PY8r57/NJm/G7u7eFKQQ1hk1x9e7uPfTjcdbCBwSPiPvy59cCkQ9P5NaOaYapmGoquyRnw3ZoqRDnC3PKttt47DzN5OK2mbLyaCppoubzYmZf+lG0nwddcd2sp0GdGHWLT0aiPaGtzzAgMBAAGjUDBOMB0GA1UdDgQWBBSjhCS8oXZKkctM4QyAzLyFSJuaLTAfBgNVHSMEGDAWgBSjhCS8oXZKkctM4QyAzLyFSJuaLTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQDjxydOEhvcpLM3Xoz28dlw4CsU9qev6Lokv5K4fj7qMFi6zkjSVrzQ8C0T2WfuU8eReTXhCwUbT+Vq2X5+S3zplmRhHmbKbclkj0C2LfQpqdqs6JGke9PsQOxkhzcIF4CDqMSrN6q60UeRPxQ8HM0tkh7EIXp83NINHOULDJgGl9yGGpiV00r0iPDh+y6rGEZMoKw1WOUghLkmMLemd8tELXDORgaofsjz14y3le7JiWkaKA6EbmJReSDrmjuqp0O2cs3bqUsHlLQ20VtrmPS1Lw6jABujC6NA0CxwwIY5MRRRnXjTrc31CRlBRhM9f9YpEeZuCy3k7UuK6zeP0cAY3Jtt78SMLxzemJu4RRNqFypTwue1uBlDC+zO6Cpjh+D54laptRfFIg/bZ91zR3KOESAsvEfVG9CShRxHocy6Q+6oy852Ry6T8blVP6/SOlvB9A++cMoO/idDQ4yGIKicM98zcenf72Hn3I1h5BiGNM8TBkZQ1OvZ/ItrtQvMAA0x4tbHI4YU0Z8SvKsDoxmCnnbynpL/7HCzPNd56hQq0EyHGtowZmqP9bZ7geyCnAHd449vL/drGSGyvElN6QsQChvZzQUwDSgIrjoMPWcFNGu2pzSnQWWU7BB+DpX3jb7kHC/mLFj3M2Fxv7bCK51HWI6h3/+aZDnC9gbMWMgwWA==</X509Certificate>
				</X509Data>
			</KeyInfo>
		</KeyDescriptor>
		<KeyDescriptor use="encryption">
			<KeyInfo xmlns="http://www.w3.org/2000/09/xmldsig#">
				<X509Data>
					<X509Certificate>MIIFqTCCA5GgAwIBAgIJANnmNJJ15Nh+MA0GCSqGSIb3DQEBCwUAMGsxCzAJBgNVBAYTAkNBMRAwDgYDVQQIDAdPbnRhcmlvMRAwDgYDVQQHDAdUb3JvbnRvMRAwDgYDVQQKDAdQcmVzc2x5MQwwCgYDVQQLDANPcmcxGDAWBgNVBAMMD3d3dy5wcmVzc2x5LmNvbTAeFw0xNzA4MjYwMDA4MThaFw0yNzA4MjQwMDA4MThaMGsxCzAJBgNVBAYTAkNBMRAwDgYDVQQIDAdPbnRhcmlvMRAwDgYDVQQHDAdUb3JvbnRvMRAwDgYDVQQKDAdQcmVzc2x5MQwwCgYDVQQLDANPcmcxGDAWBgNVBAMMD3d3dy5wcmVzc2x5LmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAPHgIgA/6mzofjwQjmFhY7lW8Hh1AG//efBtIRft3IlgqgQ8zmFDvBoo/kvWAUmYRDE/LQiPsjJuMF5gwRwN5wLOpnomfeyiurFOdSBT7lSLqvtZWP35H+FpuXaT/nJ2YupYNHABb6e0veXM3JN2KdoKsVts0RvNnfyi/aeKxnmgrnrRERR0yBdKIcsw2W4hGnB4Xp8vXG8ZNXzZZTVIMrYUAOCVjH+BviB4wqk63K6Nu4KnrVmCEAyw9xpIeHlMGmOnHdyoSUBlicbMJl90uxjjEzN5eAn6J3q+tzFeR/2c6BMJXVRZ2YDb/LWhKoaXK8kwzwyIQxUoXc7Soz4v+uWSNKh9oJSy757T0KlR+cu4z3o1tpjDv/QZc6xN9yJb7/Vg4shbVneHupa51K/HoRiXD/DEmA3daerEvcidj/Xrriui+J7sjXQ7mYu6/ISDrKSnX4R7nJ5FrQeiN/3NApVBGO1bqOi6dhv/GNQrAS5dmdCHjyL104kvyA/G7qdJ1iJVI1PlQEmU8kIpgxYyrMMZWhBfM1/+6PY8r57/NJm/G7u7eFKQQ1hk1x9e7uPfTjcdbCBwSPiPvy59cCkQ9P5NaOaYapmGoquyRnw3ZoqRDnC3PKttt47DzN5OK2mbLyaCppoubzYmZf+lG0nwddcd2sp0GdGHWLT0aiPaGtzzAgMBAAGjUDBOMB0GA1UdDgQWBBSjhCS8oXZKkctM4QyAzLyFSJuaLTAfBgNVHSMEGDAWgBSjhCS8oXZKkctM4QyAzLyFSJuaLTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQDjxydOEhvcpLM3Xoz28dlw4CsU9qev6Lokv5K4fj7qMFi6zkjSVrzQ8C0T2WfuU8eReTXhCwUbT+Vq2X5+S3zplmRhHmbKbclkj0C2LfQpqdqs6JGke9PsQOxkhzcIF4CDqMSrN6q60UeRPxQ8HM0tkh7EIXp83NINHOULDJgGl9yGGpiV00r0iPDh+y6rGEZMoKw1WOUghLkmMLemd8tELXDORgaofsjz14y3le7JiWkaKA6EbmJReSDrmjuqp0O2cs3bqUsHlLQ20VtrmPS1Lw6jABujC6NA0CxwwIY5MRRRnXjTrc31CRlBRhM9f9YpEeZuCy3k7UuK6zeP0cAY3Jtt78SMLxzemJu4RRNqFypTwue1uBlDC+zO6Cpjh+D54laptRfFIg/bZ91zR3KOESAsvEfVG9CShRxHocy6Q+6oy852Ry6T8blVP6/SOlvB9A++cMoO/idDQ4yGIKicM98zcenf72Hn3I1h5BiGNM8TBkZQ1OvZ/ItrtQvMAA0x4tbHI4YU0Z8SvKsDoxmCnnbynpL/7HCzPNd56hQq0EyHGtowZmqP9bZ7geyCnAHd449vL/drGSGyvElN6QsQChvZzQUwDSgIrjoMPWcFNGu2pzSnQWWU7BB+DpX3jb7kHC/mLFj3M2Fxv7bCK51HWI6h3/+aZDnC9gbMWMgwWA==</X509Certificate>
				</X509Data>
			</KeyInfo>
			<EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#aes128-cbc"></EncryptionMethod>
			<EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#aes192-cbc"></EncryptionMethod>
			<EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#aes256-cbc"></EncryptionMethod>
			<EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"></EncryptionMethod>
		</KeyDescriptor>
		<AssertionConsumerService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="http://localhost:1235/saml/acs" index="1"></AssertionConsumerService>
	</SPSSODescriptor>
</EntityDescriptor>`
	assert.Equal(t, expectedOutput, string(out))
}
// TestMakeAuthenticationRequest builds an AuthnRequest from the fixture
// SP and compares its XML serialization against a golden string; the
// IssueInstant is recomputed from Now() so the comparison is
// time-independent. On mismatch, a colorized diff is logged before
// failing.
func TestMakeAuthenticationRequest(t *testing.T) {
	tearUp()
	req, err := testSP.NewAuthnRequest()
	if err != nil {
		t.Fatal(errors.Wrap(err, "failed to create new AuthnRequest"))
	}
	out, err := xml.MarshalIndent(req, "", "\t")
	if err != nil {
		t.Fatal(errors.Wrap(err, "failed to marshal indent AuthnRequest"))
	}
	expectedOutput := `<samlp:AuthnRequest xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" ID="id-MOCKID" Version="2.0" IssueInstant="` + Now().Format(SAMLTimeFormat) + `" Destination="http://localhost:1233/saml/sso" AssertionConsumerServiceURL="http://localhost:1235/saml/acs" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST">
	<Issuer xmlns="urn:oasis:names:tc:SAML:2.0:assertion" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://localhost:1235/saml/service.xml</Issuer>
	<samlp:NameIDPolicy AllowCreate="true" Format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"></samlp:NameIDPolicy>
</samlp:AuthnRequest>`
	if string(out) != expectedOutput {
		t.Log(diffmatchpatch.New().DiffPrettyText(diffmatchpatch.New().DiffMain(string(out), expectedOutput, true)))
		t.Fatal("unexpected output")
	}
}
|
package main
import (
"fmt"
"runtime"
"time"
"github.com/jchiu0/experimental/wstring"
)
// main exercises wstring's copy semantics and then forces a GC to check
// that the wrapped native string is destroyed: per the notes below,
// "Destroying wstring" should be printed before "Exiting".
func main() {
	s := wstring.NewWString()
	s.Set([]byte("helloworld")) // This is a copy. No worries about double freeing.
	data := s.Get()
	fmt.Printf("[%s]\n", string(data))
	fmt.Printf("Length = %d\n", s.Size())
	// s is no longer used. We expect it to be destroyed when GC runs.
	// We want to make sure that std::string's destructor is run.
	// Make sure we see "Destroying wstring" before we see "Exiting".
	fmt.Println("GC start")
	runtime.GC()
	fmt.Println("GC end")
	time.Sleep(time.Second) // Give GC a bit of time.
	fmt.Println("Exiting")
}
|
package common
import "encoding/json"
// MarshalBind copies src into dst by marshaling src to JSON and
// unmarshaling the result into dst, which must be a non-nil pointer.
// Only fields visible to encoding/json are transferred. (The destination
// parameter was previously misspelled "dsc"; Go parameters are
// positional, so the rename is caller-compatible.)
func MarshalBind(src, dst interface{}) error {
	data, err := json.Marshal(src)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, dst)
}
|
package unionfind
// Quick Union
// element 0 1 2 3 4 5 6 7 8 9
// -------------------
// parent  0 1 2 3 4 5 6 7 8 9
// parent[i]: the parent element of element i
// UnionFind2 is a quick-union disjoint-set structure: parent[i] holds the
// parent of element i, and an element is a root when it is its own parent.
type UnionFind2 struct {
	parent []int
	count  int // number of elements
}

// NewUnionFind2 creates a union-find over n elements, each initially in
// its own singleton set.
func NewUnionFind2(n int) *UnionFind2 {
	uf := new(UnionFind2)
	uf.count = n
	uf.parent = make([]int, n)
	for i := 0; i < n; i++ {
		// Every element starts as its own root: no two are connected.
		uf.parent[i] = i
	}
	return uf
}

// Find returns the root of element p, or -1 when p is out of range.
// Cost is proportional to the tree height (not O(1) as previously
// claimed).
func (uf UnionFind2) Find(p int) int {
	// BUG FIX: the upper bound must be exclusive. The old check
	// (p <= uf.count) let p == count through and indexed one past the
	// end of parent, panicking.
	if p < 0 || p >= uf.count {
		return -1
	}
	for p != uf.parent[p] {
		p = uf.parent[p]
	}
	return p
}

// IsConnected reports whether p and q share a root.
func (uf UnionFind2) IsConnected(p, q int) bool {
	return uf.Find(p) == uf.Find(q)
}

// Union merges the sets containing p and q by attaching p's root under
// q's root. Out-of-range arguments are ignored (previously Find's -1
// sentinel was used as an index, panicking).
func (uf *UnionFind2) Union(p, q int) {
	pRoot := uf.Find(p)
	qRoot := uf.Find(q)
	if pRoot == -1 || qRoot == -1 || pRoot == qRoot {
		return
	}
	uf.parent[pRoot] = qRoot
}
|
package geekdo
// CollectionItems is the root node of a collection request.
type CollectionItems struct {
	TotalItems int              `xml:"totalitems,attr"`
	TermsOfUse string           `xml:"termsofuse,attr"`
	PubDate    string           `xml:"pubdate,attr"` // publication date string as provided by the API
	Items      []CollectionItem `xml:"item"`         // one entry per item in the collection
}
// CollectionItem is an item inside a collection, such as a game.
type CollectionItem struct {
	ObjectType string               `xml:"objecttype,attr"`
	ObjectID   int                  `xml:"objectid,attr"`
	SubType    string               `xml:"subtype,attr"`
	CollID     int                  `xml:"collid,attr"` // collection-entry ID, distinct from ObjectID
	Name       CollectionItemName   `xml:"name"`
	YearPublished int               `xml:"yearpublished"`
	Image      string               `xml:"image"`
	Thumbnail  string               `xml:"thumbnail"`
	Stats      CollectionItemStats  `xml:"stats"`
	Status     CollectionItemStatus `xml:"status"`
	NumPlays   int                  `xml:"numplays"`
}
// CollectionItemName is a name and sort index for a collection item.
type CollectionItemName struct {
	SortIndex int    `xml:"sortindex,attr"` // 1-based index of the character sorting starts at
	Value     string `xml:",chardata"`      // the display name itself (element text)
}
// CollectionItemStatus describes the status of the game for the subject user.
// The feed encodes these flags as "0"/"1" attributes, which encoding/xml
// accepts for bool fields.
type CollectionItemStatus struct {
	Own          bool   `xml:"own,attr"`
	PrevOwned    bool   `xml:"prevowned,attr"`
	ForTrade     bool   `xml:"fortrade,attr"`
	Want         bool   `xml:"want,attr"`
	WantToPlay   bool   `xml:"wanttoplay,attr"`
	WantToBuy    bool   `xml:"wanttobuy,attr"`
	Wishlist     bool   `xml:"wishlist,attr"`
	Preordered   bool   `xml:"preordered,attr"`
	LastModified string `xml:"lastmodified,attr"` // timestamp kept as the raw feed string
}
// CollectionItemStats are stats related to the item.
type CollectionItemStats struct {
	MinPlayers  int                       `xml:"minplayers,attr"`
	MaxPlayers  int                       `xml:"maxplayers,attr"`
	PlayingTime int                       `xml:"playingtime,attr"` // NOTE(review): presumably minutes — confirm
	NumOwned    int                       `xml:"numowned,attr"`    // how many users own the item
	Rating      CollectionItemStatsRating `xml:"rating"`
}
// CollectionItemStatsRating is a rating and stats regarding ratings.
// IntValue, FloatValue and ThingStatsRank are declared elsewhere in
// this package.
type CollectionItemStatsRating struct {
	// NOTE(review): the upstream feed can emit "N/A" for an unrated
	// item's value attribute, which would fail to parse into an int —
	// confirm this is only used for rated items.
	Value        int              `xml:"value,attr"`
	UsersRated   IntValue         `xml:"usersrated"`
	Average      FloatValue       `xml:"average"`
	BayesAverage FloatValue       `xml:"bayesaverage"`
	StdDev       FloatValue       `xml:"stddev"`
	Median       IntValue         `xml:"median"`
	Ranks        []ThingStatsRank `xml:"ranks>rank"`
}
|
package mock
import (
"context"
"time"
"github.com/odpf/optimus/core/progress"
"github.com/odpf/optimus/models"
"github.com/stretchr/testify/mock"
)
// Scheduler is a testify-based mock of the scheduler dependency used
// in tests. Every method records its arguments through ms.Called and
// returns whatever the test case stubbed in.
type Scheduler struct {
	mock.Mock
}

// VerifyJob relays to the mock recorder and returns the stubbed error.
func (ms *Scheduler) VerifyJob(ctx context.Context, namespace models.NamespaceSpec, job models.JobSpec) error {
	args := ms.Called(ctx, namespace, job)
	return args.Error(0)
}

// ListJobs returns the stubbed job list and error. The type assertion
// panics if the test stubbed a value that is not []models.Job.
func (ms *Scheduler) ListJobs(ctx context.Context, namespace models.NamespaceSpec, opts models.SchedulerListOptions) ([]models.Job, error) {
	args := ms.Called(ctx, namespace, opts)
	return args.Get(0).([]models.Job), args.Error(1)
}

// DeployJobs relays to the mock recorder and returns the stubbed error.
func (ms *Scheduler) DeployJobs(ctx context.Context, namespace models.NamespaceSpec, jobs []models.JobSpec, obs progress.Observer) error {
	args := ms.Called(ctx, namespace, jobs, obs)
	return args.Error(0)
}

// DeleteJobs relays to the mock recorder and returns the stubbed error.
func (ms *Scheduler) DeleteJobs(ctx context.Context, namespace models.NamespaceSpec, jobNames []string, obs progress.Observer) error {
	args := ms.Called(ctx, namespace, jobNames, obs)
	return args.Error(0)
}

// GetName is the one method that is NOT stubbed: it always returns the
// fixed literal "mocked".
func (ms *Scheduler) GetName() string {
	return "mocked"
}

// Bootstrap relays to the mock recorder and returns the stubbed error.
func (ms *Scheduler) Bootstrap(ctx context.Context, projectSpec models.ProjectSpec) error {
	return ms.Called(ctx, projectSpec).Error(0)
}

// GetJobStatus returns the stubbed status list and error.
func (ms *Scheduler) GetJobStatus(ctx context.Context, projSpec models.ProjectSpec, jobName string) ([]models.JobStatus, error) {
	args := ms.Called(ctx, projSpec, jobName)
	return args.Get(0).([]models.JobStatus), args.Error(1)
}

// Clear relays to the mock recorder and returns the stubbed error.
func (ms *Scheduler) Clear(ctx context.Context, projSpec models.ProjectSpec, jobName string, startDate, endDate time.Time) error {
	args := ms.Called(ctx, projSpec, jobName, startDate, endDate)
	return args.Error(0)
}

// GetJobRunStatus returns the stubbed run-status list and error.
func (ms *Scheduler) GetJobRunStatus(ctx context.Context, projectSpec models.ProjectSpec, jobName string, startDate time.Time,
	endDate time.Time, batchSize int) ([]models.JobStatus, error) {
	args := ms.Called(ctx, projectSpec, jobName, startDate, endDate, batchSize)
	return args.Get(0).([]models.JobStatus), args.Error(1)
}
|
package storage
// AccessTokenService provides access to AccessToken objects.
type AccessTokenService interface {
	// Get returns the AccessToken stored under token, or an error when
	// the lookup fails.
	Get(token string) (*AccessToken, error)
	// Put stores at under token. NOTE(review): overwrite-on-existing
	// semantics assumed — confirm against implementations.
	Put(token string, at AccessToken) error
}
|
package main
import "fmt"
// main demonstrates taking the address of a local variable and
// printing the pointer value, then runs the new(int) demo.
func main() {
	x := 4
	ptr := &x
	fmt.Println(ptr)
	fnNew()
}
// fnNew demonstrates new(int): the pointee starts at the zero value
// and can be mutated through the pointer.
func fnNew() {
	num := new(int)
	fmt.Println("Value:", *num) // Value: 0 — the default (zero) value
	*num = 8                    // overwrite through the pointer
	fmt.Println("Value:", *num) // Value: 8
	newChange()
}
// changeValue squares, in place, the integer that x points to.
func changeValue(x *int) {
	*x *= *x
}
// newChange shows that passing &d lets the callee mutate d in place.
func newChange() {
	d := 5
	fmt.Println("d before:", d) // 5
	changeValue(&d)             // mutate through the pointer
	fmt.Println("d after:", d)  // 25 — the value changed!
}
|
package models
// VirtualizationInfo describes the virtualization environment of a host.
type VirtualizationInfo struct {
	System string // virtualization system name — NOTE(review): exact vocabulary not visible here
	Role   string // the host's role within that system (guest/host?) — TODO confirm at call sites
}
|
package api
import "testing"
// TestDefMatch checks DefMatch's two documented behaviors: an empty
// pattern defaults to ".*", a non-empty pattern is returned unchanged.
func TestDefMatch(t *testing.T) {
	cases := []struct {
		in, want string
	}{
		{"", ".*"},
		{"abc", "abc"},
	}
	for _, c := range cases {
		if got := DefMatch(c.in); got != c.want {
			t.Errorf("DefMatch(%q) = %q, want %q", c.in, got, c.want)
		}
	}
}
|
package catp
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00600102 is the XML document wrapper for the ISO 20022
// catp.006.001.02 message (ATMInquiryRequest).
type Document00600102 struct {
	XMLName xml.Name              `xml:"urn:iso:std:iso:20022:tech:xsd:catp.006.001.02 Document"`
	Message *ATMInquiryRequestV02 `xml:"ATMNqryReq"`
}

// AddMessage allocates the embedded message and returns it so callers
// can populate it in place.
func (d *Document00600102) AddMessage() *ATMInquiryRequestV02 {
	d.Message = new(ATMInquiryRequestV02)
	return d.Message
}
// The ATMInquiryRequest message is sent by an ATM to an ATM manager to request information about a customer (for example card, account).
type ATMInquiryRequestV02 struct {
	// Information related to the protocol management on a segment of the path from the ATM to the acquirer.
	Header *iso20022.Header31 `xml:"Hdr"`
	// Encrypted body of the message.
	ProtectedATMInquiryRequest *iso20022.ContentInformationType10 `xml:"PrtctdATMNqryReq,omitempty"`
	// Information related to the request of an inquiry from an ATM.
	ATMInquiryRequest *iso20022.ATMInquiryRequest2 `xml:"ATMNqryReq,omitempty"`
	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType15 `xml:"SctyTrlr,omitempty"`
}

// AddHeader allocates Header and returns it for in-place population.
func (a *ATMInquiryRequestV02) AddHeader() *iso20022.Header31 {
	a.Header = new(iso20022.Header31)
	return a.Header
}

// AddProtectedATMInquiryRequest allocates the encrypted body and returns it.
func (a *ATMInquiryRequestV02) AddProtectedATMInquiryRequest() *iso20022.ContentInformationType10 {
	a.ProtectedATMInquiryRequest = new(iso20022.ContentInformationType10)
	return a.ProtectedATMInquiryRequest
}

// AddATMInquiryRequest allocates the clear-text body and returns it.
func (a *ATMInquiryRequestV02) AddATMInquiryRequest() *iso20022.ATMInquiryRequest2 {
	a.ATMInquiryRequest = new(iso20022.ATMInquiryRequest2)
	return a.ATMInquiryRequest
}

// AddSecurityTrailer allocates the MAC trailer and returns it.
func (a *ATMInquiryRequestV02) AddSecurityTrailer() *iso20022.ContentInformationType15 {
	a.SecurityTrailer = new(iso20022.ContentInformationType15)
	return a.SecurityTrailer
}
|
package cache
import (
"encoding/json"
"time"
"gopkg.in/redis.v3"
)
const redisStoreName = "Redis Store"
// NewRedisStore creates a new redis store using the given client.
func NewRedisStore(client *redis.Client) Store {
	s := &redisstore{client: client}
	s.config = StoreConfig{StoreName: redisStoreName}
	return s
}
// redisstore is the redis-backed implementation of Store. Entries are
// stored as JSON-encoded cacheddata values with no expiry.
type redisstore struct {
	client *redis.Client // shared redis connection
	config StoreConfig   // mutable store metadata, see SetConfig
}
// Store saves data under cacheKey, wrapped in a cacheddata envelope
// stamped with the current time. The entry never expires (TTL 0).
func (rs *redisstore) Store(cacheKey string, data []byte) error {
	payload, err := json.Marshal(cacheddata{data, time.Now()})
	if err != nil {
		return err
	}
	return rs.client.Set(cacheKey, payload, 0).Err()
}
// Retrieve loads the entry stored under cacheKey. The bool result is
// false on a cache miss (redis.Nil), which is not an error.
func (rs *redisstore) Retrieve(cacheKey string) (*cacheddata, bool, error) {
	raw, err := rs.client.Get(cacheKey).Bytes()
	if err == redis.Nil {
		// Cache miss: no value, no error.
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	entry := &cacheddata{}
	if err := json.Unmarshal(raw, entry); err != nil {
		return nil, false, err
	}
	return entry, true, nil
}
// Delete removes the entry stored under cacheKey; the deleted-key
// count reported by redis is discarded, only the error is surfaced.
func (rs *redisstore) Delete(cacheKey string) error {
	return rs.client.Del(cacheKey).Err()
}
// Config returns the store's current configuration.
func (rs *redisstore) Config() StoreConfig {
	return rs.config
}

// SetConfig replaces the store's configuration.
func (rs *redisstore) SetConfig(config StoreConfig) {
	rs.config = config
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"golang.org/x/net/context"
)
// dumpContainer writes the stdout/stderr logs of one demo container to
// <baseLogsDir>/<demo name>/<created-at>_<short id>.log. It is a no-op
// when that file already exists; every failure is printed and swallowed
// so one bad container does not stop the sweep.
func dumpContainer(cli *client.Client, container types.Container, baseLogsDir string) {
	demoName := container.Labels["rootnroll.demo.name"]
	createdAt := time.Unix(container.Created, 0).UTC()
	logsFilename := fmt.Sprintf("%s_%s.log", createdAt.Format("2006-01-02T15-04-05"), container.ID[:12])
	demoLogsDir := filepath.Join(baseLogsDir, demoName)
	logsFilepath := filepath.Join(demoLogsDir, logsFilename)
	if _, err := os.Stat(logsFilepath); !os.IsNotExist(err) {
		// Do not dump the already dumped container
		return
	}
	if err := os.MkdirAll(demoLogsDir, os.ModePerm); err != nil {
		fmt.Println("Failed to create a directory for demo logs:", err)
		return
	}
	logsOptions := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}
	output, err := cli.ContainerLogs(context.Background(), container.ID, logsOptions)
	if err != nil {
		fmt.Println("Failed to fetch container logs:", err)
		return
	}
	// BUG FIX: ContainerLogs returns an io.ReadCloser that was never
	// closed, leaking the underlying connection on every dump.
	defer output.Close()
	// Write logs to a file
	logsFile, err := os.Create(logsFilepath)
	if err != nil {
		fmt.Println("Failed to create a file:", err)
		return
	}
	defer logsFile.Close()
	logsWriter := bufio.NewWriter(logsFile)
	if _, err := io.Copy(logsWriter, output); err != nil {
		fmt.Println("Failed to write logs to the file:", err)
		return
	}
	if err := logsWriter.Flush(); err != nil {
		fmt.Println("Failed to write logs to the file:", err)
		return
	}
	fmt.Printf("Dumped container: %s (%s)\n", container.ID[:12], demoName)
}
// main dumps the logs of rootnroll demo containers created more than
// LIFETIME minutes ago into BASE_LOGS_DIR (one subdirectory per demo).
func main() {
	if len(os.Args) < 3 {
		fmt.Printf("Dump logs of rootnroll demo containers created more than LIFETIME minutes ago.\n\n")
		fmt.Printf("Usage: %s BASE_LOGS_DIR LIFETIME\n", os.Args[0])
		os.Exit(1)
	}
	baseLogsDir := os.Args[1]
	lifetime, err := strconv.Atoi(os.Args[2])
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Renamed from "filters" — the original local shadowed the imported
	// filters package on the very line that used it.
	filterArgs := filters.NewArgs()
	filterArgs.Add("label", "rootnroll.demo.name")
	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{
		All:     true,
		Filters: filterArgs,
	})
	if err != nil {
		panic(err)
	}
	for _, container := range containers {
		createdAt := time.Unix(container.Created, 0)
		// Dump containers older than the requested lifetime (the old
		// comment hard-coded "20 minutes"; the threshold is the arg).
		if int(time.Since(createdAt).Minutes()) >= lifetime {
			dumpContainer(cli, container, baseLogsDir)
		}
	}
}
|
package timer
import (
"context"
"errors"
"fmt"
"os"
"runtime"
"sync"
"testing"
"time"
)
// TestMain installs a fresh timing wheel (1ms tick, 1000 slots) before
// running the suite, so every test and benchmark in this file shares it.
func TestMain(m *testing.M) {
	defaultWheel = newWheel(context.Background(), time.Millisecond, 1000)
	// call flag.Parse() here if TestMain uses flags
	os.Exit(m.Run())
}

// TestAfterFunc re-arms a zero-delay AfterFunc from inside its own
// callback ten times, then signals completion over c.
func TestAfterFunc(t *testing.T) {
	i := 10
	c := make(chan bool)
	var f func()
	f = func() {
		i--
		if i >= 0 {
			AfterFunc(0, f)
			// NOTE(review): this sleep makes the test take ~10s of wall
			// time; the stdlib version of this test has no sleep here —
			// confirm it is intentional for the wheel implementation.
			time.Sleep(1 * time.Second)
		} else {
			c <- true
		}
	}
	AfterFunc(0, f)
	<-c
}
// benchmark runs bench under RunParallel after first populating the
// timer structures with 2^15 long-lived "garbage" timers per P, so the
// measured operations run against realistically loaded internals. The
// garbage timers are stopped once timing ends.
func benchmark(b *testing.B, bench func(n int)) {
	// Create equal number of garbage timers on each P before starting
	// the benchmark.
	var wg sync.WaitGroup
	garbageAll := make([][]Timer, runtime.GOMAXPROCS(0))
	for i := range garbageAll {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			garbage := make([]Timer, 1<<15)
			for j := range garbage {
				garbage[j] = AfterFunc(time.Hour, nil)
			}
			garbageAll[i] = garbage
		}(i)
	}
	wg.Wait()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			bench(1000)
		}
	})
	b.StopTimer()
	for _, garbage := range garbageAll {
		for _, t := range garbage {
			t.Stop()
		}
	}
}
// BenchmarkAfterFunc measures chained zero-delay callback scheduling:
// each callback re-arms itself n times before signaling completion.
func BenchmarkAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		c := make(chan bool)
		var f func()
		f = func() {
			n--
			if n >= 0 {
				AfterFunc(0, f)
			} else {
				c <- true
			}
		}
		AfterFunc(0, f)
		<-c
	})
}

// BenchmarkAfter measures n sequential 1ns After waits.
func BenchmarkAfter(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			<-After(1)
		}
	})
}

// BenchmarkStop measures creating and immediately stopping a timer.
func BenchmarkStop(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			NewTimer(1 * time.Second).Stop()
		}
	})
}

// BenchmarkSimultaneousAfterFunc fires n zero-delay callbacks at once
// and waits for all of them.
func BenchmarkSimultaneousAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			AfterFunc(0, wg.Done)
		}
		wg.Wait()
	})
}

// BenchmarkStartStop starts n long timers, then stops them all.
func BenchmarkStartStop(b *testing.B) {
	benchmark(b, func(n int) {
		timers := make([]Timer, n)
		for i := 0; i < n; i++ {
			timers[i] = AfterFunc(time.Hour, nil)
		}
		for i := 0; i < n; i++ {
			timers[i].Stop()
		}
	})
}

// BenchmarkReset measures repeatedly resetting a single timer.
func BenchmarkReset(b *testing.B) {
	benchmark(b, func(n int) {
		t := NewTimer(time.Hour)
		for i := 0; i < n; i++ {
			t.Reset(time.Hour)
		}
		t.Stop()
	})
}
// TestAfter checks that After(delay) never fires early: both the
// observed wait and the timestamp delivered on the channel must be at
// least delay past the start time.
func TestAfter(t *testing.T) {
	const delay = 100 * time.Millisecond
	start := time.Now()
	end := <-After(delay)
	delayadj := delay
	if duration := time.Now().Sub(start); duration < delayadj {
		t.Fatalf("After(%s) slept for only %d ns", delay, duration)
	}
	if min := start.Add(delayadj); end.Before(min) {
		t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
	}
}

// TestAfterTick waits for Count sequential After(Delta) ticks and
// checks total elapsed time: at least 90% of the target, and (outside
// -short mode) at most 300% of it.
func TestAfterTick(t *testing.T) {
	const Count = 10
	Delta := 100 * time.Millisecond
	if testing.Short() {
		Delta = 10 * time.Millisecond
	}
	t0 := time.Now()
	for i := 0; i < Count; i++ {
		<-After(Delta)
	}
	t1 := time.Now()
	d := t1.Sub(t0)
	target := Delta * Count
	if d < target*9/10 {
		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
	}
	if !testing.Short() && d > target*30/10 {
		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
	}
}
// TestAfterStop verifies that stopping a timer (both NewTimer and
// AfterFunc flavors) before it fires prevents delivery. The check is
// retried up to 5 times and errors from earlier attempts are only
// logged, to avoid flaking on loaded machines.
func TestAfterStop(t *testing.T) {
	var errs []string
	logErrs := func() {
		for _, e := range errs {
			t.Log(e)
		}
	}
	for i := 0; i < 5; i++ {
		// Unrelated timer to keep the wheel busy during the attempt.
		AfterFunc(100*time.Millisecond, func() {})
		t0 := NewTimer(50 * time.Millisecond)
		c1 := make(chan bool, 1)
		t1 := AfterFunc(150*time.Millisecond, func() { c1 <- true })
		c2 := After(200 * time.Millisecond)
		t0.Stop()
		t1.Stop()
		// Once c2 fires, both stopped timers are past their deadlines:
		// any delivery from them means Stop failed.
		<-c2
		select {
		case <-t0.Done():
			errs = append(errs, "event 0 was not stopped")
			continue
		case <-c1:
			errs = append(errs, "event 1 was not stopped")
			continue
		default:
		}
		// Stopping an already-stopped timer must be safe.
		t1.Stop()
		// Test passed, so all done.
		if len(errs) > 0 {
			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
			logErrs()
		}
		return
	}
	t.Errorf("saw %d errors", len(errs))
	logErrs()
}

// TestAfterQueuing retries testAfterQueuing with growing deltas until
// one attempt passes, to tolerate slow or loaded systems.
func TestAfterQueuing(t *testing.T) {
	// This test flakes out on some systems,
	// so we'll try it a few times before declaring it a failure.
	const attempts = 5
	err := errors.New("!=nil")
	for i := 0; i < attempts && err != nil; i++ {
		delta := time.Duration(20+i*50) * time.Millisecond
		if err = testAfterQueuing(delta); err != nil {
			t.Logf("attempt %v failed: %v", i, err)
		}
	}
	if err != nil {
		t.Fatal(err)
	}
}

// slots is a deliberately shuffled set of delay multipliers (with
// duplicates) used by testAfterQueuing.
var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}

// afterResult pairs a slot with the time its After channel delivered.
type afterResult struct {
	slot int
	t    time.Time
}

// await forwards ac's delivery, tagged with slot, onto result.
func await(slot int, result chan<- afterResult, ac <-chan time.Time) {
	result <- afterResult{slot, <-ac}
}
// testAfterQueuing starts one After per slot (delay = slot*delta in a
// shuffled order) and verifies deliveries arrive in ascending slot
// order and within [target-delta/2, target+delta*10] of their target.
func testAfterQueuing(delta time.Duration) error {
	// make the result channel buffered because we don't want
	// to depend on channel queueing semantics that might
	// possibly change in the future.
	result := make(chan afterResult, len(slots))
	t0 := time.Now()
	for _, slot := range slots {
		go await(slot, result, After(time.Duration(slot)*delta))
	}
	var order []int
	var times []time.Time
	for range slots {
		r := <-result
		order = append(order, r.slot)
		times = append(times, r.t)
	}
	for i := range order {
		if i > 0 && order[i] < order[i-1] {
			return fmt.Errorf("After calls returned out of order: %v", order)
		}
	}
	for i, t := range times {
		dt := t.Sub(t0)
		target := time.Duration(order[i]) * delta
		if dt < target-delta/2 || dt > target+delta*10 {
			return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
		}
	}
	return nil
}
// TestTimerStopStress races 100 goroutines that each schedule a
// 2-second AfterFunc and stop it after 1 second; any callback that
// still fires reports a failure. Skipped under -short.
func TestTimerStopStress(t *testing.T) {
	if testing.Short() {
		return
	}
	for i := 0; i < 100; i++ {
		go func(i int) {
			timer := AfterFunc(2*time.Second, func() {
				t.Errorf("timer %d was not stopped", i)
			})
			time.Sleep(1 * time.Second)
			timer.Stop()
		}(i)
	}
	time.Sleep(3 * time.Second)
}

// testReset arms a 2d timer, resets it to 3d after d has elapsed, then
// checks it has not fired by 3d total but has fired by 5d total.
func testReset(d time.Duration) error {
	t0 := NewTimer(2 * d)
	time.Sleep(d)
	t0.Reset(3 * d)
	time.Sleep(2 * d)
	select {
	case <-t0.Done():
		return errors.New("timer fired early")
	default:
	}
	time.Sleep(2 * d)
	select {
	case <-t0.Done():
	default:
		return errors.New("reset timer did not fire")
	}
	// Resetting an already-fired timer must also be legal.
	t0.Reset(50 * time.Millisecond)
	return nil
}

func TestReset(t *testing.T) {
	// We try to run this test with increasingly larger multiples
	// until one works so slow, loaded hardware isn't as flaky,
	// but without slowing down fast machines unnecessarily.
	const unit = 25 * time.Millisecond
	tries := []time.Duration{
		1 * unit,
		3 * unit,
		7 * unit,
		15 * unit,
	}
	var err error
	for _, d := range tries {
		err = testReset(d)
		if err == nil {
			t.Logf("passed using duration %v", d)
			return
		}
	}
	t.Error(err)
}
// Test that sleeping for an interval so large it overflows does not
// result in a short sleep duration.
func TestOverflowSleep(t *testing.T) {
	// The maximal duration must not fire before a short real timeout.
	const big = time.Duration(int64(1<<63 - 1))
	select {
	case <-After(big):
		t.Fatalf("big timeout fired")
	case <-After(25 * time.Millisecond):
		// OK
	}
	// The minimal (negative) duration must fire immediately, well
	// before a 1s timeout.
	const neg = time.Duration(-1 << 63)
	select {
	case <-After(neg):
		// OK
	case <-After(1 * time.Second):
		t.Fatalf("negative timeout didn't fire")
	}
}
|
package main
import (
f "fmt"
log "github.com/Sirupsen/logrus"
"github.com/etree"
"os"
"os/exec"
)
// CephDriver is the Driver of Ceph.
type CephDriver struct {
	DevDescriptor string  // target device descriptor written into the libvirt XML (e.g. vdb) — TODO confirm
	MountPoint    string
	PoolName      float64 // NOTE(review): Ceph pool names are strings; float64 looks wrong — confirm before widening use
	ImgName       float64 // NOTE(review): Ceph image names are strings; float64 looks wrong — confirm
	GBSize        int     // current size in GB — presumably; verify units at call sites
	NewGBSize     int     // requested size for resizes — presumably; verify
	XmlName       float64 // NOTE(review): an XML file name should be a string
	UUid          float64 // libvirt secret UUID used in XmlDefinition — NOTE(review): should be a string
}
// CreateIMG creates an RBD-backed image of GBSize by shelling out to
// qemu-img; the command's combined output is logged on both paths.
func (ceph CephDriver) CreateIMG(PoolName float64, ImgName float64, GBSize int64) error {
	// BUG FIX: the original called fmt.Sprintf, but this file imports
	// fmt under the alias f, so it did not compile. Also added the
	// "create" subcommand required by qemu-img (the original invoked
	// qemu-img with no subcommand at all).
	output, err := exec.Command("qemu-img", "create", "-f", "rbd",
		f.Sprintf("rbd:%v/%v", PoolName, ImgName), f.Sprintf("%v", GBSize)).CombinedOutput()
	if err != nil {
		log.Errorf("CreateIMG error : %v err %v", string(output), err)
		return err
	}
	log.Info(string(output))
	return nil
}
// DeleteVolume deletes an RBD image by shelling out to `rbd rm`; the
// command's combined output is logged on both paths.
func (ceph CephDriver) DeleteVolume(PoolName float64, ImgName float64) error {
	output, err := exec.Command("rbd", "rm", f.Sprintf("%v/%v", PoolName, ImgName)).CombinedOutput()
	if err != nil {
		log.Errorf("DeleteIMG error : %v err %v", string(output), err)
		return err
	}
	log.Info(string(output))
	return nil
}
// ExtendVolume resizes an existing RBD image to GBSize via
// `qemu-img resize <image> <size>`.
func (ceph CephDriver) ExtendVolume(PoolName float64, ImgName float64, GBSize int64) error {
	// BUG FIX: dropped the stray "rbd" argument — qemu-img resize takes
	// only the image reference and the new size, so the extra token made
	// qemu-img treat "rbd" as the file to resize.
	out, err := exec.Command("qemu-img", "resize",
		f.Sprintf("rbd:%v/%v", PoolName, ImgName), f.Sprintf("%v", GBSize)).CombinedOutput()
	if err != nil {
		log.Errorf("Error %v, Error string %v", err, string(out))
		return err
	}
	return nil
}
// AttachDevice attaches a device XML to a VM via `virsh attach-device`.
// NOTE(review): virsh attach-device expects <domain> <xml-file>; here
// the first argument is named mountpoint — confirm it actually carries
// the domain name.
func (ceph CephDriver) AttachDevice(mountpoint string, XmlName string) error {
	out, err := exec.Command("virsh", "attach-device", f.Sprintf("%v", mountpoint), f.Sprintf("%v", XmlName), "--persistent").CombinedOutput()
	if err != nil {
		log.Errorf("Error %v, Error string %v", err, string(out))
		return err
	}
	return nil
}

// DetachDevice detaches a device XML from a VM via `virsh detach-device`
// (same argument caveat as AttachDevice).
func (ceph CephDriver) DetachDevice(mountpoint string, XmlName string) error {
	out, err := exec.Command("virsh", "detach-device", f.Sprintf("%v", mountpoint), f.Sprintf("%v", XmlName), "--persistent").CombinedOutput()
	if err != nil {
		log.Errorf("Error %v, Error string %v", err, string(out))
		return err
	}
	return nil
}
//Create XML
func (ceph *CephDriver)XmlDefinition(PoolName float64,ImgName float64,DevDescriptor string,XmlName string) {
var CephIpa1 float64
var CephIpa2 float64
var CephIpa3 float64
var CephIpa4 float64
CephIpa := [] float64 {CephIpa1,CephIpa2,CephIpa3,CephIpa4}
doc := etree.NewDocument()
disk :=doc.CreateElement("disk")
disk.CreateAttr("type","network")
disk.CreateAttr("device","disk")
driver := disk.CreateElement("driver")
driver.CreateAttr("name","qemu")
driver.CreateAttr("type","raw")
auth :=disk.CreateElement("auth")
auth.CreateAttr("username","libvirt")
secrect := auth.CreateElement("secrect")
secrect.CreateAttr("type","ceph")
secrect.CreateAttr("uuid",f.Sprintf("%v", ceph.UUid))
source := disk.CreateElement("source")
source.CreateAttr("protocol","rbd")
source.CreateAttr("name",f.Sprintf("%v/%v",PoolName,ImgName))
host := source.CreateElement("host")
for i:= 0;i<len(CephIpa) ;i++ {
host.CreateAttr("name",f.Sprintf("%v",CephIpa[i]))
host.CreateAttr("port","6789")
}
target := disk.CreateElement("target")
target.CreateAttr("dev",f.Sprintf("%v",DevDescriptor))
target.CreateAttr("bus","virtio")
doc.Indent(7+len(CephIpa))
XmlName,err := doc.WriteTo(os.Stdout)
if err !=nil{
log.Errorf("Error %v, Error string %v", err, string(XmlName))
}
} |
package x
// GENERATED BY XO. DO NOT EDIT.
import (
"errors"
"strings"
//"time"
"ms/sun/shared/helper"
"strconv"
"github.com/jmoiron/sqlx"
)
// (shortname .TableNameGo "err" "res" "sqlstr" "db" "XOLog") -}}//(schema .Schema .Table.TableName) -}}// .TableNameGo}}// ProfileMentioned represents a row from 'sun.profile_mentioned'.
// Manually copy this to the project. The trailing __ presumably keeps
// this generated declaration from clashing with the hand-copied
// ProfileMentioned that the methods below are defined on — confirm
// against the project's conventions.
type ProfileMentioned__ struct {
	Id          int `json:"Id"`          // Id -
	ForUserId   int `json:"ForUserId"`   // ForUserId -
	PostId      int `json:"PostId"`      // PostId -
	PostUserId  int `json:"PostUserId"`  // PostUserId -
	PostType    int `json:"PostType"`    // PostType -
	CreatedTime int `json:"CreatedTime"` // CreatedTime -
	// xo fields: persistence flags driving Insert/Update/Delete/Save.
	_exists, _deleted bool
}
// Exists determines if the ProfileMentioned exists in the database.
func (pm *ProfileMentioned) Exists() bool {
	return pm._exists
}

// Deleted provides information if the ProfileMentioned has been deleted from the database.
func (pm *ProfileMentioned) Deleted() bool {
	return pm._deleted
}
// Insert inserts the ProfileMentioned to the database. It refuses rows
// already marked as existing; on success it flips _exists and fires
// the after-insert hook.
// NOTE(review): unlike Replace/Delete, a failed Exec here is returned
// without an XOLogErr call — confirm whether that asymmetry is wanted.
func (pm *ProfileMentioned) Insert(db XODB) error {
	var err error
	// if already exist, bail
	if pm._exists {
		return errors.New("insert failed: already exists")
	}
	// sql insert query, primary key must be provided
	const sqlstr = `INSERT INTO sun.profile_mentioned (` +
		`Id, ForUserId, PostId, PostUserId, PostType, CreatedTime` +
		`) VALUES (` +
		`?, ?, ?, ?, ?, ?` +
		`)`
	// run query
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, pm.Id, pm.ForUserId, pm.PostId, pm.PostUserId, pm.PostType, pm.CreatedTime)
	}
	_, err = db.Exec(sqlstr, pm.Id, pm.ForUserId, pm.PostId, pm.PostUserId, pm.PostType, pm.CreatedTime)
	if err != nil {
		return err
	}
	// set existence
	pm._exists = true
	OnProfileMentioned_AfterInsert(pm)
	return nil
}
// Replace upserts the ProfileMentioned via MySQL REPLACE INTO (the
// original comment was a copy-paste of Insert's). Unlike Insert, it
// does not require the row to be new; on success it marks the row as
// existing and fires the after-insert hook.
func (pm *ProfileMentioned) Replace(db XODB) error {
	var err error
	// sql query
	const sqlstr = `REPLACE INTO sun.profile_mentioned (` +
		`Id, ForUserId, PostId, PostUserId, PostType, CreatedTime` +
		`) VALUES (` +
		`?, ?, ?, ?, ?, ?` +
		`)`
	// run query
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, pm.Id, pm.ForUserId, pm.PostId, pm.PostUserId, pm.PostType, pm.CreatedTime)
	}
	_, err = db.Exec(sqlstr, pm.Id, pm.ForUserId, pm.PostId, pm.PostUserId, pm.PostType, pm.CreatedTime)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return err
	}
	pm._exists = true
	OnProfileMentioned_AfterInsert(pm)
	return nil
}
// Update updates the ProfileMentioned in the database. It refuses rows
// that do not exist or are marked deleted; on success it fires the
// after-update hook.
func (pm *ProfileMentioned) Update(db XODB) error {
	var err error
	// if doesn't exist, bail
	if !pm._exists {
		return errors.New("update failed: does not exist")
	}
	// if deleted, bail
	if pm._deleted {
		return errors.New("update failed: marked for deletion")
	}
	// sql query
	const sqlstr = `UPDATE sun.profile_mentioned SET ` +
		`ForUserId = ?, PostId = ?, PostUserId = ?, PostType = ?, CreatedTime = ?` +
		` WHERE Id = ?`
	// run query
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, pm.ForUserId, pm.PostId, pm.PostUserId, pm.PostType, pm.CreatedTime, pm.Id)
	}
	_, err = db.Exec(sqlstr, pm.ForUserId, pm.PostId, pm.PostUserId, pm.PostType, pm.CreatedTime, pm.Id)
	if err != nil {
		// BUG FIX: the original logged XOLogErr(err) even when err was
		// nil and fired the after-update hook on failure — inconsistent
		// with Insert/Replace/Delete, which log only real errors and run
		// their hooks only on success.
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return err
	}
	OnProfileMentioned_AfterUpdate(pm)
	return nil
}
// Save saves the ProfileMentioned to the database: Update when the row
// already exists, otherwise Replace (not Insert — new rows therefore
// go through REPLACE INTO).
func (pm *ProfileMentioned) Save(db XODB) error {
	if pm.Exists() {
		return pm.Update(db)
	}
	return pm.Replace(db)
}
// Delete deletes the ProfileMentioned from the database. Deleting a
// row that does not exist or is already deleted is a silent no-op; on
// success it marks the row deleted and fires the after-delete hook.
func (pm *ProfileMentioned) Delete(db XODB) error {
	var err error
	// if doesn't exist, bail
	if !pm._exists {
		return nil
	}
	// if deleted, bail
	if pm._deleted {
		return nil
	}
	// sql query
	const sqlstr = `DELETE FROM sun.profile_mentioned WHERE Id = ?`
	// run query
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, pm.Id)
	}
	_, err = db.Exec(sqlstr, pm.Id)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return err
	}
	// set deleted
	pm._deleted = true
	OnProfileMentioned_AfterDelete(pm)
	return nil
}
////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////// Querify gen - ME /////////////////////////////////////////
//.TableNameGo= table name
// _Deleter, _Updater
// orma types
// __ProfileMentioned_Deleter accumulates WHERE clauses for a DELETE
// against sun.profile_mentioned. dollarIndex/isMysql drive placeholder
// rendering (? for MySQL, $n otherwise).
type __ProfileMentioned_Deleter struct {
	wheres      []whereClause
	whereSep    string // combinator between clauses, " AND " or " OR "
	dollarIndex int
	isMysql     bool
}

// __ProfileMentioned_Updater accumulates SET columns and WHERE clauses
// for an UPDATE against sun.profile_mentioned.
type __ProfileMentioned_Updater struct {
	wheres []whereClause
	// updates map[string]interface{}
	updates     []updateCol
	whereSep    string
	dollarIndex int
	isMysql     bool
}

// __ProfileMentioned_Selector accumulates the column list, WHERE
// clauses, ordering and paging for a SELECT against
// sun.profile_mentioned.
type __ProfileMentioned_Selector struct {
	wheres    []whereClause
	selectCol string
	whereSep  string
	orderBy   string //" order by id desc //for ints
	limit     int
	offset    int
	dollarIndex int
	isMysql     bool
}
// NewProfileMentioned_Deleter returns a deleter with AND-combined
// clauses, targeting MySQL placeholders.
func NewProfileMentioned_Deleter() *__ProfileMentioned_Deleter {
	d := __ProfileMentioned_Deleter{whereSep: " AND ", isMysql: true}
	return &d
}

// NewProfileMentioned_Updater returns an updater with AND-combined
// clauses, targeting MySQL placeholders.
func NewProfileMentioned_Updater() *__ProfileMentioned_Updater {
	u := __ProfileMentioned_Updater{whereSep: " AND ", isMysql: true}
	//u.updates = make(map[string]interface{},10)
	return &u
}

// NewProfileMentioned_Selector returns a selector for all columns (*)
// with AND-combined clauses, targeting MySQL placeholders.
func NewProfileMentioned_Selector() *__ProfileMentioned_Selector {
	u := __ProfileMentioned_Selector{whereSep: " AND ", selectCol: "*", isMysql: true}
	return &u
}
/*/// mysql or cockroach ? or $1 handlers
func (m *__ProfileMentioned_Selector)nextDollars(size int) string {
r := DollarsForSqlIn(size,m.dollarIndex,m.isMysql)
m.dollarIndex += size
return r
}
func (m *__ProfileMentioned_Selector)nextDollar() string {
r := DollarsForSqlIn(1,m.dollarIndex,m.isMysql)
m.dollarIndex += 1
return r
}
*/
/////////////////////////////// Where for all /////////////////////////////
//// for ints all selector updater, deleter
/// mysql or cockroach ? or $1 handlers
// nextDollars renders size placeholders for an IN(...) list and
// advances the placeholder counter accordingly.
func (m *__ProfileMentioned_Deleter) nextDollars(size int) string {
	r := DollarsForSqlIn(size, m.dollarIndex, m.isMysql)
	m.dollarIndex += size
	return r
}

// nextDollar renders a single placeholder and advances the counter.
func (m *__ProfileMentioned_Deleter) nextDollar() string {
	r := DollarsForSqlIn(1, m.dollarIndex, m.isMysql)
	m.dollarIndex += 1
	return r
}
////////ints
// Or switches the clause combinator from AND to OR. The separator is
// global to the builder, not per-clause: it applies to every clause.
func (u *__ProfileMentioned_Deleter) Or() *__ProfileMentioned_Deleter {
	u.whereSep = " OR "
	return u
}
// Id_* methods below add WHERE clauses on the Id column and return the
// builder for chaining. NOTE(review): the *_In/*_NotIn variants render
// "IN()" when given an empty slice, which is invalid SQL — confirm
// callers never pass empty lists.
func (u *__ProfileMentioned_Deleter) Id_In(ins []int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

// Id_Ins is the variadic convenience form of Id_In.
func (u *__ProfileMentioned_Deleter) Id_Ins(ins ...int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__ProfileMentioned_Deleter) Id_NotIn(ins []int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " Id NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__ProfileMentioned_Deleter) Id_Eq(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) Id_NotEq(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) Id_LT(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) Id_LE(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) Id_GT(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) Id_GE(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " Id >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
// ForUserId_* methods add WHERE clauses on the ForUserId column (same
// shapes and empty-IN caveat as the Id_* family above this block).
func (u *__ProfileMentioned_Deleter) ForUserId_In(ins []int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " ForUserId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__ProfileMentioned_Deleter) ForUserId_Ins(ins ...int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " ForUserId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__ProfileMentioned_Deleter) ForUserId_NotIn(ins []int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " ForUserId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__ProfileMentioned_Deleter) ForUserId_Eq(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " ForUserId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) ForUserId_NotEq(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " ForUserId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) ForUserId_LT(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " ForUserId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) ForUserId_LE(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " ForUserId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) ForUserId_GT(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " ForUserId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) ForUserId_GE(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " ForUserId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
// PostId_* methods add WHERE clauses on the PostId column (same shapes
// and empty-IN caveat as the Id_* family).
func (u *__ProfileMentioned_Deleter) PostId_In(ins []int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " PostId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__ProfileMentioned_Deleter) PostId_Ins(ins ...int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " PostId IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (u *__ProfileMentioned_Deleter) PostId_NotIn(ins []int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	for _, i := range ins {
		insWhere = append(insWhere, i)
	}
	w.args = insWhere
	w.condition = " PostId NOT IN(" + u.nextDollars(len(ins)) + ") "
	u.wheres = append(u.wheres, w)
	return u
}

func (d *__ProfileMentioned_Deleter) PostId_Eq(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " PostId = " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) PostId_NotEq(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " PostId != " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) PostId_LT(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " PostId < " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) PostId_LE(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " PostId <= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) PostId_GT(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " PostId > " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}

func (d *__ProfileMentioned_Deleter) PostId_GE(val int) *__ProfileMentioned_Deleter {
	w := whereClause{}
	var insWhere []interface{}
	insWhere = append(insWhere, val)
	w.args = insWhere
	w.condition = " PostId >= " + d.nextDollar()
	d.wheres = append(d.wheres, w)
	return d
}
// PostUserId_In restricts the delete to rows whose PostUserId is in ins.
func (u *__ProfileMentioned_Deleter) PostUserId_In(ins []int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostUserId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostUserId_Ins is the variadic form of PostUserId_In.
func (u *__ProfileMentioned_Deleter) PostUserId_Ins(ins ...int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostUserId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostUserId_NotIn restricts the delete to rows whose PostUserId is NOT in ins.
func (u *__ProfileMentioned_Deleter) PostUserId_NotIn(ins []int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostUserId NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostUserId_Eq adds a "PostUserId = ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostUserId_Eq(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_NotEq adds a "PostUserId != ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostUserId_NotEq(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_LT adds a "PostUserId < ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostUserId_LT(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_LE adds a "PostUserId <= ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostUserId_LE(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_GT adds a "PostUserId > ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostUserId_GT(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_GE adds a "PostUserId >= ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostUserId_GE(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_In restricts the delete to rows whose PostType is in ins.
func (u *__ProfileMentioned_Deleter) PostType_In(ins []int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostType IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostType_Ins is the variadic form of PostType_In.
func (u *__ProfileMentioned_Deleter) PostType_Ins(ins ...int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostType IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostType_NotIn restricts the delete to rows whose PostType is NOT in ins.
func (u *__ProfileMentioned_Deleter) PostType_NotIn(ins []int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostType NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostType_Eq adds a "PostType = ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostType_Eq(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_NotEq adds a "PostType != ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostType_NotEq(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_LT adds a "PostType < ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostType_LT(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_LE adds a "PostType <= ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostType_LE(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_GT adds a "PostType > ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostType_GT(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_GE adds a "PostType >= ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) PostType_GE(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_In restricts the delete to rows whose CreatedTime is in ins.
func (u *__ProfileMentioned_Deleter) CreatedTime_In(ins []int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " CreatedTime IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// CreatedTime_Ins is the variadic form of CreatedTime_In.
func (u *__ProfileMentioned_Deleter) CreatedTime_Ins(ins ...int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " CreatedTime IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// CreatedTime_NotIn restricts the delete to rows whose CreatedTime is NOT in ins.
func (u *__ProfileMentioned_Deleter) CreatedTime_NotIn(ins []int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " CreatedTime NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// CreatedTime_Eq adds a "CreatedTime = ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) CreatedTime_Eq(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_NotEq adds a "CreatedTime != ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) CreatedTime_NotEq(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_LT adds a "CreatedTime < ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) CreatedTime_LT(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_LE adds a "CreatedTime <= ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) CreatedTime_LE(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_GT adds a "CreatedTime > ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) CreatedTime_GT(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_GE adds a "CreatedTime >= ?" condition to the deleter's WHERE clause.
func (d *__ProfileMentioned_Deleter) CreatedTime_GE(val int) *__ProfileMentioned_Deleter {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Placeholder helpers: emit MySQL-style "?" markers or Postgres/CockroachDB-style "$N" markers, chosen by isMysql.
// nextDollars returns size comma-separated SQL placeholders (MySQL "?" or
// Postgres/CockroachDB "$N" style, per m.isMysql) and advances the running
// placeholder index by size.
func (m *__ProfileMentioned_Updater) nextDollars(size int) string {
	placeholders := DollarsForSqlIn(size, m.dollarIndex, m.isMysql)
	m.dollarIndex += size
	return placeholders
}

// nextDollar returns a single SQL placeholder and advances the index by one.
func (m *__ProfileMentioned_Updater) nextDollar() string {
	placeholder := DollarsForSqlIn(1, m.dollarIndex, m.isMysql)
	m.dollarIndex++
	return placeholder
}
// ---- int-column WHERE filters ----
// Or switches the separator used between subsequent WHERE clauses from the
// default to " OR ".
func (up *__ProfileMentioned_Updater) Or() *__ProfileMentioned_Updater {
	up.whereSep = " OR "
	return up
}
// Id_In restricts the update to rows whose Id is in ins.
func (u *__ProfileMentioned_Updater) Id_In(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// Id_Ins is the variadic form of Id_In.
func (u *__ProfileMentioned_Updater) Id_Ins(ins ...int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// Id_NotIn restricts the update to rows whose Id is NOT in ins.
func (u *__ProfileMentioned_Updater) Id_NotIn(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " Id NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// Id_Eq adds an "Id = ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) Id_Eq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_NotEq adds an "Id != ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) Id_NotEq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_LT adds an "Id < ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) Id_LT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_LE adds an "Id <= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) Id_LE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_GT adds an "Id > ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) Id_GT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_GE adds an "Id >= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) Id_GE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_In restricts the update to rows whose ForUserId is in ins.
func (u *__ProfileMentioned_Updater) ForUserId_In(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " ForUserId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// ForUserId_Ins is the variadic form of ForUserId_In.
func (u *__ProfileMentioned_Updater) ForUserId_Ins(ins ...int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " ForUserId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// ForUserId_NotIn restricts the update to rows whose ForUserId is NOT in ins.
func (u *__ProfileMentioned_Updater) ForUserId_NotIn(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " ForUserId NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// ForUserId_Eq adds a "ForUserId = ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) ForUserId_Eq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_NotEq adds a "ForUserId != ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) ForUserId_NotEq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_LT adds a "ForUserId < ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) ForUserId_LT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_LE adds a "ForUserId <= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) ForUserId_LE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_GT adds a "ForUserId > ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) ForUserId_GT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_GE adds a "ForUserId >= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) ForUserId_GE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_In restricts the update to rows whose PostId is in ins.
func (u *__ProfileMentioned_Updater) PostId_In(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostId_Ins is the variadic form of PostId_In.
func (u *__ProfileMentioned_Updater) PostId_Ins(ins ...int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostId_NotIn restricts the update to rows whose PostId is NOT in ins.
func (u *__ProfileMentioned_Updater) PostId_NotIn(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostId NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostId_Eq adds a "PostId = ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostId_Eq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_NotEq adds a "PostId != ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostId_NotEq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_LT adds a "PostId < ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostId_LT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_LE adds a "PostId <= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostId_LE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_GT adds a "PostId > ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostId_GT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_GE adds a "PostId >= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostId_GE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_In restricts the update to rows whose PostUserId is in ins.
func (u *__ProfileMentioned_Updater) PostUserId_In(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostUserId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostUserId_Ins is the variadic form of PostUserId_In.
func (u *__ProfileMentioned_Updater) PostUserId_Ins(ins ...int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostUserId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostUserId_NotIn restricts the update to rows whose PostUserId is NOT in ins.
func (u *__ProfileMentioned_Updater) PostUserId_NotIn(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostUserId NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostUserId_Eq adds a "PostUserId = ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostUserId_Eq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_NotEq adds a "PostUserId != ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostUserId_NotEq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_LT adds a "PostUserId < ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostUserId_LT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_LE adds a "PostUserId <= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostUserId_LE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_GT adds a "PostUserId > ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostUserId_GT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_GE adds a "PostUserId >= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostUserId_GE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostUserId >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_In restricts the update to rows whose PostType is in ins.
func (u *__ProfileMentioned_Updater) PostType_In(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostType IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostType_Ins is the variadic form of PostType_In.
func (u *__ProfileMentioned_Updater) PostType_Ins(ins ...int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostType IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostType_NotIn restricts the update to rows whose PostType is NOT in ins.
func (u *__ProfileMentioned_Updater) PostType_NotIn(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostType NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostType_Eq adds a "PostType = ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostType_Eq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_NotEq adds a "PostType != ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostType_NotEq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_LT adds a "PostType < ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostType_LT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_LE adds a "PostType <= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostType_LE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_GT adds a "PostType > ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostType_GT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostType_GE adds a "PostType >= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) PostType_GE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostType >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_In restricts the update to rows whose CreatedTime is in ins.
func (u *__ProfileMentioned_Updater) CreatedTime_In(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " CreatedTime IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// CreatedTime_Ins is the variadic form of CreatedTime_In.
func (u *__ProfileMentioned_Updater) CreatedTime_Ins(ins ...int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " CreatedTime IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// CreatedTime_NotIn restricts the update to rows whose CreatedTime is NOT in ins.
func (u *__ProfileMentioned_Updater) CreatedTime_NotIn(ins []int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " CreatedTime NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// CreatedTime_Eq adds a "CreatedTime = ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) CreatedTime_Eq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_NotEq adds a "CreatedTime != ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) CreatedTime_NotEq(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_LT adds a "CreatedTime < ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) CreatedTime_LT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_LE adds a "CreatedTime <= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) CreatedTime_LE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_GT adds a "CreatedTime > ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) CreatedTime_GT(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// CreatedTime_GE adds a "CreatedTime >= ?" condition to the updater's WHERE clause.
func (d *__ProfileMentioned_Updater) CreatedTime_GE(val int) *__ProfileMentioned_Updater {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " CreatedTime >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Placeholder helpers: emit MySQL-style "?" markers or Postgres/CockroachDB-style "$N" markers, chosen by isMysql.
// nextDollars returns size comma-separated SQL placeholders (MySQL "?" or
// Postgres/CockroachDB "$N" style, per m.isMysql) and advances the running
// placeholder index by size.
func (m *__ProfileMentioned_Selector) nextDollars(size int) string {
	placeholders := DollarsForSqlIn(size, m.dollarIndex, m.isMysql)
	m.dollarIndex += size
	return placeholders
}

// nextDollar returns a single SQL placeholder and advances the index by one.
func (m *__ProfileMentioned_Selector) nextDollar() string {
	placeholder := DollarsForSqlIn(1, m.dollarIndex, m.isMysql)
	m.dollarIndex++
	return placeholder
}
// ---- int-column WHERE filters ----
// Or switches the separator used between subsequent WHERE clauses from the
// default to " OR ".
func (sel *__ProfileMentioned_Selector) Or() *__ProfileMentioned_Selector {
	sel.whereSep = " OR "
	return sel
}
// Id_In restricts the select to rows whose Id is in ins.
func (u *__ProfileMentioned_Selector) Id_In(ins []int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// Id_Ins is the variadic form of Id_In.
func (u *__ProfileMentioned_Selector) Id_Ins(ins ...int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " Id IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// Id_NotIn restricts the select to rows whose Id is NOT in ins.
func (u *__ProfileMentioned_Selector) Id_NotIn(ins []int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " Id NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// Id_Eq adds an "Id = ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) Id_Eq(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_NotEq adds an "Id != ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) Id_NotEq(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_LT adds an "Id < ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) Id_LT(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_LE adds an "Id <= ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) Id_LE(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_GT adds an "Id > ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) Id_GT(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// Id_GE adds an "Id >= ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) Id_GE(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " Id >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_In restricts the select to rows whose ForUserId is in ins.
func (u *__ProfileMentioned_Selector) ForUserId_In(ins []int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " ForUserId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// ForUserId_Ins is the variadic form of ForUserId_In.
func (u *__ProfileMentioned_Selector) ForUserId_Ins(ins ...int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " ForUserId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// ForUserId_NotIn restricts the select to rows whose ForUserId is NOT in ins.
func (u *__ProfileMentioned_Selector) ForUserId_NotIn(ins []int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " ForUserId NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// ForUserId_Eq adds a "ForUserId = ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) ForUserId_Eq(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_NotEq adds a "ForUserId != ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) ForUserId_NotEq(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_LT adds a "ForUserId < ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) ForUserId_LT(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_LE adds a "ForUserId <= ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) ForUserId_LE(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_GT adds a "ForUserId > ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) ForUserId_GT(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// ForUserId_GE adds a "ForUserId >= ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) ForUserId_GE(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " ForUserId >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_In restricts the select to rows whose PostId is in ins.
func (u *__ProfileMentioned_Selector) PostId_In(ins []int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostId_Ins is the variadic form of PostId_In.
func (u *__ProfileMentioned_Selector) PostId_Ins(ins ...int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostId IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostId_NotIn restricts the select to rows whose PostId is NOT in ins.
func (u *__ProfileMentioned_Selector) PostId_NotIn(ins []int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
for _, i := range ins {
insWhere = append(insWhere, i)
}
w.args = insWhere
w.condition = " PostId NOT IN(" + u.nextDollars(len(ins)) + ") "
u.wheres = append(u.wheres, w)
return u
}
// PostId_Eq adds a "PostId = ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) PostId_Eq(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId = " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_NotEq adds a "PostId != ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) PostId_NotEq(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId != " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_LT adds a "PostId < ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) PostId_LT(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId < " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_LE adds a "PostId <= ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) PostId_LE(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId <= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_GT adds a "PostId > ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) PostId_GT(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId > " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostId_GE adds a "PostId >= ?" condition to the selector's WHERE clause.
func (d *__ProfileMentioned_Selector) PostId_GE(val int) *__ProfileMentioned_Selector {
w := whereClause{}
var insWhere []interface{}
insWhere = append(insWhere, val)
w.args = insWhere
w.condition = " PostId >= " + d.nextDollar()
d.wheres = append(d.wheres, w)
return d
}
// PostUserId_In keeps rows whose PostUserId equals one of ins.
func (s *__ProfileMentioned_Selector) PostUserId_In(ins []int) *__ProfileMentioned_Selector {
	args := make([]interface{}, len(ins))
	for i, v := range ins {
		args[i] = v
	}
	s.wheres = append(s.wheres, whereClause{
		args:      args,
		condition: " PostUserId IN(" + s.nextDollars(len(ins)) + ") ",
	})
	return s
}

// PostUserId_Ins is the variadic form of PostUserId_In.
func (s *__ProfileMentioned_Selector) PostUserId_Ins(ins ...int) *__ProfileMentioned_Selector {
	args := make([]interface{}, len(ins))
	for i, v := range ins {
		args[i] = v
	}
	s.wheres = append(s.wheres, whereClause{
		args:      args,
		condition: " PostUserId IN(" + s.nextDollars(len(ins)) + ") ",
	})
	return s
}

// PostUserId_NotIn keeps rows whose PostUserId is not one of ins.
func (s *__ProfileMentioned_Selector) PostUserId_NotIn(ins []int) *__ProfileMentioned_Selector {
	args := make([]interface{}, len(ins))
	for i, v := range ins {
		args[i] = v
	}
	s.wheres = append(s.wheres, whereClause{
		args:      args,
		condition: " PostUserId NOT IN(" + s.nextDollars(len(ins)) + ") ",
	})
	return s
}

// PostUserId_Eq keeps rows where PostUserId = val.
func (s *__ProfileMentioned_Selector) PostUserId_Eq(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostUserId = " + s.nextDollar(),
	})
	return s
}

// PostUserId_NotEq keeps rows where PostUserId != val.
func (s *__ProfileMentioned_Selector) PostUserId_NotEq(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostUserId != " + s.nextDollar(),
	})
	return s
}

// PostUserId_LT keeps rows where PostUserId < val.
func (s *__ProfileMentioned_Selector) PostUserId_LT(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostUserId < " + s.nextDollar(),
	})
	return s
}

// PostUserId_LE keeps rows where PostUserId <= val.
func (s *__ProfileMentioned_Selector) PostUserId_LE(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostUserId <= " + s.nextDollar(),
	})
	return s
}

// PostUserId_GT keeps rows where PostUserId > val.
func (s *__ProfileMentioned_Selector) PostUserId_GT(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostUserId > " + s.nextDollar(),
	})
	return s
}

// PostUserId_GE keeps rows where PostUserId >= val.
func (s *__ProfileMentioned_Selector) PostUserId_GE(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostUserId >= " + s.nextDollar(),
	})
	return s
}
// PostType_In keeps rows whose PostType equals one of ins.
func (s *__ProfileMentioned_Selector) PostType_In(ins []int) *__ProfileMentioned_Selector {
	args := make([]interface{}, len(ins))
	for i, v := range ins {
		args[i] = v
	}
	s.wheres = append(s.wheres, whereClause{
		args:      args,
		condition: " PostType IN(" + s.nextDollars(len(ins)) + ") ",
	})
	return s
}

// PostType_Ins is the variadic form of PostType_In.
func (s *__ProfileMentioned_Selector) PostType_Ins(ins ...int) *__ProfileMentioned_Selector {
	args := make([]interface{}, len(ins))
	for i, v := range ins {
		args[i] = v
	}
	s.wheres = append(s.wheres, whereClause{
		args:      args,
		condition: " PostType IN(" + s.nextDollars(len(ins)) + ") ",
	})
	return s
}

// PostType_NotIn keeps rows whose PostType is not one of ins.
func (s *__ProfileMentioned_Selector) PostType_NotIn(ins []int) *__ProfileMentioned_Selector {
	args := make([]interface{}, len(ins))
	for i, v := range ins {
		args[i] = v
	}
	s.wheres = append(s.wheres, whereClause{
		args:      args,
		condition: " PostType NOT IN(" + s.nextDollars(len(ins)) + ") ",
	})
	return s
}

// PostType_Eq keeps rows where PostType = val.
func (s *__ProfileMentioned_Selector) PostType_Eq(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostType = " + s.nextDollar(),
	})
	return s
}

// PostType_NotEq keeps rows where PostType != val.
func (s *__ProfileMentioned_Selector) PostType_NotEq(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostType != " + s.nextDollar(),
	})
	return s
}

// PostType_LT keeps rows where PostType < val.
func (s *__ProfileMentioned_Selector) PostType_LT(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostType < " + s.nextDollar(),
	})
	return s
}

// PostType_LE keeps rows where PostType <= val.
func (s *__ProfileMentioned_Selector) PostType_LE(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostType <= " + s.nextDollar(),
	})
	return s
}

// PostType_GT keeps rows where PostType > val.
func (s *__ProfileMentioned_Selector) PostType_GT(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostType > " + s.nextDollar(),
	})
	return s
}

// PostType_GE keeps rows where PostType >= val.
func (s *__ProfileMentioned_Selector) PostType_GE(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " PostType >= " + s.nextDollar(),
	})
	return s
}
// CreatedTime_In keeps rows whose CreatedTime equals one of ins.
func (s *__ProfileMentioned_Selector) CreatedTime_In(ins []int) *__ProfileMentioned_Selector {
	args := make([]interface{}, len(ins))
	for i, v := range ins {
		args[i] = v
	}
	s.wheres = append(s.wheres, whereClause{
		args:      args,
		condition: " CreatedTime IN(" + s.nextDollars(len(ins)) + ") ",
	})
	return s
}

// CreatedTime_Ins is the variadic form of CreatedTime_In.
func (s *__ProfileMentioned_Selector) CreatedTime_Ins(ins ...int) *__ProfileMentioned_Selector {
	args := make([]interface{}, len(ins))
	for i, v := range ins {
		args[i] = v
	}
	s.wheres = append(s.wheres, whereClause{
		args:      args,
		condition: " CreatedTime IN(" + s.nextDollars(len(ins)) + ") ",
	})
	return s
}

// CreatedTime_NotIn keeps rows whose CreatedTime is not one of ins.
func (s *__ProfileMentioned_Selector) CreatedTime_NotIn(ins []int) *__ProfileMentioned_Selector {
	args := make([]interface{}, len(ins))
	for i, v := range ins {
		args[i] = v
	}
	s.wheres = append(s.wheres, whereClause{
		args:      args,
		condition: " CreatedTime NOT IN(" + s.nextDollars(len(ins)) + ") ",
	})
	return s
}

// CreatedTime_Eq keeps rows where CreatedTime = val.
func (s *__ProfileMentioned_Selector) CreatedTime_Eq(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " CreatedTime = " + s.nextDollar(),
	})
	return s
}

// CreatedTime_NotEq keeps rows where CreatedTime != val.
func (s *__ProfileMentioned_Selector) CreatedTime_NotEq(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " CreatedTime != " + s.nextDollar(),
	})
	return s
}

// CreatedTime_LT keeps rows where CreatedTime < val.
func (s *__ProfileMentioned_Selector) CreatedTime_LT(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " CreatedTime < " + s.nextDollar(),
	})
	return s
}

// CreatedTime_LE keeps rows where CreatedTime <= val.
func (s *__ProfileMentioned_Selector) CreatedTime_LE(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " CreatedTime <= " + s.nextDollar(),
	})
	return s
}

// CreatedTime_GT keeps rows where CreatedTime > val.
func (s *__ProfileMentioned_Selector) CreatedTime_GT(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " CreatedTime > " + s.nextDollar(),
	})
	return s
}

// CreatedTime_GE keeps rows where CreatedTime >= val.
func (s *__ProfileMentioned_Selector) CreatedTime_GE(val int) *__ProfileMentioned_Selector {
	s.wheres = append(s.wheres, whereClause{
		args:      []interface{}{val},
		condition: " CreatedTime >= " + s.nextDollar(),
	})
	return s
}
///// for strings //copy of above with type int -> string + rm if eq + $ms_str_cond
////////ints
////////ints
////////ints
/// End of WHERE-clause builders for selectors, updaters, and deleters
/////////////////////////////// Updater /////////////////////////////
//ints
// Id queues "Id = ?" with newVal for the pending UPDATE.
func (u *__ProfileMentioned_Updater) Id(newVal int) *__ProfileMentioned_Updater {
	up := updateCol{" Id = " + u.nextDollar(), newVal}
	u.updates = append(u.updates, up)
	return u
}

// Id_Increment queues "Id = Id + count"; a negative count decrements.
// A zero count queues nothing.
func (u *__ProfileMentioned_Updater) Id_Increment(count int) *__ProfileMentioned_Updater {
	if count > 0 {
		up := updateCol{" Id = Id+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		// BUG FIX: bind the positive magnitude. Binding the raw negative value
		// produced "Id = Id - (-n)", which incremented instead of decrementing.
		up := updateCol{" Id = Id- " + u.nextDollar(), -count}
		u.updates = append(u.updates, up)
	}
	return u
}
//string
//ints
// ForUserId queues "ForUserId = ?" with newVal for the pending UPDATE.
func (u *__ProfileMentioned_Updater) ForUserId(newVal int) *__ProfileMentioned_Updater {
	up := updateCol{" ForUserId = " + u.nextDollar(), newVal}
	u.updates = append(u.updates, up)
	return u
}

// ForUserId_Increment queues "ForUserId = ForUserId + count"; a negative
// count decrements. A zero count queues nothing.
func (u *__ProfileMentioned_Updater) ForUserId_Increment(count int) *__ProfileMentioned_Updater {
	if count > 0 {
		up := updateCol{" ForUserId = ForUserId+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		// BUG FIX: bind the positive magnitude. Binding the raw negative value
		// produced "col = col - (-n)", which incremented instead of decrementing.
		up := updateCol{" ForUserId = ForUserId- " + u.nextDollar(), -count}
		u.updates = append(u.updates, up)
	}
	return u
}
//string
//ints
// PostId queues "PostId = ?" with newVal for the pending UPDATE.
func (u *__ProfileMentioned_Updater) PostId(newVal int) *__ProfileMentioned_Updater {
	up := updateCol{" PostId = " + u.nextDollar(), newVal}
	u.updates = append(u.updates, up)
	return u
}

// PostId_Increment queues "PostId = PostId + count"; a negative count
// decrements. A zero count queues nothing.
func (u *__ProfileMentioned_Updater) PostId_Increment(count int) *__ProfileMentioned_Updater {
	if count > 0 {
		up := updateCol{" PostId = PostId+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		// BUG FIX: bind the positive magnitude. Binding the raw negative value
		// produced "col = col - (-n)", which incremented instead of decrementing.
		up := updateCol{" PostId = PostId- " + u.nextDollar(), -count}
		u.updates = append(u.updates, up)
	}
	return u
}
//string
//ints
// PostUserId queues "PostUserId = ?" with newVal for the pending UPDATE.
func (u *__ProfileMentioned_Updater) PostUserId(newVal int) *__ProfileMentioned_Updater {
	up := updateCol{" PostUserId = " + u.nextDollar(), newVal}
	u.updates = append(u.updates, up)
	return u
}

// PostUserId_Increment queues "PostUserId = PostUserId + count"; a negative
// count decrements. A zero count queues nothing.
func (u *__ProfileMentioned_Updater) PostUserId_Increment(count int) *__ProfileMentioned_Updater {
	if count > 0 {
		up := updateCol{" PostUserId = PostUserId+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		// BUG FIX: bind the positive magnitude. Binding the raw negative value
		// produced "col = col - (-n)", which incremented instead of decrementing.
		up := updateCol{" PostUserId = PostUserId- " + u.nextDollar(), -count}
		u.updates = append(u.updates, up)
	}
	return u
}
//string
//ints
// PostType queues "PostType = ?" with newVal for the pending UPDATE.
func (u *__ProfileMentioned_Updater) PostType(newVal int) *__ProfileMentioned_Updater {
	up := updateCol{" PostType = " + u.nextDollar(), newVal}
	u.updates = append(u.updates, up)
	return u
}

// PostType_Increment queues "PostType = PostType + count"; a negative count
// decrements. A zero count queues nothing.
func (u *__ProfileMentioned_Updater) PostType_Increment(count int) *__ProfileMentioned_Updater {
	if count > 0 {
		up := updateCol{" PostType = PostType+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		// BUG FIX: bind the positive magnitude. Binding the raw negative value
		// produced "col = col - (-n)", which incremented instead of decrementing.
		up := updateCol{" PostType = PostType- " + u.nextDollar(), -count}
		u.updates = append(u.updates, up)
	}
	return u
}
//string
//ints
// CreatedTime queues "CreatedTime = ?" with newVal for the pending UPDATE.
func (u *__ProfileMentioned_Updater) CreatedTime(newVal int) *__ProfileMentioned_Updater {
	up := updateCol{" CreatedTime = " + u.nextDollar(), newVal}
	u.updates = append(u.updates, up)
	return u
}

// CreatedTime_Increment queues "CreatedTime = CreatedTime + count"; a negative
// count decrements. A zero count queues nothing.
func (u *__ProfileMentioned_Updater) CreatedTime_Increment(count int) *__ProfileMentioned_Updater {
	if count > 0 {
		up := updateCol{" CreatedTime = CreatedTime+ " + u.nextDollar(), count}
		u.updates = append(u.updates, up)
	}
	if count < 0 {
		// BUG FIX: bind the positive magnitude. Binding the raw negative value
		// produced "col = col - (-n)", which incremented instead of decrementing.
		up := updateCol{" CreatedTime = CreatedTime- " + u.nextDollar(), -count}
		u.updates = append(u.updates, up)
	}
	return u
}
//string
/////////////////////////////////////////////////////////////////////
/////////////////////// Selector ///////////////////////////////////
//Select_* can just be used with: .GetString() , .GetStringSlice(), .GetInt() ..GetIntSlice()
// OrderBy_* setters overwrite (not append to) the selector's single ORDER BY
// clause; Select_* setters overwrite the single selected column; Limit/Offset
// set paging. All return the receiver for chaining.
func (u *__ProfileMentioned_Selector) OrderBy_Id_Desc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY Id DESC "
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_Id_Asc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY Id ASC "
	return u
}
func (u *__ProfileMentioned_Selector) Select_Id() *__ProfileMentioned_Selector {
	u.selectCol = "Id"
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_ForUserId_Desc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY ForUserId DESC "
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_ForUserId_Asc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY ForUserId ASC "
	return u
}
func (u *__ProfileMentioned_Selector) Select_ForUserId() *__ProfileMentioned_Selector {
	u.selectCol = "ForUserId"
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_PostId_Desc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY PostId DESC "
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_PostId_Asc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY PostId ASC "
	return u
}
func (u *__ProfileMentioned_Selector) Select_PostId() *__ProfileMentioned_Selector {
	u.selectCol = "PostId"
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_PostUserId_Desc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY PostUserId DESC "
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_PostUserId_Asc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY PostUserId ASC "
	return u
}
func (u *__ProfileMentioned_Selector) Select_PostUserId() *__ProfileMentioned_Selector {
	u.selectCol = "PostUserId"
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_PostType_Desc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY PostType DESC "
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_PostType_Asc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY PostType ASC "
	return u
}
func (u *__ProfileMentioned_Selector) Select_PostType() *__ProfileMentioned_Selector {
	u.selectCol = "PostType"
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_CreatedTime_Desc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY CreatedTime DESC "
	return u
}
func (u *__ProfileMentioned_Selector) OrderBy_CreatedTime_Asc() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY CreatedTime ASC "
	return u
}
func (u *__ProfileMentioned_Selector) Select_CreatedTime() *__ProfileMentioned_Selector {
	u.selectCol = "CreatedTime"
	return u
}
// Limit sets the LIMIT; 0 means "no LIMIT clause" (see _stoSql).
func (u *__ProfileMentioned_Selector) Limit(num int) *__ProfileMentioned_Selector {
	u.limit = num
	return u
}
// Offset sets the OFFSET; 0 means "no OFFSET clause" (see _stoSql).
func (u *__ProfileMentioned_Selector) Offset(num int) *__ProfileMentioned_Selector {
	u.offset = num
	return u
}
// Order_Rand orders results randomly (MySQL RAND()).
func (u *__ProfileMentioned_Selector) Order_Rand() *__ProfileMentioned_Selector {
	u.orderBy = " ORDER BY RAND() "
	return u
}
///////////////////////// Queryer Selector //////////////////////////////////
// _stoSql renders the accumulated selector state into a SELECT statement
// plus its ordered bind arguments.
func (u *__ProfileMentioned_Selector) _stoSql() (string, []interface{}) {
	where, whereArgs := whereClusesToSql(u.wheres, u.whereSep)
	query := "SELECT " + u.selectCol + " FROM sun.profile_mentioned"
	if strings.Trim(where, " ") != "" {
		query += " WHERE " + where
	}
	query += u.orderBy // empty string when no ordering was requested
	if u.limit != 0 {
		query += " LIMIT " + strconv.Itoa(u.limit)
	}
	if u.offset != 0 {
		query += " OFFSET " + strconv.Itoa(u.offset)
	}
	return query, whereArgs
}
// GetRow executes the built query and scans the first matching row.
// On success the row is flagged _exists (loaded from the DB) and the
// LoadOne hook runs. Propagates the driver error when nothing matches.
func (u *__ProfileMentioned_Selector) GetRow(db *sqlx.DB) (*ProfileMentioned, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, whereArgs)
	}
	row := &ProfileMentioned{}
	//by Sqlx
	err = db.Get(row, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return nil, err
	}
	row._exists = true
	OnProfileMentioned_LoadOne(row)
	return row, nil
}

// GetRows executes the built query and scans all matching rows. Each row is
// flagged _exists, then the LoadMany hook runs. Unsafe() tolerates columns
// with no matching struct field.
func (u *__ProfileMentioned_Selector) GetRows(db *sqlx.DB) ([]*ProfileMentioned, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, whereArgs)
	}
	var rows []*ProfileMentioned
	//by Sqlx
	err = db.Unsafe().Select(&rows, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return nil, err
	}
	for i := 0; i < len(rows); i++ {
		rows[i]._exists = true
	}
	OnProfileMentioned_LoadMany(rows)
	return rows, nil
}
//dep use GetRows()
// GetRows2 is like GetRows but returns rows by value.
// Deprecated: use GetRows instead.
func (u *__ProfileMentioned_Selector) GetRows2(db *sqlx.DB) ([]ProfileMentioned, error) {
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, whereArgs)
	}
	var rows []*ProfileMentioned
	//by Sqlx
	if err := db.Unsafe().Select(&rows, sqlstr, whereArgs...); err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return nil, err
	}
	for _, r := range rows {
		r._exists = true
	}
	OnProfileMentioned_LoadMany(rows)
	// Copy out of the pointer slice after the hook has run.
	out := make([]ProfileMentioned, len(rows))
	for i, r := range rows {
		out[i] = *r
	}
	return out, nil
}
// GetString executes the built query and scans a single scalar result as a
// string (use with a Select_* column).
func (u *__ProfileMentioned_Selector) GetString(db *sqlx.DB) (string, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, whereArgs)
	}
	var res string
	//by Sqlx
	err = db.Get(&res, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return "", err
	}
	return res, nil
}

// GetStringSlice executes the built query and scans one string per row.
func (u *__ProfileMentioned_Selector) GetStringSlice(db *sqlx.DB) ([]string, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, whereArgs)
	}
	var rows []string
	//by Sqlx
	err = db.Select(&rows, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return nil, err
	}
	return rows, nil
}

// GetIntSlice executes the built query and scans one int per row.
func (u *__ProfileMentioned_Selector) GetIntSlice(db *sqlx.DB) ([]int, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, whereArgs)
	}
	var rows []int
	//by Sqlx
	err = db.Select(&rows, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return nil, err
	}
	return rows, nil
}

// GetInt executes the built query and scans a single scalar result as an int
// (typical for COUNT/aggregate selections).
func (u *__ProfileMentioned_Selector) GetInt(db *sqlx.DB) (int, error) {
	var err error
	sqlstr, whereArgs := u._stoSql()
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, whereArgs)
	}
	var res int
	//by Sqlx
	err = db.Get(&res, sqlstr, whereArgs...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return 0, err
	}
	return res, nil
}
///////////////////////// Queryer Update Delete //////////////////////////////////
// Update executes the accumulated SET clauses against sun.profile_mentioned,
// constrained by the accumulated WHERE clauses, and returns the number of
// affected rows. With no WHERE clauses every row in the table is updated.
func (u *__ProfileMentioned_Updater) Update(db XODB) (int, error) {
	var err error
	var updateArgs []interface{}
	var sqlUpdateArr []string
	for _, up := range u.updates {
		sqlUpdateArr = append(sqlUpdateArr, up.col)
		updateArgs = append(updateArgs, up.val)
	}
	sqlUpdate := strings.Join(sqlUpdateArr, ",")
	sqlWherrs, whereArgs := whereClusesToSql(u.wheres, u.whereSep)
	// Placeholder order must match the statement: SET values first, then
	// WHERE values.
	var allArgs []interface{}
	allArgs = append(allArgs, updateArgs...)
	allArgs = append(allArgs, whereArgs...)
	sqlstr := `UPDATE sun.profile_mentioned SET ` + sqlUpdate
	if len(strings.Trim(sqlWherrs, " ")) > 0 { // skip WHERE when no clauses were added
		sqlstr += " WHERE " + sqlWherrs
	}
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, allArgs)
	}
	res, err := db.Exec(sqlstr, allArgs...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return 0, err
	}
	num, err := res.RowsAffected()
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return 0, err
	}
	return int(num), nil
}
// Delete executes a DELETE constrained by the accumulated WHERE clauses and
// returns the number of deleted rows.
// NOTE(review): unlike Update, there is no guard for an empty wheres slice —
// with no clauses this builds "DELETE ... WHERE " which fails at the server;
// confirm callers always add at least one clause.
func (d *__ProfileMentioned_Deleter) Delete(db XODB) (int, error) {
	var err error
	var wheresArr []string
	for _, w := range d.wheres {
		wheresArr = append(wheresArr, w.condition)
	}
	wheresStr := strings.Join(wheresArr, d.whereSep)
	// Collect bind args in the same clause order as the conditions above.
	var args []interface{}
	for _, w := range d.wheres {
		args = append(args, w.args...)
	}
	sqlstr := "DELETE FROM sun.profile_mentioned WHERE " + wheresStr
	// run query
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, args)
	}
	res, err := db.Exec(sqlstr, args...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return 0, err
	}
	// retrieve affected-row count
	num, err := res.RowsAffected()
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return 0, err
	}
	return int(num), nil
}
///////////////////////// Mass insert - replace for ProfileMentioned ////////////////
// MassInsert_ProfileMentioned inserts all rows into sun.profile_mentioned
// with a single multi-row INSERT statement. Returns an error when rows is
// empty (nothing would be inserted).
func MassInsert_ProfileMentioned(rows []ProfileMentioned, db XODB) error {
	if len(rows) == 0 {
		return errors.New("rows slice should not be empty - inserted nothing")
	}
	var err error
	ln := len(rows)
	// one "(?,?,?,?,?,?)" placeholder group of 6 columns per row
	insVals := helper.SqlManyDollars(6, ln, true)
	// sql query
	sqlstr := "INSERT INTO sun.profile_mentioned (" +
		"Id, ForUserId, PostId, PostUserId, PostType, CreatedTime" +
		") VALUES " + insVals
	// BUG FIX: capacity was ln*5, but 6 values are appended per row (the old
	// "5 fields" comment was stale), forcing a reallocation on large batches.
	vals := make([]interface{}, 0, ln*6) // 6 fields
	for _, row := range rows {
		vals = append(vals, row.Id)
		vals = append(vals, row.ForUserId)
		vals = append(vals, row.PostId)
		vals = append(vals, row.PostUserId)
		vals = append(vals, row.PostType)
		vals = append(vals, row.CreatedTime)
	}
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, " MassInsert len = ", ln, vals)
	}
	_, err = db.Exec(sqlstr, vals...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return err
	}
	return nil
}
// MassReplace_ProfileMentioned REPLACEs all rows into sun.profile_mentioned
// with a single multi-row statement (existing rows with the same primary key
// are overwritten). Returns an error when rows is empty.
func MassReplace_ProfileMentioned(rows []ProfileMentioned, db XODB) error {
	if len(rows) == 0 {
		return errors.New("rows slice should not be empty - inserted nothing")
	}
	var err error
	ln := len(rows)
	// one "(?,?,?,?,?,?)" placeholder group of 6 columns per row
	insVals := helper.SqlManyDollars(6, ln, true)
	// sql query
	sqlstr := "REPLACE INTO sun.profile_mentioned (" +
		"Id, ForUserId, PostId, PostUserId, PostType, CreatedTime" +
		") VALUES " + insVals
	// BUG FIX: capacity was ln*5, but 6 values are appended per row (the old
	// "5 fields" comment was stale), forcing a reallocation on large batches.
	vals := make([]interface{}, 0, ln*6) // 6 fields
	for _, row := range rows {
		vals = append(vals, row.Id)
		vals = append(vals, row.ForUserId)
		vals = append(vals, row.PostId)
		vals = append(vals, row.PostUserId)
		vals = append(vals, row.PostType)
		vals = append(vals, row.CreatedTime)
	}
	if LogTableSqlReq.ProfileMentioned {
		XOLog(sqlstr, " MassReplace len = ", ln, vals)
	}
	_, err = db.Exec(sqlstr, vals...)
	if err != nil {
		if LogTableSqlReq.ProfileMentioned {
			XOLogErr(err)
		}
		return err
	}
	return nil
}
//////////////////// Play ///////////////////////////////
//
//
//
//
//
//
|
// redis project main.go
package main
import (
"errors"
"fmt"
"net"
"strconv"
"sync/atomic"
"time"
log "github.com/cihub/seelog"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"redis/common"
"redis/config"
"github.com/go-redis/redis"
pb "redis/message"
)
var (
redisClusterClient *common.RedisClusterClient
s *grpc.Server
)
type server struct{}
// zrangeByScore serves a SORTSET GET: runs ZRANGEBYSCORE on req.Key using the
// "min", "max", "offset" and "count" entries of req.Data, and returns the
// members keyed by their 0-based position.
func zrangeByScore(req *pb.RequestCmd) (*pb.ReplyCmd, error) {
	minStr, okMin := req.Data["min"]
	maxStr, okMax := req.Data["max"]
	offsetStr, okOffset := req.Data["offset"]
	countStr, okCount := req.Data["count"]
	if !okMin || !okMax || !okOffset || !okCount {
		return &pb.ReplyCmd{}, errors.New("there is no min , max , offset or count")
	}
	offsetInt, errOffset := strconv.Atoi(offsetStr)
	countInt, errCount := strconv.Atoi(countStr)
	if errOffset != nil || errCount != nil {
		// BUG FIX: this branch means offset/count are present but not numeric;
		// the old message wrongly claimed the keys were missing.
		return &pb.ReplyCmd{}, errors.New("offset or count is not an integer")
	}
	vs, err := redisClusterClient.Client.ZRangeByScore(req.Key, redis.ZRangeBy{
		Min:    minStr,
		Max:    maxStr,
		Offset: int64(offsetInt),
		Count:  int64(countInt),
	}).Result()
	if err != nil {
		return &pb.ReplyCmd{}, err
	}
	reply := pb.ReplyCmd{
		Status: pb.ReplyStatus_SUCCESS,
		Result: make(map[string]string, len(vs)),
	}
	for i, v := range vs {
		reply.Result[strconv.Itoa(i)] = v
	}
	return &reply, nil
}
// zremRangeByScore serves a ZSET_REM request: it removes the members of the
// sorted set req.Key whose scores lie between the "min" and "max" entries of
// req.Data.
func zremRangeByScore(req *pb.RequestCmd) (*pb.ReplyCmd, error) {
	minVal, okMin := req.Data["min"]
	maxVal, okMax := req.Data["max"]
	if !okMin || !okMax {
		return &pb.ReplyCmd{}, errors.New("there is no min or max")
	}
	err := redisClusterClient.Client.ZRemRangeByScore(req.Key, minVal, maxVal).Err()
	if err != nil {
		return &pb.ReplyCmd{}, err
	}
	return &pb.ReplyCmd{Status: pb.ReplyStatus_SUCCESS}, nil
}
// Cmd is the single gRPC entry point: it dispatches on req.Cmd and req.Type
// to the matching redis cluster operation and maps the outcome to a ReplyCmd.
// Requests are rejected outright while the cluster client is marked closed.
func (s *server) Cmd(ctx context.Context, req *pb.RequestCmd) (*pb.ReplyCmd, error) {
	fmt.Println(req)
	// Fail fast when the reconnect goroutine has flagged the cluster as down.
	if atomic.LoadInt32(&redisClusterClient.IsClosed) == common.CLOSED {
		return &pb.ReplyCmd{}, errors.New("cluster is disconnected")
	}
	switch req.Cmd {
	case pb.CmdOption_GET:
		switch req.Type {
		case pb.KeyType_STRING:
			if v, err := redisClusterClient.Client.Get(req.Key).Result(); err != nil {
				if err == redis.Nil {
					// redis.Nil marks a missing key, not a transport failure.
					return &pb.ReplyCmd{}, errors.New("key does not exist")
				} else {
					return &pb.ReplyCmd{}, err
				}
			} else {
				return &pb.ReplyCmd{
					Status: pb.ReplyStatus_SUCCESS,
					Result: map[string]string{
						"value": v,
					},
				}, nil
			}
		case pb.KeyType_LIST:
			// NOTE(review): LIST GET is accepted but unhandled — control falls
			// out of the switch and returns an empty reply; confirm intentional.
		case pb.KeyType_SORTSET:
			return zrangeByScore(req)
		default:
			return &pb.ReplyCmd{}, errors.New("type error")
		} //end switch
	case pb.CmdOption_SET:
		switch req.Type {
		case pb.KeyType_STRING:
			if len(req.Value) == 0 {
				return &pb.ReplyCmd{}, errors.New("value error")
			}
			// NOTE(review): no *time.Second here, unlike CmdOption_SET_EXPIRE
			// below, so req.Expire is treated as nanoseconds — confirm the unit.
			if err := redisClusterClient.Client.Set(req.Key, req.Value[0], time.Duration(req.Expire)).Err(); err != nil {
				return &pb.ReplyCmd{}, err
			} else {
				goto Success
			}
		case pb.KeyType_LIST:
			// NOTE(review): LIST SET is accepted but unhandled (see GET above).
		case pb.KeyType_SORTSET:
			// if len(req.Zset) == 0 {
			// 	return &pb.ReplyCmd{}, errors.New("set error")
			// }
			var s []redis.Z = make([]redis.Z, len(req.Zset))
			for i := 0; i < len(s); i++ {
				s[i].Score = req.Zset[i].Score
				s[i].Member = req.Zset[i].Member
			}
			if err := redisClusterClient.Client.ZAdd(req.Key, s...).Err(); err != nil {
				return &pb.ReplyCmd{}, err
			} else {
				goto Success
			}
		default:
			return &pb.ReplyCmd{}, errors.New("type error")
		} //end switch
	case pb.CmdOption_DEL:
		// DEL works identically for every supported key type.
		switch req.Type {
		case pb.KeyType_STRING:
			fallthrough
		case pb.KeyType_LIST:
			fallthrough
		case pb.KeyType_SORTSET:
			if err := redisClusterClient.Client.Del(req.Key).Err(); err != nil {
				return &pb.ReplyCmd{}, err
			} else {
				goto Success
			}
		default:
			return &pb.ReplyCmd{}, errors.New("type error")
		} //end switch
	case pb.CmdOption_SET_EXPIRE:
		if err := redisClusterClient.Client.Expire(req.Key, time.Duration(req.Expire)*time.Second).Err(); err != nil {
			return &pb.ReplyCmd{}, err
		} else {
			goto Success
		}
	case pb.CmdOption_GET_EXPIRE:
		if v, err := redisClusterClient.Client.TTL(req.Key).Result(); err != nil {
			return &pb.ReplyCmd{}, err
		} else {
			// TTL is returned in nanoseconds (time.Duration's base unit).
			return &pb.ReplyCmd{
				Status: pb.ReplyStatus_SUCCESS,
				Result: map[string]string{
					"ttl": strconv.FormatInt(int64(v), 10),
				},
			}, nil
		}
	case pb.CmdOption_ZSET_REM:
		return zremRangeByScore(req)
	default:
		return &pb.ReplyCmd{Status: pb.ReplyStatus_ERROR}, errors.New("cmd error")
	} //end switch
	// Reached only by the unhandled LIST cases above.
	return &pb.ReplyCmd{}, nil
Success:
	return &pb.ReplyCmd{Status: pb.ReplyStatus_SUCCESS}, nil
}
// main wires together logging, configuration, the redis cluster client with
// its reconnect loop, and the gRPC server.
func main() {
	//load log
	defer log.Flush()
	logger, err := log.LoggerFromConfigAsFile("./log/config.xml")
	if err != nil {
		log.Errorf("parse config.xml error: %v", err)
	}
	// NOTE(review): logger is nil when loading failed above — confirm
	// ReplaceLogger(nil) is acceptable to seelog.
	log.ReplaceLogger(logger)
	//load conf
	conf := config.LoadConfig()
	if conf == nil {
		log.Error("read cluster conf error")
		return
	}
	//redis
	redisClusterClient = &common.RedisClusterClient{}
	addrs := make([]string, len(conf.Cluster.Nodes))
	for i := 0; i < len(addrs); i++ {
		addrs[i] = fmt.Sprintf("%s:%s", conf.Cluster.Nodes[i][0], conf.Cluster.Nodes[i][1])
	}
	if err := redisClusterClient.Connect(addrs); err != nil {
		log.Errorf("redisClusterClient error: %v", err)
		return
	}
	serveChan := make(chan struct{})
	closeChan := redisClusterClient.NotifyClose(make(chan struct{}))
	// Reconnect watchdog: on a close notification, retry the cluster up to
	// three times (15s apart); give up by stopping the gRPC server.
	go func() {
		for {
			select {
			case <-closeChan:
				redisClusterClient.Shutdown()
				res := false
				for i := 0; i < 3; i++ {
					if err := redisClusterClient.Connect(addrs); err == nil {
						closeChan = redisClusterClient.NotifyClose(make(chan struct{}))
						res = true
						break
					}
					time.Sleep(15 * time.Second)
				}
				if !res {
					close(serveChan)
					s.Stop()
					log.Errorf("redis reconnect error")
				}
			case <-serveChan:
				redisClusterClient.Shutdown()
				return
			}
		}
	}()
	//serve
	lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", conf.Server.Ip, conf.Server.Port))
	if err != nil {
		// BUG FIX: the format string had no verb, so err was never rendered.
		log.Errorf("failed to listen: %v", err)
		close(serveChan)
		return
	}
	s = grpc.NewServer()
	pb.RegisterRedisServer(s, &server{})
	reflection.Register(s)
	if err := s.Serve(lis); err != nil {
		// BUG FIX: same missing verb as the listen error above.
		log.Errorf("failed to serve: %v", err)
		close(serveChan)
		return
	}
}
|
package gcp
import (
"context"
"fmt"
"io/ioutil"
"os"
cloudbuild "cloud.google.com/go/cloudbuild/apiv1"
"cloud.google.com/go/storage"
"golang.org/x/oauth2/google"
"google.golang.org/api/compute/v1"
)
// GCPCredentialsEnvName contains the name of the environment variable used
// to specify the path to the file with GCP service account credentials
const (
GCPCredentialsEnvName string = "GOOGLE_APPLICATION_CREDENTIALS"
)
// GCP structure holds necessary information to authenticate and interact with GCP.
type GCP struct {
creds *google.Credentials
}
// New returns an authenticated GCP instance, allowing to interact with GCP
// API. When credentials is non-nil it is parsed as a service-account JSON;
// otherwise Application Default Credentials are used.
func New(credentials []byte) (*GCP, error) {
	scopes := append([]string{
		compute.ComputeScope,   // permissions to image
		storage.ScopeReadWrite, // file upload
	}, cloudbuild.DefaultAuthScopes()...) // image import

	ctx := context.Background()
	var (
		creds *google.Credentials
		err   error
	)
	if credentials != nil {
		creds, err = google.CredentialsFromJSON(ctx, credentials, scopes...)
	} else {
		creds, err = google.FindDefaultCredentials(ctx, scopes...)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get Google credentials: %v", err)
	}
	return &GCP{creds}, nil
}
// GetCredentialsFromEnv reads the service account credentials JSON file from
// the path pointed to by the environment variable name stored in
// 'GCPCredentialsEnvName'. If the content of the JSON file was read successfully,
// its content is returned as []byte, otherwise nil is returned with proper error.
func GetCredentialsFromEnv() ([]byte, error) {
	credsPath, exists := os.LookupEnv(GCPCredentialsEnvName)
	if !exists {
		return nil, fmt.Errorf("'%s' env variable is not set", GCPCredentialsEnvName)
	}
	if credsPath == "" {
		return nil, fmt.Errorf("'%s' env variable is empty", GCPCredentialsEnvName)
	}
	// NOTE: ioutil.ReadFile is deprecated since Go 1.16 in favour of
	// os.ReadFile; kept here because the file's import block still lists ioutil.
	// Error message lowercased per Go convention and wrapped with %w.
	credentials, err := ioutil.ReadFile(credsPath)
	if err != nil {
		return nil, fmt.Errorf("reading credentials file: %w", err)
	}
	return credentials, nil
}
// GetProjectID returns a string with the Project ID of the project, used for
// all GCP operations. The value comes from the credentials resolved in New.
func (g *GCP) GetProjectID() string {
	return g.creds.ProjectID
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package checkpoint
import (
"context"
"encoding/json"
"time"
"github.com/pingcap/errors"
backuppb "github.com/pingcap/kvproto/pkg/brpb"
"github.com/pingcap/tidb/br/pkg/rtree"
"github.com/pingcap/tidb/br/pkg/storage"
)
// BackupKeyType/BackupValueType instantiate the generic checkpoint runner for
// backup: keys identify range groups, values are backed-up ranges.
type BackupKeyType = string
type BackupValueType = RangeType

// Storage layout of the backup checkpoint files, rooted under CheckpointDir.
const (
	CheckpointBackupDir = CheckpointDir + "/backup"
	CheckpointDataDirForBackup = CheckpointBackupDir + "/data"
	CheckpointChecksumDirForBackup = CheckpointBackupDir + "/checksum"
	CheckpointMetaPathForBackup = CheckpointBackupDir + "/checkpoint.meta"
	CheckpointLockPathForBackup = CheckpointBackupDir + "/checkpoint.lock"
)
// flushPositionForBackup returns the fixed set of storage paths (data dir,
// checksum dir, lock file) used by the backup flavour of the runner.
func flushPositionForBackup() flushPosition {
	return flushPosition{
		CheckpointDataDir: CheckpointDataDirForBackup,
		CheckpointChecksumDir: CheckpointChecksumDirForBackup,
		CheckpointLockPath: CheckpointLockPathForBackup,
	}
}
// valueMarshalerForBackup serializes one range group as JSON for persistence.
func valueMarshalerForBackup(group *RangeGroup[BackupKeyType, BackupValueType]) ([]byte, error) {
	return json.Marshal(group)
}
// StartCheckpointBackupRunnerForTest is only for test: it behaves like
// StartCheckpointRunnerForBackup, but drives all three loops (flush,
// checksum, lock) with the single caller-supplied tick duration.
func StartCheckpointBackupRunnerForTest(
	ctx context.Context,
	storage storage.ExternalStorage,
	cipher *backuppb.CipherInfo,
	tick time.Duration,
	timer GlobalTimer,
) (*CheckpointRunner[BackupKeyType, BackupValueType], error) {
	runner := newCheckpointRunner[BackupKeyType, BackupValueType](
		ctx, storage, cipher, timer, flushPositionForBackup(), valueMarshalerForBackup)

	err := runner.initialLock(ctx)
	if err != nil {
		// Error message lowercased, trailing period dropped, per Go error-string
		// conventions (this message may be embedded in longer chains).
		return nil, errors.Annotate(err, "failed to initialize checkpoint lock")
	}
	runner.startCheckpointMainLoop(ctx, tick, tick, tick)
	return runner, nil
}
// StartCheckpointRunnerForBackup creates the backup checkpoint runner,
// acquires the checkpoint lock, and starts the main loop with the default
// flush/checksum/lock tick durations.
func StartCheckpointRunnerForBackup(
	ctx context.Context,
	storage storage.ExternalStorage,
	cipher *backuppb.CipherInfo,
	timer GlobalTimer,
) (*CheckpointRunner[BackupKeyType, BackupValueType], error) {
	runner := newCheckpointRunner[BackupKeyType, BackupValueType](
		ctx, storage, cipher, timer, flushPositionForBackup(), valueMarshalerForBackup)
	err := runner.initialLock(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	runner.startCheckpointMainLoop(
		ctx,
		defaultTickDurationForFlush,
		defaultTckDurationForChecksum,
		defaultTickDurationForLock,
	)
	return runner, nil
}
// AppendForBackup records one backed-up key range (with its produced files)
// under groupKey in the checkpoint runner.
func AppendForBackup(
	ctx context.Context,
	r *CheckpointRunner[BackupKeyType, BackupValueType],
	groupKey BackupKeyType,
	startKey []byte,
	endKey []byte,
	files []*backuppb.File,
) error {
	return r.Append(ctx, &CheckpointMessage[BackupKeyType, BackupValueType]{
		GroupKey: groupKey,
		Group: []BackupValueType{
			{
				Range: &rtree.Range{
					StartKey: startKey,
					EndKey: endKey,
					Files: files,
				},
			},
		},
	})
}
// walk the whole checkpoint range files and retrieve the metadata of backed up ranges
// and return the total time cost in the past executions.
// fn is invoked once per persisted (key, value) pair.
func WalkCheckpointFileForBackup(
	ctx context.Context,
	s storage.ExternalStorage,
	cipher *backuppb.CipherInfo,
	fn func(BackupKeyType, BackupValueType),
) (time.Duration, error) {
	return walkCheckpointFile(ctx, s, cipher, CheckpointDataDirForBackup, fn)
}
// CheckpointMetadataForBackup is the persisted checkpoint metadata for backup.
// The `json:"-"` fields are not part of the JSON document; they are loaded
// separately from the checksum/data files.
type CheckpointMetadataForBackup struct {
	GCServiceId string `json:"gc-service-id"`
	ConfigHash []byte `json:"config-hash"`
	BackupTS uint64 `json:"backup-ts"`
	Ranges []rtree.Range `json:"ranges"`
	CheckpointChecksum map[int64]*ChecksumItem `json:"-"`
	CheckpointDataMap map[string]rtree.RangeTree `json:"-"`
}
// LoadCheckpointMetadata reads the checkpoint metadata document from the
// external storage, then populates the checksum map from the checksum dir.
func LoadCheckpointMetadata(ctx context.Context, s storage.ExternalStorage) (*CheckpointMetadataForBackup, error) {
	meta := new(CheckpointMetadataForBackup)
	if err := loadCheckpointMeta(ctx, s, CheckpointMetaPathForBackup, meta); err != nil {
		return nil, errors.Trace(err)
	}
	checksum, _, err := loadCheckpointChecksum(ctx, s, CheckpointChecksumDirForBackup)
	meta.CheckpointChecksum = checksum
	return meta, errors.Trace(err)
}
// save the checkpoint metadata into the external storage
// at the fixed backup metadata path.
func SaveCheckpointMetadata(ctx context.Context, s storage.ExternalStorage, meta *CheckpointMetadataForBackup) error {
	return saveCheckpointMetadata(ctx, s, meta, CheckpointMetaPathForBackup)
}
// RemoveCheckpointDataForBackup deletes the whole backup checkpoint directory.
func RemoveCheckpointDataForBackup(ctx context.Context, s storage.ExternalStorage) error {
	return removeCheckpointData(ctx, s, CheckpointBackupDir)
}
|
package native
import (
"fmt"
"log"
"os"
"testing"
)
var (
	// Oracle connection settings, read from the environment in init.
	uri, username, password string
)

// init loads the Oracle connection settings and aborts the test binary when
// any of the required variables is missing.
func init() {
	uri = os.Getenv("ORCLURI")
	username = os.Getenv("ORCLUSER")
	password = os.Getenv("ORCLPWD")
	if uri == "" || username == "" || password == "" {
		log.Panic("The following env variables must be set: ORCLURI, ORCLUSER, ORCLPWD")
	}
}
// TestNewEnvironment verifies an environment handle can be created and is
// non-nil.
func TestNewEnvironment(t *testing.T) {
	var environment *EnvHandle
	environment, err := NewEnvironment()
	if err != nil {
		t.Fatal(err)
	}
	if environment == nil {
		t.Fatal("Environment is nil")
	}
}
// TestSuccessfulBasicLogin verifies a login with the env-provided
// credentials succeeds.
func TestSuccessfulBasicLogin(t *testing.T) {
	env, err := NewEnvironment()
	if err != nil {
		// Previously this error was silently overwritten by the next call,
		// risking a nil env dereference.
		t.Fatal(err)
	}
	_, err = env.BasicLogin(username, password, uri)
	if err != nil {
		fmt.Println(err)
		t.Fatal(err)
	}
}
// TestFailedBasicLogin verifies bad credentials produce an ORA-01017
// ("invalid username/password") error.
func TestFailedBasicLogin(t *testing.T) {
	env, err := NewEnvironment()
	if err != nil {
		t.Fatal(err)
	}
	_, err = env.BasicLogin("boom", "fail", uri)
	if err == nil {
		// Previously a nil error here would panic on err.Error().
		t.Fatal("expected login to fail")
	}
	// Guard the slice so a short message cannot panic the test.
	if len(err.Error()) < 9 || err.Error()[0:9] != "ORA-01017" {
		t.Fatal(err.Error())
	}
}
// ExamplePing logs in with the env-provided credentials and pings the server;
// the example passes when "Success!" is printed.
func ExamplePing() {
	env, _ := NewEnvironment()
	svr, err := env.BasicLogin(username, password, uri)
	if err != nil {
		fmt.Println(err)
	}
	// NOTE(review): if BasicLogin failed, svr may be nil and Ping would
	// panic — confirm BasicLogin never returns (nil, err) here.
	err = svr.Ping()
	if err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("Success!")
	}
	// Output: Success!
}
|
package main
import (
"reflect"
"testing"
)
// Test_fetchLongestStablePrices checks the longest "stable" price window for
// tolerance x against known inputs.
func Test_fetchLongestStablePrices(t *testing.T) {
	type args struct {
		data []int
		x    int
	}
	tests := []struct {
		name string
		args args
		want []int
	}{
		{
			name: "Test data 1",
			args: args{
				[]int{2, 4, 3, 6, 6, 3},
				0,
			},
			want: []int{6, 6},
		},
		{
			// Fixed duplicate subtest name (was "Test data 1" twice, which
			// makes t.Run disambiguate with a #01 suffix).
			name: "Test data 2",
			args: args{
				[]int{3, 1, 2, 1, 2, 2, 1, 3, 1, 1, 2, 2, 2, 2},
				1,
			},
			want: []int{1, 2, 1, 2, 2, 1},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := fetchLongestStablePrices(tt.args.data, tt.args.x); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("fetchLongestStablePrices() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package time
import (
"sync"
"time"
)
type SharedTime struct {
sync.RWMutex
time time.Time
}
func (s *SharedTime) Before(other time.Time) bool {
s.RLock()
defer s.RUnlock()
return s.time.Before(other)
}
func (s *SharedTime) After(other time.Time) bool {
s.RLock()
defer s.RUnlock()
return !s.time.Before(other)
}
func (s *SharedTime) Set(current time.Time) {
s.Lock()
defer s.Unlock()
s.time = current
}
|
// Copyright (C) 2019 Cisco Systems Inc.
// Copyright (C) 2016-2017 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package connectivity
import (
"net"
felixConfig "github.com/projectcalico/calico/felix/config"
calicov3cli "github.com/projectcalico/calico/libcalico-go/lib/clientv3"
"github.com/sirupsen/logrus"
"github.com/projectcalico/vpp-dataplane/v3/calico-vpp-agent/common"
"github.com/projectcalico/vpp-dataplane/v3/vpplink"
)
// Names of the supported connectivity flavours between K8s nodes.
const (
	FLAT = "flat"
	IPSEC = "ipsec"
	VXLAN = "vxlan"
	IPIP = "ipip"
	WIREGUARD = "wireguard"
	SRv6 = "srv6"
)

// ConnectivityProviderData bundles the dependencies every provider needs:
// the VPP link, a logger, and the owning connectivity server.
type ConnectivityProviderData struct {
	vpp *vpplink.VppLink
	log *logrus.Entry
	server *ConnectivityServer
}

// ConnectivityProvider configures VPP to have proper connectivity to other K8s nodes.
// Different implementations can connect VPP with VPP in other K8s node by using different networking
// technologies (VXLAN, SRv6,...).
type ConnectivityProvider interface {
	AddConnectivity(cn *common.NodeConnectivity) error
	DelConnectivity(cn *common.NodeConnectivity) error
	// RescanState check current state in VPP and updates local cache
	RescanState()
	// Enabled checks whether the ConnectivityProvider is enabled in the config
	Enabled(cn *common.NodeConnectivity) bool
	EnableDisable(isEnable bool)
}
// GetNodeByIp returns the node spec matching addr, via the owning server.
func (p *ConnectivityProviderData) GetNodeByIp(addr net.IP) *common.LocalNodeSpec {
	return p.server.GetNodeByIp(addr)
}

// GetNodeIPs returns this node's IPs (delegated to the server).
func (p *ConnectivityProviderData) GetNodeIPs() (*net.IP, *net.IP) {
	return p.server.GetNodeIPs()
}

// Clientv3 exposes the server's calico v3 API client.
func (p *ConnectivityProviderData) Clientv3() calicov3cli.Interface {
	return p.server.Clientv3
}

// GetFelixConfig exposes the server's current felix configuration.
func (p *ConnectivityProviderData) GetFelixConfig() *felixConfig.Config {
	return p.server.felixConfig
}
// NewConnectivityProviderData builds the shared provider data from a VPP
// link, the owning connectivity server, and a logger.
func NewConnectivityProviderData(
	vpp *vpplink.VppLink,
	server *ConnectivityServer,
	log *logrus.Entry,
) *ConnectivityProviderData {
	return &ConnectivityProviderData{
		vpp: vpp,
		log: log,
		server: server,
	}
}
|
package sv
import (
"reflect"
"testing"
"github.com/Masterminds/semver/v3"
)
// TestSemVerCommitsProcessorImpl_NextVersion table-tests version bumping from
// commit logs: major/minor/patch mappings, unknown types, and the
// breaking-change footer.
func TestSemVerCommitsProcessorImpl_NextVersion(t *testing.T) {
	tests := []struct {
		name string
		ignoreUnknown bool
		version *semver.Version
		commits []GitCommitLog
		want *semver.Version
		wantUpdated bool
	}{
		{"no update", true, version("0.0.0"), []GitCommitLog{}, version("0.0.0"), false},
		{"no update without version", true, nil, []GitCommitLog{}, nil, false},
		{"no update on unknown type", true, version("0.0.0"), []GitCommitLog{commitlog("a", map[string]string{}, "a")}, version("0.0.0"), false},
		{"no update on unmapped known type", false, version("0.0.0"), []GitCommitLog{commitlog("none", map[string]string{}, "a")}, version("0.0.0"), false},
		{"update patch on unknown type", false, version("0.0.0"), []GitCommitLog{commitlog("a", map[string]string{}, "a")}, version("0.0.1"), true},
		{"patch update", false, version("0.0.0"), []GitCommitLog{commitlog("patch", map[string]string{}, "a")}, version("0.0.1"), true},
		{"patch update without version", false, nil, []GitCommitLog{commitlog("patch", map[string]string{}, "a")}, nil, true},
		{"minor update", false, version("0.0.0"), []GitCommitLog{commitlog("patch", map[string]string{}, "a"), commitlog("minor", map[string]string{}, "a")}, version("0.1.0"), true},
		{"major update", false, version("0.0.0"), []GitCommitLog{commitlog("patch", map[string]string{}, "a"), commitlog("major", map[string]string{}, "a")}, version("1.0.0"), true},
		{"breaking change update", false, version("0.0.0"), []GitCommitLog{commitlog("patch", map[string]string{}, "a"), commitlog("patch", map[string]string{"breaking-change": "break"}, "a")}, version("1.0.0"), true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Processor maps commit types major/minor/patch to the respective bump.
			p := NewSemVerCommitsProcessor(VersioningConfig{UpdateMajor: []string{"major"}, UpdateMinor: []string{"minor"}, UpdatePatch: []string{"patch"}, IgnoreUnknown: tt.ignoreUnknown}, CommitMessageConfig{Types: []string{"major", "minor", "patch", "none"}})
			got, gotUpdated := p.NextVersion(tt.version, tt.commits)
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("SemVerCommitsProcessorImpl.NextVersion() Version = %v, want %v", got, tt.want)
			}
			if tt.wantUpdated != gotUpdated {
				t.Errorf("SemVerCommitsProcessorImpl.NextVersion() Updated = %v, want %v", gotUpdated, tt.wantUpdated)
			}
		})
	}
}
// TestToVersion checks parsing of version strings, including the empty-input
// default of 0.0.0.
func TestToVersion(t *testing.T) {
	tests := []struct {
		name string
		input string
		want *semver.Version
		wantErr bool
	}{
		{"empty version", "", version("0.0.0"), false},
		{"invalid version", "abc", nil, true},
		{"valid version", "1.2.3", version("1.2.3"), false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := ToVersion(tt.input)
			if (err != nil) != tt.wantErr {
				t.Errorf("ToVersion() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ToVersion() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestIsValidVersion covers semver validity: prefixes, prereleases, build
// metadata and malformed strings.
func TestIsValidVersion(t *testing.T) {
	tests := []struct {
		name string
		value string
		want bool
	}{
		{"simple version", "1.0.0", true},
		{"with v prefix version", "v1.0.0", true},
		{"prerelease version", "1.0.0-alpha", true},
		{"prerelease version", "1.0.0-alpha.1", true},
		{"prerelease version", "1.0.0-0.3.7", true},
		{"prerelease version", "1.0.0-x.7.z.92", true},
		{"prerelease version", "1.0.0-x-y-z.-", true},
		{"metadata version", "1.0.0-alpha+001", true},
		{"metadata version", "1.0.0+20130313144700", true},
		{"metadata version", "1.0.0-beta+exp.sha.5114f85", true},
		{"metadata version", "1.0.0+21AF26D3-117B344092BD", true},
		{"incomplete version", "1", true},
		{"invalid version", "invalid", false},
		{"invalid prefix version", "random1.0.0", false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := IsValidVersion(tt.value); got != tt.want {
				t.Errorf("IsValidVersion(%s) = %v, want %v", tt.value, got, tt.want)
			}
		})
	}
}
|
package main
import "fmt"
// main reads a count n from stdin and prints "Ho " n-1 times followed by a
// final "Ho!" on one line (e.g. n=3 -> "Ho Ho Ho!").
func main() {
	var n int
	fmt.Scanf("%d", &n)
	line := ""
	for i := 1; i < n; i++ {
		line += "Ho "
	}
	fmt.Println(line + "Ho!")
}
|
package interfaces
import (
"github.com/golangid/candi/codebase/factory/types"
"github.com/labstack/echo"
"google.golang.org/grpc"
)
// RESTHandler delivery factory for REST handler; implementations mount their
// routes onto the given echo group.
type RESTHandler interface {
	Mount(group *echo.Group)
}

// GRPCHandler delivery factory for GRPC handler; implementations register
// their services and middleware on the gRPC server.
type GRPCHandler interface {
	Register(server *grpc.Server, middlewareGroup *types.MiddlewareGroup)
}

// GraphQLHandler delivery factory for GraphQL resolver handler, exposing the
// root Query/Mutation/Subscription resolvers.
type GraphQLHandler interface {
	Query() interface{}
	Mutation() interface{}
	Subscription() interface{}
	RegisterMiddleware(group *types.MiddlewareGroup)
}

// WorkerHandler delivery factory for all worker handler.
type WorkerHandler interface {
	MountHandlers(group *types.WorkerHandlerGroup)
}
|
// Package wordwrap provide a utility to wrap text on word boundaries.
package wordwrap
import (
"bufio"
"io"
"strings"
"unicode"
)
// Scanner wraps UTF-8 encoded text at word boundaries when lines exceed a limit
// number of characters. Newlines are preserved, including consecutive and
// trailing newlines, though trailing whitespace is stripped from each line.
//
// Clients should not assume Scanner is thread-safe.
type Scanner struct {
	r io.RuneScanner
	limit int // maximum characters per line (prefix excluded)
	prefix string
	tabWidth int
	// Scan state
	err error // sticky error; becomes io.EOF once the final line is emitted
	line runeBuffer // the line currently being assembled
	word runeBuffer // current word, not yet committed to line
	space runeBuffer // pending whitespace between line and word
	needNewline bool // emit the current line before the next non-space rune
	skipNextWS bool // Skip non-newline whitespace if true.
}
// NewScanner creates and initializes a new Scanner given a reader and fixed
// line limit. The new Scanner takes ownership of the reader, and the caller
// should not use it after this call.
func NewScanner(r io.Reader, limit int) *Scanner {
	runeScanner, ok := r.(io.RuneScanner)
	if !ok {
		// Wrap plain readers so runes can be peeked and unread.
		runeScanner = bufio.NewReader(r)
	}
	return &Scanner{
		r:        runeScanner,
		limit:    limit,
		tabWidth: 4, // default; adjustable via SetTabWidth
	}
}
// SetPrefix sets a string to prefix each future line. The prefix is not applied
// to empty lines and the prefix's length is not included in the character limit
// specified in NewScanner.
//
// It's safe to call SetPrefix between calls to ReadLine.
func (s *Scanner) SetPrefix(prefix string) {
	s.prefix = prefix
}

// SetTabWidth sets the width of tab characters.
// A width of 0 disables tab-to-space alignment (tabs are dropped).
//
// It's safe to call SetTabWidth between calls to ReadLine.
func (s *Scanner) SetTabWidth(width int) {
	s.tabWidth = width
}
// ReadLine reads a single wrapped line, not including end-of-line characters
// ("\n"). Trailing newlines are preserved. At EOF, the result will be an empty
// string and the error will be io.EOF.
//
// ReadLine always attempts to return at least one line, even on empty input.
//
// ReadLine attempts to handle tab characters gracefully, converting them to
// spaces aligned on the boundary define in SetTabWidth.
func (s *Scanner) ReadLine() (string, error) {
	if s.err != nil {
		return "", s.err
	}
	for {
		var char rune
		char, _, err := s.r.ReadRune()
		if err == io.EOF {
			break
		} else if err != nil {
			s.err = err
			return "", err
		}
		if unicode.IsSpace(char) {
			// Whitespace terminates the pending word; commit it to the line.
			if _, err := s.flushWord(); err != nil {
				s.err = err
				return "", err
			}
			if char == '\n' {
				// Hard newline: return the line as-is and reset per-line state.
				ret := s.line.String()
				s.skipNextWS = false
				s.line.Reset()
				s.space.Reset()
				return ret, nil
			}
			if s.skipNextWS {
				continue
			}
			if char == '\t' {
				// Replace tabs with spaces while preserving alignment.
				count := 0
				if width := s.tabWidth; width != 0 {
					count = width - s.line.Count()%width
				}
				s.space.WriteString(strings.Repeat(" ", count))
			} else {
				if _, err := s.space.WriteRune(char); err != nil {
					s.err = err
					return "", err
				}
			}
		} else {
			// Non-space rune: grow the pending word. If a previous iteration
			// decided the line is full, emit it before continuing.
			s.word.WriteRune(char)
			s.skipNextWS = false
			if s.needNewline {
				ret := s.line.String()
				s.needNewline = false
				s.line.Reset()
				return ret, nil
			}
		}
		// Commit the line if we've reached the maximum width.
		if s.line.Count()+s.word.Count()+s.space.Count() >= s.limit {
			//fmt.Println(s.lineChars, s.spaceChars, s.line.String()+s.space.String())
			next, nextSize, err := peekRune(s.r)
			if err != nil && err != io.EOF {
				s.err = err
				return "", err
			}
			// Flush if the next character constitutes a word break.
			if s.word.Count() == s.limit || unicode.IsSpace(next) || nextSize == 0 {
				if _, err := s.flushWord(); err != nil {
					s.err = err
					return "", err
				}
			}
			if nextSize != 0 && next != '\n' && s.space.Count() < s.limit {
				// We had some non-whitespace chars, so start a new line for the next write.
				s.needNewline = true
			}
			s.skipNextWS = true
			s.space.Reset()
		}
	}
	// EOF: flush whatever word is pending and emit the final line; further
	// calls will observe the sticky io.EOF.
	if _, err := s.flushWord(); err != nil {
		s.err = err
		return "", err
	}
	ret := s.line.String()
	s.line.Reset()
	s.err = io.EOF
	return ret, nil
}
// WriteTo implements io.WriterTo: it drains the scanner and writes every
// wrapped line to w, newline-separated. It returns the number of bytes
// written and the first read or write error other than io.EOF. This may make
// multiple calls to the Read method of the underlying Reader.
func (s *Scanner) WriteTo(w io.Writer) (n int64, err error) {
	sep := []byte("\n")
	for first := true; ; first = false {
		line, rerr := s.ReadLine()
		if rerr == io.EOF {
			return n, nil
		}
		if rerr != nil {
			return n, rerr
		}
		if !first {
			m, werr := w.Write(sep)
			n += int64(m)
			if werr != nil {
				return n, werr
			}
		}
		m, werr := io.WriteString(w, line)
		n += int64(m)
		if werr != nil {
			return n, werr
		}
	}
}
// flushWord commits the pending whitespace and pending word onto the current
// line, writing the configured prefix first when the line is empty. It
// returns the number of runes written. No-op when there is no pending word.
// NOTE(review): assumes runeBuffer.WriteTo drains the source buffer — confirm
// in runeBuffer's implementation.
func (s *Scanner) flushWord() (int, error) {
	var written int
	if s.word.Count() > 0 {
		if s.line.Count() == 0 {
			// Empty line: apply the prefix before any content.
			n, err := s.line.WriteString(s.prefix)
			written += n
			if err != nil {
				return written, err
			}
		}
		n, err := s.space.WriteTo(&s.line)
		written += int(n)
		if err != nil {
			return written, err
		}
		n, err = s.word.WriteTo(&s.line)
		written += int(n)
		if err != nil {
			return written, err
		}
	}
	return written, nil
}
func peekRune(r io.RuneScanner) (rune, int, error) {
ch, size, err := r.ReadRune()
if err != nil {
return ch, size, err
}
if err := r.UnreadRune(); err != nil {
return 0, 0, err
}
return ch, size, nil
}
|
package Problem0155
// MinStack is a stack that can also report its minimum element in O(1).
type MinStack struct {
	stack []item
}

// item pairs a pushed value x with the minimum of the stack up to and
// including this entry.
type item struct {
	min, x int
}

// Constructor creates an empty MinStack.
func Constructor() MinStack {
	return MinStack{}
}

// Push stores x, recording the running minimum alongside it.
func (s *MinStack) Push(x int) {
	newMin := x
	if len(s.stack) != 0 {
		if cur := s.GetMin(); cur < newMin {
			newMin = cur
		}
	}
	s.stack = append(s.stack, item{min: newMin, x: x})
}

// Pop discards the most recently pushed value.
func (s *MinStack) Pop() {
	last := len(s.stack) - 1
	s.stack = s.stack[:last]
}

// Top returns the most recently pushed value (the top of the stack).
// (The original comment claimed "maximum", which was incorrect.)
func (s *MinStack) Top() int {
	return s.stack[len(s.stack)-1].x
}

// GetMin returns the smallest value currently on the stack.
func (s *MinStack) GetMin() int {
	return s.stack[len(s.stack)-1].min
}
|
package range_sum_bst
// Tree exposes range-sum queries over a binary search tree.
type Tree interface {
	RangeSumBST(int, int) int
}

// tree adapts a TreeNode (which may be nil, representing an empty tree) to
// the Tree interface.
type tree struct {
	head TreeNode
}
// RangeSumBST returns the sum of all node values v with L <= v <= R, using
// the BST ordering to skip subtrees that cannot contain in-range values.
func (t *tree) RangeSumBST(L int, R int) int {
	if t.head == nil {
		return 0
	}
	v := t.head.GetValue()
	switch {
	case v < L:
		// Everything to the left is even smaller; only the right side can qualify.
		return NewTree(t.head.GetRight()).RangeSumBST(L, R)
	case v > R:
		// Everything to the right is even larger; only the left side can qualify.
		return NewTree(t.head.GetLeft()).RangeSumBST(L, R)
	default:
		sum := v
		if v != R {
			sum += NewTree(t.head.GetRight()).RangeSumBST(L, R)
		}
		if v != L {
			sum += NewTree(t.head.GetLeft()).RangeSumBST(L, R)
		}
		return sum
	}
}
// NewTree wraps node as the root of a Tree supporting range-sum queries.
func NewTree(node TreeNode) Tree {
	// gofmt: space after the colon in the composite literal.
	return &tree{head: node}
}
|
package main
import (
"math"
"github.com/fogleman/ln/ln"
)
// main renders each demo: a cube, the xxyy "hole" surface, and a sphere and
// cylinder in both solid and outline variants, writing PNG+SVG files.
func main() {
	cube("cube")
	hole(xxyy, "hole")
	sphere("sphere", false)
	sphere("outline-sphere", true)
	cylinder("cylinder", false)
	cylinder("outline-cylinder", true)
}
// cube renders a single 2x2x2 cube centered at the origin to out.png and
// out.svg.
func cube(out string) {
	scene := ln.Scene{}
	scene.Add(ln.NewCube(ln.Vector{X: -1, Y: -1, Z: -1}, ln.Vector{X: 1, Y: 1, Z: 1}))

	// Camera: position, look-at point, and up direction.
	eye := ln.Vector{X: 4, Y: 3, Z: 2}
	center := ln.Vector{X: 0, Y: 0, Z: 0}
	up := ln.Vector{X: 0, Y: 0, Z: 1}

	// Rendering parameters.
	const (
		width  = 1024.0 // rendered width
		height = 1024.0 // rendered height
		fovy   = 50.0   // vertical field of view, degrees
		znear  = 0.1    // near z plane
		zfar   = 10.0   // far z plane
		step   = 0.01   // how finely to chop the paths for visibility testing
	)

	// Project the 3D scene into 2D paths, then write raster and vector output.
	paths := scene.Render(eye, center, up, width, height, fovy, znear, zfar, step)
	paths.WriteToPNG(out+".png", width, height)
	paths.WriteToSVG(out+".svg", width, height)
}
// xxyy is the surface z = -1/(x^2+y^2): a funnel-shaped hole that diverges to
// -Inf as (x, y) approaches the origin.
func xxyy(x, y float64) float64 {
	r2 := x*x + y*y
	return -1 / r2
}
// cosxy is the surface z = cos(x*y) * (x^2 - y^2).
func cosxy(x, y float64) float64 {
	return (x*x - y*y) * math.Cos(x*y)
}
// hole renders the surface f clipped to a box, using the ln.Below rendering
// mode, to out.png and out.svg.
func hole(f func(x, y float64) float64, out string) {
	scene := ln.Scene{}
	// add func
	box := ln.Box{
		Min: ln.Vector{X: -2, Y: -2, Z: -4},
		Max: ln.Vector{X: 2, Y: 2, Z: 2},
	}
	scene.Add(ln.NewFunction(f, box, ln.Below))
	// Camera and render parameters (fovy 50, znear 0.1, zfar 100, step 0.01).
	eye := ln.Vector{X: 3, Y: 0, Z: 3}
	center := ln.Vector{X: 1.1, Y: 0, Z: 0}
	up := ln.Vector{X: 0, Y: 0, Z: 1}
	width := 1024.0
	height := 1024.0
	paths := scene.Render(eye, center, up, width, height, 50, 0.1, 100, 0.01)
	paths.WriteToPNG(out+".png", width, height)
	paths.WriteToSVG(out+".svg", width, height)
}
// sphere renders either a solid sphere or only its view-dependent outline,
// depending on the outline flag, to out.png and out.svg.
func sphere(out string, outline bool) {
	scene := ln.Scene{}
	eye := ln.Vector{X: 3, Y: 0, Z: 3}
	center := ln.Vector{X: 1.1, Y: 0, Z: 0}
	up := ln.Vector{X: 0, Y: 0, Z: 1}
	radius := 0.333
	width, height := 1024.0, 1024.0

	if outline {
		scene.Add(ln.NewOutlineSphere(eye, up, center, radius))
	} else {
		scene.Add(ln.NewSphere(center, radius))
	}

	paths := scene.Render(eye, center, up, width, height, 50, 0.1, 100, 0.01)
	paths.WriteToPNG(out+".png", width, height)
	paths.WriteToSVG(out+".svg", width, height)
}
// cylinder renders either a solid cylinder (z from -3 to 3) or only its
// outline, depending on the outline flag, to out.png and out.svg.
func cylinder(out string, outline bool) {
	scene := ln.Scene{}
	eye := ln.Vector{X: 3, Y: 0, Z: 3}
	center := ln.Vector{X: 1.1, Y: 0, Z: 0}
	up := ln.Vector{X: 0, Y: 0, Z: 1}
	radius := 0.333
	width, height := 1024.0, 1024.0

	if outline {
		scene.Add(ln.NewOutlineCylinder(eye, up, radius, -3, 3))
	} else {
		scene.Add(ln.NewCylinder(radius, -3, 3))
	}

	paths := scene.Render(eye, center, up, width, height, 50, 0.1, 100, 0.01)
	paths.WriteToPNG(out+".png", width, height)
	paths.WriteToSVG(out+".svg", width, height)
}
|
package main
import (
"fmt"
"io/fs"
"os"
"path/filepath"
)
// specPathBase is the hard-coded local directory the sample SpecFS serves from.
const specPathBase = `/Users/jameslucktaylor/git/github.com/TykTechnologies/ara/k8s/deployments/home/go`

// main stats home_namespace.yaml through the SpecFS abstraction and reports
// any failure on stderr.
func main() {
	sfs := SpecFS{base: specPathBase}
	if err := StatHomeNS(sfs); err != nil {
		fmt.Fprintf(os.Stderr, "could not stat file: %v\n", err)
	}
}
func StatHomeNS(f fs.FS) error {
file, err := f.Open("home_namespace.yaml")
if err != nil {
return err
}
s, err := file.Stat()
if err != nil {
return err
}
fmt.Printf("stat: '%+v'\n", s)
return nil
}
// SpecFS is an fs.FS serving files from a base directory on local disk.
type SpecFS struct {
	base string
}

// Open opens the named file relative to the base directory, satisfying fs.FS.
func (s SpecFS) Open(name string) (fs.File, error) {
	path := filepath.Join(s.base, name)
	f, err := os.Open(path)
	if err != nil {
		// Return a literal nil so the fs.File interface value is truly nil
		// on error (avoids the typed-nil-pointer-in-interface trap).
		return nil, err
	}
	return f, nil
}
|
package bot
import (
"time"
"sync"
"log"
)
// Context holds the full per-account conversation state: the incoming
// message, the response being assembled, registered handlers, and the
// bookkeeping used to expire idle contexts.
type Context struct {
	// state for handler to inspect
	Message *Message
	CurrentResponse *Response
	Inline *Inline
	// telegram account info
	BotAccount *BotAccount
	// next handler to handle
	NextHandler Handler
	// inner state to choose handler
	responses []*Response
	handlers map[Matcher]Handler
	// to track and delete old contexts
	lastModified time.Time
	lock sync.Mutex
	log *contextLogger
}
// newContext constructs a fresh Context for an account with an empty handler
// map and an empty current response; lastModified starts at "now".
func newContext(acc *BotAccount) *Context {
	log.Println("Bot::newContext")
	c := &Context{
		BotAccount: acc,
		handlers: make(map[Matcher]Handler),
		CurrentResponse: &Response{},
		lastModified: time.Now(),
	}
	c.log = newContextLogger(acc, c)
	return c
}
// RegisterHandler routes messages matching m to handler h.
// NOTE(review): the handlers map is written without taking c.lock — confirm
// registration only ever happens from a single goroutine.
func (c *Context) RegisterHandler(m Matcher, h Handler) {
	log.Println("Context::RegisterHandler")
	c.handlers[m] = h
}
|
/*--------------------------------------------------------------
* package: 初始化服务
* time: 2018/04/17
*-------------------------------------------------------------*/
package api
import (
"encoding/json"
"github.com/golang/glog"
"strconv"
"sub_account_service/blockchain_server/arguments"
c "sub_account_service/blockchain_server/config"
"sub_account_service/blockchain_server/contracts"
"sub_account_service/blockchain_server/lib"
myeth "sub_account_service/blockchain_server/lib/eth"
"time"
)
// DeployAddress is the management contract's deployment address.
var DeployAddress string

// NOTE(review): init_key is declared but not used anywhere in this chunk —
// confirm it is referenced elsewhere in the package.
var init_key string

// Phase-one legacy addresses, kept for reference:
//var addr string = "0x59f1b27caf3d72cd6edd87d3142991a2a6f35420"
//var acco string = "0xd46966b4b199332a9a03c8e7996c9c6449e426bf"
// Init loads the configured deployment address and runs the management
// contract initialization (deploying the contract when none is configured).
func Init() {
	glog.Infoln("api init enter")
	DeployAddress = c.ConfInstance().DeployAddress
	ManageMentInit()
}
// ManageMentInit deploys the management contract when no deploy address is
// configured, stores and returns the new address; it returns "" when an
// address is already configured or the deployment fails.
// NOTE(review): the misspelled exported name is kept to preserve the API.
func ManageMentInit() string {
	glog.Infoln(lib.Loger("initing", "print"), c.Opts().AccAddress, c.Opts().DeployAddress)
	if len(c.Opts().DeployAddress) == 0 {
		// Decrypt the manager keystore with its passphrase.
		key := myeth.ParseKeyStore(c.Opts().ManagerKey, c.Opts().ManagerPhrase)
		b, err := json.Marshal(key)
		if err != nil {
			glog.Infoln(err)
			panic(err)
		}
		// The Unix timestamp makes the token name/symbol unique per deployment.
		num := strconv.Itoa(int(time.Now().Unix()))
		// NOTE(review): this logs the marshalled (decrypted) key material at
		// Info level — confirm that is intended.
		glog.Infoln(string(b))
		addr, _, err := contracts.Deploy(string(b),
			arguments.DeployArguments{
				TokenName: c.Opts().ServerId + num,
				TokenSymbol: num,
				SubPayer: c.Opts().PayAddress,
				Postscript: "",
			})
		if addr == "" || err != nil {
			glog.Errorln("[init error]: associated account fail ", addr)
			return ""
		}
		glog.Infoln(lib.Log("initing", "", "Associated account"), addr)
		DeployAddress = addr
		return addr
	}
	return ""
}
|
// Package privacy provides functions for removing private information
// from data of different types.
package privacy
import (
"github.com/golang/protobuf/proto"
"gopkg.in/sorcix/irc.v2"
pb "github.com/robustirc/robustirc/internal/proto"
"github.com/robustirc/robustirc/internal/robust"
)
// FilterSnapshot returns a deep copy of snapshot (via proto.Clone) in which
// every session password has been replaced with a placeholder.
func FilterSnapshot(snapshot pb.Snapshot) pb.Snapshot {
	result := proto.Clone(&snapshot).(*pb.Snapshot)
	for _, session := range result.Sessions {
		session.Pass = "<privacy filtered>"
	}
	return *result
}
// FilterIrcmsg redacts the trailing parameter of commands that can carry
// private payloads (PRIVMSG, NOTICE, PASS), mutating and returning message.
// A nil message is passed through unchanged.
func FilterIrcmsg(message *irc.Message) *irc.Message {
	if message == nil {
		return nil
	}
	switch message.Command {
	case irc.PRIVMSG, irc.NOTICE, irc.PASS:
		if n := len(message.Params); n > 0 {
			message.Params[n-1] = "<privacy filtered>"
		}
	}
	return message
}
// FilterMsg re-parses the message payload as IRC, redacts private parameters,
// and returns a filtered copy of the robust.Message.
// NOTE(review): if Data does not parse, irc.ParseMessage may yield nil and
// the chained .String() would panic — confirm inputs are always valid IRC.
func FilterMsg(message *robust.Message) *robust.Message {
	return &robust.Message{
		Id: message.Id,
		Session: message.Session,
		Type: message.Type,
		Data: FilterIrcmsg(irc.ParseMessage(message.Data)).String(),
	}
}
// FilterMsgs applies FilterMsg to every message, returning a new slice of
// filtered copies in the same order.
func FilterMsgs(messages []*robust.Message) []*robust.Message {
	filtered := make([]*robust.Message, 0, len(messages))
	for _, message := range messages {
		filtered = append(filtered, FilterMsg(message))
	}
	return filtered
}
|
package calc
import "math"
// QuadRoot calculates the real roots of the quadratic a*x^2 + b*x + c = 0.
// It returns no roots for a negative discriminant, a single root when the
// discriminant is (approximately) zero, and two roots otherwise.
// See https://en.wikipedia.org/wiki/Quadratic_formula.
func QuadRoot(a float64, b float64, c float64) []float64 {
	roots := make([]float64, 0, 2)

	// Discriminant decides how many real roots exist.
	disc := b*b - 4.0*a*c
	if disc < 0.0 {
		return roots
	}

	r := math.Sqrt(disc)
	roots = append(roots, (-1*b+r)/(2.0*a))
	if Near(disc, 0.0) {
		// Double root: the two solutions coincide.
		return roots
	}
	return append(roots, (-1*b-r)/(2.0*a))
}
|
package main
import (
"fmt"
"sync"
"time"
)
// cabs is the shared count of available cabs, guarded by the mutex passed to
// cab; wg1 waits for all booking goroutines to finish.
var cabs = 2
var wg1 sync.WaitGroup
// main starts one booking goroutine per customer, sharing a single mutex,
// and waits for all of them to complete.
func main() {
	m := &sync.Mutex{}
	names := []string{"Ravi", "Raj", "Dev", "Vipin", "Ankit"}
	for _, name := range names {
		wg1.Add(1)
		go cab(name, m)
	}
	wg1.Wait()
}
func cab(name string, m *sync.Mutex) {
m.Lock()
if cabs >= 1 {
fmt.Println("Cab is available for ", name)
time.Sleep(1 * time.Second)
fmt.Println("Cab Confiremed for ", name)
fmt.Println("Thanks", name)
cabs--
} else {
fmt.Println("Cab is not available for ", name)
}
m.Unlock()
wg1.Done()
}
|
package afdb
import (
"database/sql"
_ "github.com/lib/pq"
"log"
"fmt"
"strings"
"strconv"
)
// Db wraps the open PostgreSQL connection used by the bot.
type Db struct {
	Connection *sql.DB
}

// Player is one participant row of a per-chat game table.
type Player struct {
	UserName string
	UserId int64
	Count int // number of player slots this user booked
	Money float64
}

// Game describes a chat's active game: who opened it and their comment.
type Game struct {
	Holder string
	HolderId int64
	Comment string
}
// Close closes the underlying database connection.
// NOTE(review): the Close error is discarded — confirm callers don't need it.
func (th *Db) Close() {
	th.Connection.Close()
}
// NewGame drops any previous game table for chatId, creates a fresh per-chat
// players table (game_<chatId>) and records the game in active_games.
func (th *Db) NewGame(chatId int64, gameHolder string, gameHolderId int64, comment string) {
	log.Printf("Creating tables for chat %d, %s, %s", chatId, gameHolder, comment)
	if _, err := th.Connection.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS game_%d;`, uint64(chatId))); err != nil {
		log.Println("Can't drop previous game")
	}
	// log.Panicf (not log.Panic) so the %s verb is actually formatted.
	if _, err := th.Connection.Exec(fmt.Sprintf(`CREATE TABLE game_%d (USER_ID INT PRIMARY KEY, USERNAME TEXT, PLAYERS INT, MONEY REAL);`, uint64(chatId))); err != nil {
		log.Panicf("Can't create table: %s", err)
	}
	// log.Printf (not Println) so the %s verb is expanded instead of printed literally.
	if _, err := th.Connection.Exec(`insert into active_games (chat_id, game_holder, game_holder_id, holder_message)
	values ($1, $2, $3, $4);`, chatId, gameHolder, gameHolderId, comment); err != nil {
		log.Printf("Can't insert active game: %s", err)
	}
}
// GameInfo returns the active game of chatId, or the zero Game when the query
// fails or no row exists. The row is selected as a single composite value and
// split manually.
// NOTE(review): the comma-split parse breaks if holder_message itself contains
// commas — confirm inputs or switch to scanning individual columns.
func (th *Db) GameInfo(chatId int64) Game {
	data := fmt.Sprintf("select (game_holder, game_holder_id, holder_message) from active_games where chat_id = %d;", chatId)
	rows, err := th.Connection.Query(data)
	if err != nil {
		log.Printf("Error. Query error: %s", err)
		return Game{}
	}
	defer rows.Close()
	var (
		result string
		userName string
		userId int64
		message string
	)
	if rows.Next() {
		if err := rows.Scan(&result); err != nil {
			log.Fatal(err)
		}
		// log.Print (not Printf) — result is data, not a format string; a '%'
		// in the row would otherwise corrupt the log output (go vet: printf).
		log.Print(result)
		// Strip the surrounding parentheses of the composite value.
		result = result[1:len(result) - 1]
		tokens := strings.FieldsFunc(result,
			func(c rune) bool {
				return c == ','
			})
		userName = strings.TrimSuffix(strings.TrimPrefix(tokens[0], `"`), `"`)
		userId, _ = strconv.ParseInt(tokens[1], 10, 64)
		message = strings.TrimSuffix(strings.TrimPrefix(tokens[2], `"`), `"`)
	}
	return Game {
		Holder : userName,
		HolderId : userId,
		Comment : message,
	}
}
// ChatPlayers returns all player rows of the chat's game table, or an empty
// slice on query error. Each row is selected as one composite value and split
// manually on commas.
// NOTE(review): the comma-split parse breaks if USERNAME contains commas —
// confirm inputs or scan individual columns instead.
func (th *Db) ChatPlayers(chatId int64) []Player {
	data := fmt.Sprintf("SELECT (user_id, PLAYERS, MONEY, USERNAME) FROM game_%d;", uint64(chatId))
	rows, err := th.Connection.Query(data)
	players := make([]Player, 0)
	if err != nil {
		log.Printf("Error. Query error: %s", err)
		return players
	}
	defer rows.Close()
	for rows.Next() {
		var (
			result string
			userName string
			userId int64
			count int
			money float64
		)
		if err := rows.Scan(&result); err != nil {
			log.Fatal(err)
		}
		// Strip the surrounding parentheses of the composite value.
		result = result[1:len(result) - 1]
		tokens := strings.FieldsFunc(result,
			func(c rune) bool {
				return c == ','
			})
		userId, _ = strconv.ParseInt(tokens[0], 10, 64)
		count, _ = strconv.Atoi(tokens[1])
		money, _ = strconv.ParseFloat(tokens[2], 64)
		userName = strings.TrimSuffix(strings.TrimPrefix(tokens[3],`"`), `"`)
		players = append(players, Player{
			UserName : userName,
			UserId : userId,
			Count : count,
			Money : money,
		})
	}
	return players
}
// NewPlayer registers userId in the chat's game table with the given player
// count, or tops up the count when the user is already present. It reports
// whether the statement succeeded.
func (th *Db) NewPlayer(chatId int64, userId int64, userName string, players int) bool {
	table := uint64(chatId)
	query := fmt.Sprintf(`INSERT INTO game_%d (USER_ID, USERNAME, PLAYERS, MONEY) VALUES($1, $2, $3, $4)
ON CONFLICT (USER_ID) DO UPDATE SET PLAYERS=game_%d.PLAYERS+$3;`, table, table)
	if _, err := th.Connection.Exec(query, userId, userName, players, 0); err != nil {
		log.Printf("Error. Can't add player: %s", err)
		return false
	}
	return true
}
// DropPlayer subtracts players from userId's entry in the chat's game table
// and deletes the row entirely once its player count reaches zero or below.
func (th *Db) DropPlayer(chatId int64, userId int64, players int) {
	table := uint64(chatId)
	update := fmt.Sprintf(`UPDATE game_%d SET PLAYERS=game_%d.PLAYERS-$1 where USER_ID=$2;`, table, table)
	if _, err := th.Connection.Exec(update, players, userId); err != nil {
		log.Printf("Error. Can't remove player: %s", err)
		return
	}
	cleanup := fmt.Sprintf(`DELETE FROM game_%d where PLAYERS <= 0 and USER_ID=$1;`, table)
	if _, err := th.Connection.Exec(cleanup, userId); err != nil {
		log.Printf("Error. Can't remove row: %s", err)
	}
}
// PutMoney credits money to userId in the chat's game table (creating the
// player with a count of 1 if absent) and mirrors the amount into the chat's
// bank. It returns false only when the player update fails; a bank failure is
// logged but still reported as success.
func (th *Db) PutMoney(chatId int64, userId int64, userName string, money float64) bool {
	data := fmt.Sprintf(`INSERT INTO game_%d (USER_ID, USERNAME, PLAYERS, MONEY) VALUES($1, $2, 1, $3) ON CONFLICT (USER_ID) DO UPDATE SET MONEY=game_%d.money+$3;`, uint64(chatId), uint64(chatId))
	_, err := th.Connection.Exec(data, userId, userName, money)
	if err != nil {
		log.Printf("Error. Can't add player: %s", err)
		return false
	}
	data = `INSERT INTO bank (chat_id, money, game_cost) VALUES($1, $2, 0.0) ON CONFLICT (chat_id) DO UPDATE SET MONEY=bank.money+$2;`
	_, err = th.Connection.Exec(data, chatId, money)
	if err != nil {
		// Message fixed: previously read "Can't money to the bank".
		log.Printf("Error. Can't add money to the bank: %s", err)
	}
	return true
}
// Init creates the persistent bookkeeping tables (bank and active_games)
// when they do not exist yet. Failures are logged and otherwise ignored.
func (th *Db) Init() {
	if _, err := th.Connection.Exec(`create table if not exists bank (chat_id int primary key, money real, game_cost real);`); err != nil {
		log.Printf("Error. Can't create bank table: %s", err)
	}
	if _, err := th.Connection.Exec(`create table if not exists active_games (chat_id int primary key, game_holder text, game_holder_id int, holder_message text);`); err != nil {
		log.Printf("Error. Can't create active_games table: %s", err)
	}
}
// SetGameCost stores the per-game fee for a chat, creating the bank row with
// a zero balance on first use.
func (th *Db) SetGameCost(chatId int64, howMuch float64) {
	const query = `insert into bank (chat_id, money, game_cost) values($1, 0.0, $2) on conflict (chat_id) do update set game_cost=$2;`
	if _, err := th.Connection.Exec(query, chatId, howMuch); err != nil {
		log.Printf("Error. Can't set cost: %s", err)
	}
}
// PayForTheGame deducts the chat's configured game cost from its bank
// balance, then tears the finished game down: the per-chat game table is
// dropped and the active_games row removed. Each step logs and continues on
// failure.
func (th *Db) PayForTheGame(chatId int64) {
	const charge = `update bank set money=(bank.money-(select (game_cost) from bank where chat_id=$1)) where chat_id=$1;`
	if _, err := th.Connection.Exec(charge, chatId); err != nil {
		log.Printf("Error. Can't take money from the bank: %s", err)
	}
	if _, err := th.Connection.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS game_%d`, uint64(chatId))); err != nil {
		log.Println("Can't drop previous game")
	}
	if _, err := th.Connection.Exec(fmt.Sprintf("delete from active_games where chat_id = %d;", chatId)); err != nil {
		log.Printf("Error. Can't delete active game: %s", err)
	}
}
// HowMuchMoney returns the chat's bank balance, or 0 when the query fails or
// the chat has no bank row.
func (th *Db) HowMuchMoney(chatId int64) float64 {
	rows, err := th.Connection.Query(fmt.Sprintf("select (money) from bank where chat_id=%d;", chatId))
	if err != nil {
		log.Printf("Error. Query error: %s", err)
		return 0.0
	}
	defer rows.Close()

	var raw string
	if rows.Next() {
		if err := rows.Scan(&raw); err != nil {
			log.Fatal(err)
		}
	}
	// An empty raw string simply leaves the balance at zero.
	amount := float64(0)
	fmt.Sscanf(raw, "%f", &amount)
	return amount
}
// GameExists reports whether the chat's per-game table currently exists,
// using Postgres' to_regclass lookup.
func (th *Db) GameExists(chatId int64) bool {
	tableId := fmt.Sprintf("game_%d", uint64(chatId))
	rows, err := th.Connection.Query(fmt.Sprintf("SELECT to_regclass('%s');", tableId))
	if err != nil {
		log.Printf("Error. Query error: %s", err)
		return false
	}
	defer rows.Close()

	var found string
	if rows.Next() {
		if err := rows.Scan(&found); err != nil {
			log.Printf("Cant find table: %s", err)
			return false
		}
	}
	return found == tableId
}
// DbConnect opens a postgres connection with the given settings and wraps it
// in a Db. The sql.Open error, if any, is passed through to the caller.
func DbConnect(host string, port string, user string, pswd string, name string, sslMode string) (*Db, error) {
	dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s", host, port, user, pswd, name, sslMode)
	connection, err := sql.Open("postgres", dsn)
	return &Db{Connection: connection}, err
}
|
package ir
// IntPredicate represents a predicate for comparing integers.
type IntPredicate int

// Integer comparison predicates, in the same order as their mnemonics in
// intPredicateStrings.
const (
	IntEQ  IntPredicate = iota // equal
	IntNE                      // not equal
	IntUGT                     // unsigned greater than
	IntUGE                     // unsigned greater than or equal to
	IntULT                     // unsigned less than
	IntULE                     // unsigned less than or equal to
	IntSGT                     // signed greater than
	IntSGE                     // signed greater than or equal to
	IntSLT                     // signed less than
	IntSLE                     // signed less than or equal to
)

// intPredicateStrings holds the textual mnemonic for each predicate value.
var intPredicateStrings = []string{"eq", "ne", "ugt", "uge", "ult", "ule", "sgt", "sge", "slt", "sle"}

// String returns the mnemonic of v, or "err" for an out-of-range value.
func (v IntPredicate) String() string {
	if idx := int(v); 0 <= idx && idx < len(intPredicateStrings) {
		return intPredicateStrings[idx]
	}
	return "err"
}
|
package leetcode
import "testing"
// TestSumRootToLeaf checks sumRootToLeaf on the tree 1 -> (0 -> (0, 1), 1 -> (0, 1)):
// the root-to-leaf binary numbers are 100, 101, 110, 111, i.e. 4+5+6+7 = 22.
func TestSumRootToLeaf(t *testing.T) {
	node := func(v int, l, r *TreeNode) *TreeNode {
		return &TreeNode{Val: v, Left: l, Right: r}
	}
	leaf := func(v int) *TreeNode {
		return node(v, nil, nil)
	}
	root := node(1,
		node(0, leaf(0), leaf(1)),
		node(1, leaf(0), leaf(1)),
	)
	if sumRootToLeaf(root) != 22 {
		t.Fatal()
	}
}
|
// Copyright (c) 2020 Xiaozhe Yao & AICAMP.CO.,LTD
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
package requests
import (
"reflect"
"testing"
)
// TestNewGitClient compares the constructor's return value against a table
// of expected clients (currently empty).
func TestNewGitClient(t *testing.T) {
	cases := []struct {
		name string
		want GitClient
	}{
		// TODO: Add test cases.
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := NewGitClient()
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("NewGitClient() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestGitClient_Clone drives GitClient.Clone over a table of remote/target
// pairs (currently empty).
func TestGitClient_Clone(t *testing.T) {
	type args struct {
		remoteURL    string
		targetFolder string
	}
	tests := []struct {
		name      string
		gitclient *GitClient
		args      args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Use the fixture's client when one is supplied; the original
			// declared the gitclient field but never read it.
			gitclient := tt.gitclient
			if gitclient == nil {
				gitclient = &GitClient{}
			}
			gitclient.Clone(tt.args.remoteURL, tt.args.targetFolder)
		})
	}
}
|
package httpapi
import (
"context"
"crypto/tls"
"net/http"
"github.com/serverless/event-gateway/internal/sync"
"go.uber.org/zap"
)
// ServerConfig contains information for an HTTP listener to interact with its environment.
type ServerConfig struct {
	Log    *zap.Logger // sink for server failure logs (see Listen)
	TLSCrt *string     // path to the TLS certificate; a dereferenced empty string disables TLS
	TLSKey *string     // path to the TLS private key; a dereferenced empty string disables TLS
	// Port is not read in this file; presumably consumed where the
	// http.Server is constructed — TODO confirm.
	Port          uint
	ShutdownGuard *sync.ShutdownGuard // Listen waits on ShuttingDown and calls InitiateShutdown on exit
}
// tlsConf hardens TLS-enabled servers: TLS 1.2 minimum, explicit curve
// preferences, and a server-chosen AES-256 cipher suite list.
var tlsConf = &tls.Config{
	MinVersion:               tls.VersionTLS12,
	CurvePreferences:         []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
	PreferServerCipherSuites: true,
	CipherSuites: []uint16{
		tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
		tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_RSA_WITH_AES_256_CBC_SHA,
	},
}
// Server is a context-aware http server.
type Server struct {
	Config      ServerConfig // environment wiring: logger, TLS paths, shutdown guard
	HTTPHandler *http.Server // the underlying server that Listen runs
}
// Listen sets up a graceful shutdown mechanism and runs the http.Server.
// It serves with TLS when both certificate and key paths are set, blocks
// until the server stops, and then re-signals the shutdown guard.
func (s Server) Listen() {
	go func() {
		<-s.Config.ShutdownGuard.ShuttingDown
		// Best effort: the Shutdown error is intentionally ignored here.
		s.HTTPHandler.Shutdown(context.Background())
	}()

	var err error
	if *s.Config.TLSCrt != "" && *s.Config.TLSKey != "" {
		s.HTTPHandler.TLSConfig = tlsConf
		// Empty TLSNextProto disables HTTP/2 upgrade negotiation.
		s.HTTPHandler.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
		err = s.HTTPHandler.ListenAndServeTLS(*s.Config.TLSCrt, *s.Config.TLSKey)
	} else {
		err = s.HTTPHandler.ListenAndServe()
	}
	// ListenAndServe returns http.ErrServerClosed after a graceful Shutdown;
	// the original logged that as a failure, which was misleading.
	if err != nil && err != http.ErrServerClosed {
		s.Config.Log.Error("HTTP server failed.", zap.Error(err))
	}
	s.Config.ShutdownGuard.InitiateShutdown()
}
|
package main
import "fmt"
// recurse returns the sum 1 + 2 + ... + n computed recursively.
// Non-positive n yields 0; the original only stopped at exactly 0, so a
// negative argument recursed until stack exhaustion.
func recurse(n int) int {
	if n <= 0 {
		return 0
	}
	return n + recurse(n-1)
}
// main prints the triangular sums for a few sample inputs.
func main() {
	for _, n := range []int{3, 10, 8} {
		fmt.Println(recurse(n))
	}
}
|
package main
import "fmt"
// main demonstrates Go's fixed-size array type: initialize, then print the
// elements individually and the array as a whole.
func main() {
	words := [2]string{"Hello", "World"}
	fmt.Println(words[0], words[1])
	fmt.Println(words)
	// Hello World
	// [Hello World]
}
/*
数组
类型 [n]T 是一个有 n 个类型为 T 的值的数组
表达式
var a[10] int
定义变量 a 是一个有十个整数的数组。
数组的长度是其类型的一部分, 因此数组不能改变大小。
这看起来是一个制约, 但是 Go 提供了更加便利的方式来使用数组
*/
|
// +build !integration
package disgord
import (
"io/ioutil"
"testing"
"github.com/andersfylling/disgord/internal/util"
)
// TestStateMarshalling verifies that a recorded voice-state JSON fixture
// unmarshals into VoiceState without error.
func TestStateMarshalling(t *testing.T) {
	raw, err := ioutil.ReadFile("testdata/voice/state1.json")
	check(err, t)

	var state VoiceState
	err = util.Unmarshal(raw, &state)
	check(err, t)
}
|
package testing
import (
"github.com/devspace-cloud/devspace/pkg/devspace/build"
"github.com/devspace-cloud/devspace/pkg/devspace/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/devspace-cloud/devspace/pkg/util/randutil"
)
// FakeController is the fake build controller; its Build method returns a
// precomputed image->tag map instead of performing real builds.
type FakeController struct {
	BuiltImages map[string]string // image name -> tag, computed by NewFakeController
}
// NewFakeController creates a new fake build controller. For every image in
// the config whose build is not explicitly disabled, it records a tag: the
// first configured tag if present, otherwise a random 7-character string.
func NewFakeController(config *latest.Config) build.Controller {
	builtImages := map[string]string{}
	for _, imageConf := range config.Images {
		// Skip images whose build is explicitly disabled.
		if imageConf.Build != nil && imageConf.Build.Disabled != nil && *imageConf.Build.Disabled {
			continue
		}

		// The original copied *imageConf into a local struct (a leftover from
		// the real parallel-build controller); only the Image field is read
		// here, so the copy was dead code.
		imageName := imageConf.Image

		// Prefer the first configured tag; fall back to a random one.
		imageTag, _ := randutil.GenerateRandomString(7)
		if len(imageConf.Tags) > 0 {
			imageTag = imageConf.Tags[0]
		}

		builtImages[imageName] = imageTag
	}

	return &FakeController{
		BuiltImages: builtImages,
	}
}
// Build pretends to build the images: it returns the precomputed map and
// never fails. The options and logger arguments are ignored.
func (f *FakeController) Build(options *build.Options, log log.Logger) (map[string]string, error) {
	return f.BuiltImages, nil
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
"math"
"math/rand"
"path/filepath"
"sort"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
_ "unsafe"
"github.com/cockroachdb/pebble"
"github.com/docker/go-units"
"github.com/google/uuid"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/errorpb"
sst "github.com/pingcap/kvproto/pkg/import_sstpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/br/pkg/lightning/backend"
"github.com/pingcap/tidb/br/pkg/lightning/backend/external"
"github.com/pingcap/tidb/br/pkg/lightning/backend/kv"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/br/pkg/lightning/config"
"github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/membuf"
"github.com/pingcap/tidb/br/pkg/pdutil"
"github.com/pingcap/tidb/br/pkg/restore/split"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/pingcap/tidb/keyspace"
tidbkv "github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/engine"
"github.com/pingcap/tidb/util/hack"
"github.com/stretchr/testify/require"
pd "github.com/tikv/pd/client"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/status"
)
// TestNextKey exercises nextKey on raw byte slices, int-handle row keys,
// common-handle row keys and a raw index key, checking that the result is
// strictly greater than the input but never greater than the next valid key.
func TestNextKey(t *testing.T) {
	require.Equal(t, []byte{}, nextKey([]byte{}))

	cases := [][]byte{
		{0},
		{255},
		{1, 255},
	}
	for _, b := range cases {
		next := nextKey(b)
		require.Equal(t, append(b, 0), next)
	}

	// in the old logic, this should return []byte{} which is not the actually smallest eky
	next := nextKey([]byte{1, 255})
	require.Equal(t, -1, bytes.Compare(next, []byte{2}))

	// another test case, nextkey()'s return should be smaller than key with a prefix of the origin key
	next = nextKey([]byte{1, 255})
	require.Equal(t, -1, bytes.Compare(next, []byte{1, 255, 0, 1, 2}))

	// test recode key
	// key with int handle
	for _, handleID := range []int64{math.MinInt64, 1, 255, math.MaxInt32 - 1} {
		key := tablecodec.EncodeRowKeyWithHandle(1, tidbkv.IntHandle(handleID))
		require.Equal(t, []byte(tablecodec.EncodeRowKeyWithHandle(1, tidbkv.IntHandle(handleID+1))), nextKey(key))
	}

	// overflowed: MaxInt64 has no successor handle, so nextKey must jump to
	// the next table's prefix.
	key := tablecodec.EncodeRowKeyWithHandle(1, tidbkv.IntHandle(math.MaxInt64))
	next = tablecodec.EncodeTablePrefix(2)
	require.Less(t, string(key), string(next))
	require.Equal(t, next, nextKey(key))

	// Common-handle keys: each pair is (datum, its successor datum).
	testDatums := [][]types.Datum{
		{types.NewIntDatum(1), types.NewIntDatum(2)},
		{types.NewIntDatum(255), types.NewIntDatum(256)},
		{types.NewIntDatum(math.MaxInt32), types.NewIntDatum(math.MaxInt32 + 1)},
		{types.NewStringDatum("test"), types.NewStringDatum("test\000")},
		{types.NewStringDatum("test\255"), types.NewStringDatum("test\255\000")},
	}
	stmtCtx := new(stmtctx.StatementContext)
	for _, datums := range testDatums {
		keyBytes, err := codec.EncodeKey(stmtCtx, nil, types.NewIntDatum(123), datums[0])
		require.NoError(t, err)
		h, err := tidbkv.NewCommonHandle(keyBytes)
		require.NoError(t, err)
		key := tablecodec.EncodeRowKeyWithHandle(1, h)
		nextKeyBytes, err := codec.EncodeKey(stmtCtx, nil, types.NewIntDatum(123), datums[1])
		require.NoError(t, err)
		nextHdl, err := tidbkv.NewCommonHandle(nextKeyBytes)
		require.NoError(t, err)
		nextValidKey := []byte(tablecodec.EncodeRowKeyWithHandle(1, nextHdl))
		// nextKey may return a key that can't be decoded, but it must not be larger than the valid next key.
		require.True(t, bytes.Compare(nextKey(key), nextValidKey) <= 0, "datums: %v", datums)
	}

	// a special case that when len(string datum) % 8 == 7, nextKey twice should not panic.
	keyBytes, err := codec.EncodeKey(stmtCtx, nil, types.NewStringDatum("1234567"))
	require.NoError(t, err)
	h, err := tidbkv.NewCommonHandle(keyBytes)
	require.NoError(t, err)
	key = tablecodec.EncodeRowKeyWithHandle(1, h)
	nextOnce := nextKey(key)
	// should not panic
	_ = nextKey(nextOnce)

	// dIAAAAAAAAD/PV9pgAAAAAD/AAABA4AAAAD/AAAAAQOAAAD/AAAAAAEAAAD8
	// a index key with: table: 61, index: 1, int64: 1, int64: 1
	a := []byte{116, 128, 0, 0, 0, 0, 0, 0, 255, 61, 95, 105, 128, 0, 0, 0, 0, 255, 0, 0, 1, 3, 128, 0, 0, 0, 255, 0, 0, 0, 1, 3, 128, 0, 0, 255, 0, 0, 0, 0, 1, 0, 0, 0, 252}
	require.Equal(t, append(a, 0), nextKey(a))
}
// The first half of this test is same as the test in tikv:
// https://github.com/tikv/tikv/blob/dbfe7730dd0fddb34cb8c3a7f8a079a1349d2d41/components/engine_rocks/src/properties.rs#L572
//
// TestRangeProperties feeds a fixed key/size/count pattern through a
// RangePropertiesCollector, decodes the resulting range index, merges it with
// a second hand-built property list, and checks that splitRangeBySizeProps
// cuts [a, z) at the expected boundaries for two size/keys thresholds.
func TestRangeProperties(t *testing.T) {
	type testCase struct {
		key   []byte
		vLen  int
		count int
	}
	cases := []testCase{
		// handle "a": size(size = 1, offset = 1),keys(1,1)
		{[]byte("a"), 0, 1},
		{[]byte("b"), defaultPropSizeIndexDistance / 8, 1},
		{[]byte("c"), defaultPropSizeIndexDistance / 4, 1},
		{[]byte("d"), defaultPropSizeIndexDistance / 2, 1},
		{[]byte("e"), defaultPropSizeIndexDistance / 8, 1},
		// handle "e": size(size = DISTANCE + 4, offset = DISTANCE + 5),keys(4,5)
		{[]byte("f"), defaultPropSizeIndexDistance / 4, 1},
		{[]byte("g"), defaultPropSizeIndexDistance / 2, 1},
		{[]byte("h"), defaultPropSizeIndexDistance / 8, 1},
		{[]byte("i"), defaultPropSizeIndexDistance / 4, 1},
		// handle "i": size(size = DISTANCE / 8 * 9 + 4, offset = DISTANCE / 8 * 17 + 9),keys(4,5)
		{[]byte("j"), defaultPropSizeIndexDistance / 2, 1},
		{[]byte("k"), defaultPropSizeIndexDistance / 2, 1},
		// handle "k": size(size = DISTANCE + 2, offset = DISTANCE / 8 * 25 + 11),keys(2,11)
		{[]byte("l"), 0, defaultPropKeysIndexDistance / 2},
		{[]byte("m"), 0, defaultPropKeysIndexDistance / 2},
		// handle "m": keys = DEFAULT_PROP_KEYS_INDEX_DISTANCE,offset = 11+DEFAULT_PROP_KEYS_INDEX_DISTANCE
		{[]byte("n"), 1, defaultPropKeysIndexDistance},
		// handle "n": keys = DEFAULT_PROP_KEYS_INDEX_DISTANCE, offset = 11+2*DEFAULT_PROP_KEYS_INDEX_DISTANCE
		{[]byte("o"), 1, 1},
		// handle "o": keys = 1, offset = 12 + 2*DEFAULT_PROP_KEYS_INDEX_DISTANCE
	}

	collector := newRangePropertiesCollector()
	for _, p := range cases {
		v := make([]byte, p.vLen)
		for i := 0; i < p.count; i++ {
			_ = collector.Add(pebble.InternalKey{UserKey: p.key, Trailer: uint64(pebble.InternalKeyKindSet)}, v)
		}
	}

	userProperties := make(map[string]string, 1)
	_ = collector.Finish(userProperties)

	props, err := decodeRangeProperties(hack.Slice(userProperties[propRangeIndex]), common.NoopKeyAdapter{})
	require.NoError(t, err)

	// Smallest key in props.
	require.Equal(t, cases[0].key, props[0].Key)
	// Largest key in props.
	require.Equal(t, cases[len(cases)-1].key, props[len(props)-1].Key)
	require.Len(t, props, 7)

	// A second, hand-built property list to merge with the collected one.
	props2 := rangeProperties([]rangeProperty{
		{[]byte("b"), rangeOffsets{defaultPropSizeIndexDistance + 10, defaultPropKeysIndexDistance / 2}},
		{[]byte("h"), rangeOffsets{defaultPropSizeIndexDistance * 3 / 2, defaultPropKeysIndexDistance * 3 / 2}},
		{[]byte("k"), rangeOffsets{defaultPropSizeIndexDistance * 3, defaultPropKeysIndexDistance * 7 / 4}},
		{[]byte("mm"), rangeOffsets{defaultPropSizeIndexDistance * 5, defaultPropKeysIndexDistance * 2}},
		{[]byte("q"), rangeOffsets{defaultPropSizeIndexDistance * 7, defaultPropKeysIndexDistance*9/4 + 10}},
		{[]byte("y"), rangeOffsets{defaultPropSizeIndexDistance*7 + 100, defaultPropKeysIndexDistance*9/4 + 1010}},
	})

	sizeProps := newSizeProperties()
	sizeProps.addAll(props)
	sizeProps.addAll(props2)

	// Expected merged per-key offsets after addAll of both lists.
	res := []*rangeProperty{
		{[]byte("a"), rangeOffsets{1, 1}},
		{[]byte("b"), rangeOffsets{defaultPropSizeIndexDistance + 10, defaultPropKeysIndexDistance / 2}},
		{[]byte("e"), rangeOffsets{defaultPropSizeIndexDistance + 4, 4}},
		{[]byte("h"), rangeOffsets{defaultPropSizeIndexDistance/2 - 10, defaultPropKeysIndexDistance}},
		{[]byte("i"), rangeOffsets{defaultPropSizeIndexDistance*9/8 + 4, 4}},
		{[]byte("k"), rangeOffsets{defaultPropSizeIndexDistance*5/2 + 2, defaultPropKeysIndexDistance/4 + 2}},
		{[]byte("m"), rangeOffsets{defaultPropKeysIndexDistance, defaultPropKeysIndexDistance}},
		{[]byte("mm"), rangeOffsets{defaultPropSizeIndexDistance * 2, defaultPropKeysIndexDistance / 4}},
		{[]byte("n"), rangeOffsets{defaultPropKeysIndexDistance * 2, defaultPropKeysIndexDistance}},
		{[]byte("o"), rangeOffsets{2, 1}},
		{[]byte("q"), rangeOffsets{defaultPropSizeIndexDistance * 2, defaultPropKeysIndexDistance/4 + 10}},
		{[]byte("y"), rangeOffsets{100, 1000}},
	}

	require.Equal(t, 12, sizeProps.indexHandles.Len())
	idx := 0
	sizeProps.iter(func(p *rangeProperty) bool {
		require.Equal(t, res[idx], p)
		idx++
		return true
	})

	fullRange := Range{start: []byte("a"), end: []byte("z")}
	ranges := splitRangeBySizeProps(fullRange, sizeProps, 2*defaultPropSizeIndexDistance, defaultPropKeysIndexDistance*5/2)

	require.Equal(t, []Range{
		{start: []byte("a"), end: []byte("e")},
		{start: []byte("e"), end: []byte("k")},
		{start: []byte("k"), end: []byte("mm")},
		{start: []byte("mm"), end: []byte("q")},
		{start: []byte("q"), end: []byte("z")},
	}, ranges)

	// A tighter keys threshold yields more, smaller ranges.
	ranges = splitRangeBySizeProps(fullRange, sizeProps, 2*defaultPropSizeIndexDistance, defaultPropKeysIndexDistance)
	require.Equal(t, []Range{
		{start: []byte("a"), end: []byte("e")},
		{start: []byte("e"), end: []byte("h")},
		{start: []byte("h"), end: []byte("k")},
		{start: []byte("k"), end: []byte("m")},
		{start: []byte("m"), end: []byte("mm")},
		{start: []byte("mm"), end: []byte("n")},
		{start: []byte("n"), end: []byte("q")},
		{start: []byte("q"), end: []byte("z")},
	}, ranges)
}
// TestRangePropertiesWithPebble writes 1000 keys with random value lengths
// both to a pebble db (with the collector installed as a table property
// collector) and to a standalone collector, then checks that the flushed
// SST's user properties match the standalone collector's output.
func TestRangePropertiesWithPebble(t *testing.T) {
	sizeDistance := uint64(500)
	keysDistance := uint64(20)
	opt := &pebble.Options{
		MemTableSize:             512 * units.MiB,
		MaxConcurrentCompactions: 16,
		L0CompactionThreshold:    math.MaxInt32, // set to max try to disable compaction
		L0StopWritesThreshold:    math.MaxInt32, // set to max try to disable compaction
		MaxOpenFiles:             10000,
		DisableWAL:               true,
		ReadOnly:                 false,
		TablePropertyCollectors: []func() pebble.TablePropertyCollector{
			func() pebble.TablePropertyCollector {
				return &RangePropertiesCollector{
					props:               make([]rangeProperty, 0, 1024),
					propSizeIdxDistance: sizeDistance,
					propKeysIdxDistance: keysDistance,
				}
			},
		},
	}
	db, _ := makePebbleDB(t, opt)
	defer db.Close()

	// local collector
	collector := &RangePropertiesCollector{
		props:               make([]rangeProperty, 0, 1024),
		propSizeIdxDistance: sizeDistance,
		propKeysIdxDistance: keysDistance,
	}
	writeOpt := &pebble.WriteOptions{Sync: false}
	value := make([]byte, 100)
	for i := 0; i < 10; i++ {
		wb := db.NewBatch()
		for j := 0; j < 100; j++ {
			key := make([]byte, 8)
			valueLen := rand.Intn(50)
			binary.BigEndian.PutUint64(key, uint64(i*100+j))
			// Mirror every write into the standalone collector.
			err := wb.Set(key, value[:valueLen], writeOpt)
			require.NoError(t, err)
			err = collector.Add(pebble.InternalKey{UserKey: key, Trailer: uint64(pebble.InternalKeyKindSet)}, value[:valueLen])
			require.NoError(t, err)
		}
		require.NoError(t, wb.Commit(writeOpt))
	}

	// flush one sst
	require.NoError(t, db.Flush())

	props := make(map[string]string, 1)
	require.NoError(t, collector.Finish(props))

	sstMetas, err := db.SSTables(pebble.WithProperties())
	require.NoError(t, err)
	// Compaction is disabled above, so exactly one SST sits in L0.
	for i, level := range sstMetas {
		if i == 0 {
			require.Equal(t, 1, len(level))
		} else {
			require.Empty(t, level)
		}
	}

	require.Equal(t, props, sstMetas[0][0].Properties.UserProperties)
}
// testLocalWriter writes 20000 kv pairs through a local writer into a
// pebble-backed Engine and verifies that, after flushing, the db contains all
// keys in sorted order and the engine's length/size counters match.
//
// needSort: the kv slices are sorted before being appended (the writer is
// told IsKVSorted only when the whole input is sorted, i.e. needSort &&
// !partitialSort). partitialSort: only the first 12000 pairs are sorted, so
// the writer must not treat the input as sorted.
func testLocalWriter(t *testing.T, needSort bool, partitialSort bool) {
	opt := &pebble.Options{
		MemTableSize:             1024 * 1024,
		MaxConcurrentCompactions: 16,
		L0CompactionThreshold:    math.MaxInt32, // set to max try to disable compaction
		L0StopWritesThreshold:    math.MaxInt32, // set to max try to disable compaction
		DisableWAL:               true,
		ReadOnly:                 false,
	}
	db, tmpPath := makePebbleDB(t, opt)
	defer db.Close()
	_, engineUUID := backend.MakeUUID("ww", 0)
	engineCtx, cancel := context.WithCancel(context.Background())
	f := &Engine{
		UUID:         engineUUID,
		sstDir:       tmpPath,
		ctx:          engineCtx,
		cancel:       cancel,
		sstMetasChan: make(chan metaOrFlush, 64),
		keyAdapter:   common.NoopKeyAdapter{},
		logger:       log.L(),
	}
	f.db.Store(db)
	f.sstIngester = dbSSTIngester{e: f}
	f.wg.Add(1)
	go f.ingestSSTLoop()
	sorted := needSort && !partitialSort
	pool := membuf.NewPool()
	defer pool.Destroy()
	kvBuffer := pool.NewBuffer()
	w, err := openLocalWriter(&backend.LocalWriterConfig{IsKVSorted: sorted}, f, keyspace.CodecV1, 1024, kvBuffer)
	require.NoError(t, err)

	ctx := context.Background()
	var kvs []common.KvPair
	value := make([]byte, 128)
	for i := 0; i < 16; i++ {
		binary.BigEndian.PutUint64(value[i*8:], uint64(i))
	}
	var keys [][]byte
	// Keys: 8 random bytes + 8 bytes of the loop counter, so all 20000 keys
	// are distinct.
	for i := 1; i <= 20000; i++ {
		var kv common.KvPair
		kv.Key = make([]byte, 16)
		kv.Val = make([]byte, 128)
		copy(kv.Val, value)
		key := rand.Intn(1000)
		binary.BigEndian.PutUint64(kv.Key, uint64(key))
		binary.BigEndian.PutUint64(kv.Key[8:], uint64(i))
		kvs = append(kvs, kv)
		keys = append(keys, kv.Key)
	}
	var rows1 []common.KvPair
	var rows2 []common.KvPair
	var rows3 []common.KvPair
	rows4 := kvs[:12000]
	if partitialSort {
		sort.Slice(rows4, func(i, j int) bool {
			return bytes.Compare(rows4[i].Key, rows4[j].Key) < 0
		})
		rows1 = rows4[:6000]
		rows3 = rows4[6000:]
		rows2 = kvs[12000:]
	} else {
		if needSort {
			sort.Slice(kvs, func(i, j int) bool {
				return bytes.Compare(kvs[i].Key, kvs[j].Key) < 0
			})
		}
		rows1 = kvs[:6000]
		rows2 = kvs[6000:12000]
		rows3 = kvs[12000:]
	}
	err = w.AppendRows(ctx, []string{}, kv.MakeRowsFromKvPairs(rows1))
	require.NoError(t, err)
	err = w.AppendRows(ctx, []string{}, kv.MakeRowsFromKvPairs(rows2))
	require.NoError(t, err)
	err = w.AppendRows(ctx, []string{}, kv.MakeRowsFromKvPairs(rows3))
	require.NoError(t, err)
	flushStatus, err := w.Close(context.Background())
	require.NoError(t, err)
	require.NoError(t, f.flushEngineWithoutLock(ctx))
	require.True(t, flushStatus.Flushed())
	o := &pebble.IterOptions{}
	it := db.NewIter(o)

	sort.Slice(keys, func(i, j int) bool {
		return bytes.Compare(keys[i], keys[j]) < 0
	})
	// 144 = 16-byte key + 128-byte value per pair.
	require.Equal(t, 20000, int(f.Length.Load()))
	require.Equal(t, 144*20000, int(f.TotalSize.Load()))
	valid := it.SeekGE(keys[0])
	require.True(t, valid)
	for _, k := range keys {
		require.Equal(t, k, it.Key())
		it.Next()
	}
	close(f.sstMetasChan)
	f.wg.Wait()
}
// TestLocalWriterWithSort feeds unsorted rows (needSort=false), leaving the
// sorting to the writer/engine.
func TestLocalWriterWithSort(t *testing.T) {
	testLocalWriter(t, false, false)
}

// TestLocalWriterWithIngest feeds fully pre-sorted rows (needSort=true), so
// the writer is opened with IsKVSorted.
func TestLocalWriterWithIngest(t *testing.T) {
	testLocalWriter(t, true, false)
}

// TestLocalWriterWithIngestUnsort feeds rows where only a prefix is sorted
// (partitialSort=true), so the writer must not assume sorted input.
func TestLocalWriterWithIngestUnsort(t *testing.T) {
	testLocalWriter(t, true, true)
}
// mockSplitClient stubs split.SplitClient; only GetRegion is overridden.
type mockSplitClient struct {
	split.SplitClient
}

// GetRegion always reports a single region with ID 1, led by peer 1 and
// starting at the queried key.
func (c *mockSplitClient) GetRegion(ctx context.Context, key []byte) (*split.RegionInfo, error) {
	return &split.RegionInfo{
		Leader: &metapb.Peer{Id: 1},
		Region: &metapb.Region{
			Id:       1,
			StartKey: key,
		},
	}, nil
}
// testIngester is a fake sstIngester that only does metadata bookkeeping.
type testIngester struct{}

// mergeSSTs folds the given metas into a single meta carrying the summed
// size/count and the last sequence number. It panics when the metas'
// sequence numbers are not consecutive, which the tests use to assert that
// metas arrive in order.
func (i testIngester) mergeSSTs(metas []*sstMeta, dir string) (*sstMeta, error) {
	if len(metas) == 0 {
		return nil, errors.New("sst metas is empty")
	} else if len(metas) == 1 {
		return metas[0], nil
	}
	if metas[len(metas)-1].seq-metas[0].seq != int32(len(metas)-1) {
		panic("metas is not add in order")
	}

	newMeta := &sstMeta{
		seq: metas[len(metas)-1].seq,
	}
	for _, m := range metas {
		newMeta.totalSize += m.totalSize
		newMeta.totalCount += m.totalCount
	}
	return newMeta, nil
}

// ingest is a no-op for the fake ingester.
func (i testIngester) ingest([]*sstMeta) error {
	return nil
}
// TestLocalIngestLoop runs 4 goroutines that each add 500 sstMetas (with
// randomly interleaved flushes) into an Engine using the fake testIngester,
// then checks the accumulated size/length counters and that the engine's
// finished meta sequence reached the maximum sequence handed out.
func TestLocalIngestLoop(t *testing.T) {
	opt := &pebble.Options{
		MemTableSize:             1024 * 1024,
		MaxConcurrentCompactions: 16,
		L0CompactionThreshold:    math.MaxInt32, // set to max try to disable compaction
		L0StopWritesThreshold:    math.MaxInt32, // set to max try to disable compaction
		DisableWAL:               true,
		ReadOnly:                 false,
	}
	db, tmpPath := makePebbleDB(t, opt)
	defer db.Close()
	_, engineUUID := backend.MakeUUID("ww", 0)
	engineCtx, cancel := context.WithCancel(context.Background())
	f := Engine{
		UUID:         engineUUID,
		sstDir:       tmpPath,
		ctx:          engineCtx,
		cancel:       cancel,
		sstMetasChan: make(chan metaOrFlush, 64),
		config: backend.LocalEngineConfig{
			Compact:            true,
			CompactThreshold:   100,
			CompactConcurrency: 4,
		},
		logger: log.L(),
	}
	f.db.Store(db)
	f.sstIngester = testIngester{}
	f.wg.Add(1)
	go f.ingestSSTLoop()

	// add some routines to add ssts
	var wg sync.WaitGroup
	wg.Add(4)
	totalSize := int64(0)
	concurrency := 4
	count := 500
	var metaSeqLock sync.Mutex
	maxMetaSeq := int32(0)
	for i := 0; i < concurrency; i++ {
		go func() {
			defer wg.Done()
			flushCnt := rand.Int31n(10) + 1
			seq := int32(0)
			for i := 0; i < count; i++ {
				size := int64(rand.Int31n(50) + 1)
				m := &sstMeta{totalSize: size, totalCount: 1}
				atomic.AddInt64(&totalSize, size)
				metaSeq, err := f.addSST(engineCtx, m)
				require.NoError(t, err)
				// Flush at random points to exercise the flush path.
				if int32(i) >= flushCnt {
					f.mutex.RLock()
					err = f.flushEngineWithoutLock(engineCtx)
					require.NoError(t, err)
					f.mutex.RUnlock()
					flushCnt += rand.Int31n(10) + 1
				}
				seq = metaSeq
			}
			metaSeqLock.Lock()
			if atomic.LoadInt32(&maxMetaSeq) < seq {
				atomic.StoreInt32(&maxMetaSeq, seq)
			}
			metaSeqLock.Unlock()
		}()
	}
	wg.Wait()

	f.mutex.RLock()
	err := f.flushEngineWithoutLock(engineCtx)
	require.NoError(t, err)
	f.mutex.RUnlock()

	close(f.sstMetasChan)
	f.wg.Wait()
	require.NoError(t, f.ingestErr.Get())
	require.Equal(t, f.TotalSize.Load(), totalSize)
	require.Equal(t, int64(concurrency*count), f.Length.Load())
	require.Equal(t, atomic.LoadInt32(&maxMetaSeq), f.finishedMetaSeq.Load())
}
// makeRanges pairs up consecutive entries of input as [start, end) ranges;
// a trailing odd entry is ignored.
func makeRanges(input []string) []Range {
	out := make([]Range, 0, len(input)/2)
	for i := 1; i < len(input); i += 2 {
		out = append(out, Range{start: []byte(input[i-1]), end: []byte(input[i])})
	}
	return out
}
// testMergeSSTs writes each kv batch into its own SST file and asserts that
// dbSSTIngester.mergeSSTs yields a meta whose total count/size match the
// expectations in meta.
func testMergeSSTs(t *testing.T, kvs [][]common.KvPair, meta *sstMeta) {
	opt := &pebble.Options{
		MemTableSize:             1024 * 1024,
		MaxConcurrentCompactions: 16,
		L0CompactionThreshold:    math.MaxInt32, // set to max try to disable compaction
		L0StopWritesThreshold:    math.MaxInt32, // set to max try to disable compaction
		DisableWAL:               true,
		ReadOnly:                 false,
	}
	db, tmpPath := makePebbleDB(t, opt)
	defer db.Close()
	_, engineUUID := backend.MakeUUID("ww", 0)
	engineCtx, cancel := context.WithCancel(context.Background())

	f := &Engine{
		UUID:         engineUUID,
		sstDir:       tmpPath,
		ctx:          engineCtx,
		cancel:       cancel,
		sstMetasChan: make(chan metaOrFlush, 64),
		config: backend.LocalEngineConfig{
			Compact:            true,
			CompactThreshold:   100,
			CompactConcurrency: 4,
		},
		logger: log.L(),
	}
	f.db.Store(db)

	// createSSTWriter makes a fresh writer targeting a unique file in sstDir.
	createSSTWriter := func() (*sstWriter, error) {
		path := filepath.Join(f.sstDir, uuid.New().String()+".sst")
		writer, err := newSSTWriter(path)
		if err != nil {
			return nil, err
		}
		sw := &sstWriter{sstMeta: &sstMeta{path: path}, writer: writer}
		return sw, nil
	}

	metas := make([]*sstMeta, 0, len(kvs))

	for _, kv := range kvs {
		w, err := createSSTWriter()
		require.NoError(t, err)

		err = w.writeKVs(kv)
		require.NoError(t, err)

		require.NoError(t, w.writer.Close())
		metas = append(metas, w.sstMeta)
	}

	i := dbSSTIngester{e: f}
	newMeta, err := i.mergeSSTs(metas, tmpPath)
	require.NoError(t, err)

	require.Equal(t, meta.totalCount, newMeta.totalCount)
	require.Equal(t, meta.totalSize, newMeta.totalSize)
}
// TestMergeSSTs merges 5 SSTs of 10 distinct 16-byte keys each and expects
// 50 total entries (800 bytes of keys).
func TestMergeSSTs(t *testing.T) {
	var kvs [][]common.KvPair
	for i := 0; i < 5; i++ {
		pairs := make([]common.KvPair, 0, 10)
		for j := 0; j < 10; j++ {
			pair := common.KvPair{Key: make([]byte, 16)}
			binary.BigEndian.PutUint64(pair.Key, uint64(i*100+j))
			pairs = append(pairs, pair)
		}
		kvs = append(kvs, pairs)
	}

	testMergeSSTs(t, kvs, &sstMeta{totalCount: 50, totalSize: 800})
}
// TestMergeSSTsDuplicated merges 4 distinct SST batches plus a duplicate of
// the first one; the duplicate keys must be deduplicated, leaving 40 entries
// (640 bytes of keys).
func TestMergeSSTsDuplicated(t *testing.T) {
	var kvs [][]common.KvPair
	for i := 0; i < 4; i++ {
		pairs := make([]common.KvPair, 0, 10)
		for j := 0; j < 10; j++ {
			pair := common.KvPair{Key: make([]byte, 16)}
			binary.BigEndian.PutUint64(pair.Key, uint64(i*100+j))
			pairs = append(pairs, pair)
		}
		kvs = append(kvs, pairs)
	}

	// make a duplication
	kvs = append(kvs, kvs[0])

	testMergeSSTs(t, kvs, &sstMeta{totalCount: 40, totalSize: 640})
}
// mockPdClient stubs pd.Client with canned store and region lists.
type mockPdClient struct {
	pd.Client
	stores  []*metapb.Store
	regions []*pd.Region
}

// GetAllStores returns the canned store list.
func (c *mockPdClient) GetAllStores(ctx context.Context, opts ...pd.GetStoreOption) ([]*metapb.Store, error) {
	return c.stores, nil
}

// ScanRegions returns the canned region list regardless of the requested range.
func (c *mockPdClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*pd.Region, error) {
	return c.regions, nil
}
// mockGrpcErr is an error whose gRPC status is codes.Unimplemented, used to
// simulate a store that lacks the MultiIngest API.
type mockGrpcErr struct{}

// GRPCStatus reports Unimplemented, which status.FromError picks up.
func (e mockGrpcErr) GRPCStatus() *status.Status {
	return status.New(codes.Unimplemented, "unimplemented")
}

// Error implements the error interface.
func (e mockGrpcErr) Error() string {
	return "unimplemented"
}
// mockImportClient stubs sst.ImportSSTClient for write/ingest tests.
type mockImportClient struct {
	sst.ImportSSTClient
	store              *metapb.Store                       // store this client pretends to talk to
	resp               *sst.IngestResponse                 // canned response replayed while cnt < retry
	onceResp           *atomic.Pointer[sst.IngestResponse] // one-shot response, swapped out on first use
	err                error                               // canned error replayed while cnt < retry
	retry              int                                 // number of leading calls that get resp/err
	cnt                int                                 // MultiIngest calls served so far
	multiIngestCheckFn func(s *metapb.Store) bool          // false => MultiIngest returns Unimplemented
	apiInvokeRecorder  map[string][]uint64                 // API name -> store IDs it was invoked on
}
// newMockImportClient returns a client whose multi-ingest capability check
// always succeeds; all other fields keep their zero values.
func newMockImportClient() *mockImportClient {
	c := &mockImportClient{}
	c.multiIngestCheckFn = func(*metapb.Store) bool {
		return true
	}
	return c
}
// MultiIngest emulates the multi-ingest RPC: it rejects SSTs whose RegionId
// does not match the backing store, records the invocation, replays the
// canned error/response for the first `retry` calls, and reports an
// Unimplemented gRPC error when multiIngestCheckFn rejects the store.
func (c *mockImportClient) MultiIngest(_ context.Context, req *sst.MultiIngestRequest, _ ...grpc.CallOption) (*sst.IngestResponse, error) {
	defer func() {
		c.cnt++
	}()
	for _, meta := range req.Ssts {
		if meta.RegionId != c.store.GetId() {
			return &sst.IngestResponse{Error: &errorpb.Error{Message: "The file which would be ingested doest not exist."}}, nil
		}
	}
	if c.apiInvokeRecorder != nil {
		c.apiInvokeRecorder["MultiIngest"] = append(c.apiInvokeRecorder["MultiIngest"], c.store.GetId())
	}
	// While still inside the retry window, replay whichever canned outcome
	// is configured: error, one-shot response, or fixed response.
	if c.cnt < c.retry {
		if c.err != nil {
			return c.resp, c.err
		}
		if c.onceResp != nil {
			resp := c.onceResp.Swap(&sst.IngestResponse{})
			return resp, nil
		}
		if c.resp != nil {
			return c.resp, nil
		}
	}

	if !c.multiIngestCheckFn(c.store) {
		return nil, mockGrpcErr{}
	}
	return &sst.IngestResponse{}, nil
}
// mockWriteClient stubs the client-streaming Write RPC; Send is a no-op and
// CloseAndRecv hands back the canned response.
type mockWriteClient struct {
	sst.ImportSST_WriteClient
	writeResp *sst.WriteResponse // response returned by CloseAndRecv
}

// Send accepts and discards the write request.
func (m mockWriteClient) Send(request *sst.WriteRequest) error {
	return nil
}

// CloseAndRecv finishes the stream with the canned response.
func (m mockWriteClient) CloseAndRecv() (*sst.WriteResponse, error) {
	return m.writeResp, nil
}
// baseCodec mirrors grpc's private baseCodec interface so we can satisfy the
// linknamed newContextWithRPCInfo below.
type baseCodec interface {
	Marshal(v interface{}) ([]byte, error)
	Unmarshal(data []byte, v interface{}) error
}
// newContextWithRPCInfo is linked to grpc's private function of the same name
// so tests can build a stream context that carries RPC info.
//
//go:linkname newContextWithRPCInfo google.golang.org/grpc.newContextWithRPCInfo
func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp grpc.Compressor, comp encoding.Compressor) context.Context
type mockCodec struct{}
// Marshal returns no bytes and no error.
func (m mockCodec) Marshal(v interface{}) ([]byte, error) {
	return nil, nil
}
// Unmarshal ignores its input and always succeeds.
func (m mockCodec) Unmarshal(data []byte, v interface{}) error {
	return nil
}
// Context returns a context carrying fabricated RPC info, so code that
// inspects the stream's RPC info works against the mock.
func (m mockWriteClient) Context() context.Context {
	ctx := context.Background()
	return newContextWithRPCInfo(ctx, false, mockCodec{}, nil, nil)
}
// SendMsg discards the message and always succeeds.
func (m mockWriteClient) SendMsg(_ interface{}) error {
	return nil
}
// Write records the invocation and returns a mockWriteClient whose final
// response carries an SSTMeta stamped with this client's store ID (checked
// later by MultiIngest).
func (c *mockImportClient) Write(ctx context.Context, opts ...grpc.CallOption) (sst.ImportSST_WriteClient, error) {
	if c.apiInvokeRecorder != nil {
		c.apiInvokeRecorder["Write"] = append(c.apiInvokeRecorder["Write"], c.store.GetId())
	}
	return mockWriteClient{writeResp: &sst.WriteResponse{Metas: []*sst.SSTMeta{
		{RegionId: c.store.GetId()},
	}}}, nil
}
// mockImportClientFactory creates per-store mock import clients via
// createClientFn and can share an invocation recorder across them.
type mockImportClientFactory struct {
	stores            []*metapb.Store
	createClientFn    func(store *metapb.Store) sst.ImportSSTClient
	apiInvokeRecorder map[string][]uint64
}
// Create looks the store up by ID and hands it to createClientFn; unknown
// store IDs yield an error.
func (f *mockImportClientFactory) Create(_ context.Context, storeID uint64) (sst.ImportSSTClient, error) {
	for i := range f.stores {
		s := f.stores[i]
		if s.Id != storeID {
			continue
		}
		return f.createClientFn(s), nil
	}
	return nil, fmt.Errorf("store %d not found", storeID)
}
func (f *mockImportClientFactory) Close() {}
// TestMultiIngest verifies Backend.checkMultiIngestSupport: it should probe
// only eligible stores (skipping tombstones and TiFlash), tolerate transient
// gRPC errors with retries, and derive supportMultiIngest from whether every
// probed store implements the MultiIngest API.
//
// Fixed: the case field was misspelled supportMutliIngest.
func TestMultiIngest(t *testing.T) {
	allStores := []*metapb.Store{
		{
			Id:    1,
			State: metapb.StoreState_Offline,
		},
		{
			Id:    2,
			State: metapb.StoreState_Tombstone,
			Labels: []*metapb.StoreLabel{
				{
					Key:   "test",
					Value: "tiflash",
				},
			},
		},
		{
			Id:    3,
			State: metapb.StoreState_Up,
			Labels: []*metapb.StoreLabel{
				{
					Key:   "test",
					Value: "123",
				},
			},
		},
		{
			Id:    4,
			State: metapb.StoreState_Tombstone,
			Labels: []*metapb.StoreLabel{
				{
					Key:   "engine",
					Value: "test",
				},
			},
		},
		{
			Id:    5,
			State: metapb.StoreState_Tombstone,
			Labels: []*metapb.StoreLabel{
				{
					Key:   "engine",
					Value: "test123",
				},
			},
		},
		{
			Id:    6,
			State: metapb.StoreState_Offline,
			Labels: []*metapb.StoreLabel{
				{
					Key:   "engine",
					Value: "tiflash",
				},
			},
		},
		{
			Id:    7,
			State: metapb.StoreState_Up,
			Labels: []*metapb.StoreLabel{
				{
					Key:   "test",
					Value: "123",
				},
				{
					Key:   "engine",
					Value: "tiflash",
				},
			},
		},
		{
			Id:    8,
			State: metapb.StoreState_Up,
		},
	}
	cases := []struct {
		filter             func(store *metapb.Store) bool
		multiIngestSupport func(s *metapb.Store) bool
		retry              int
		err                error
		supportMultiIngest bool
		retErr             string
	}{
		// test up stores with all support multiIngest
		{
			func(store *metapb.Store) bool {
				return store.State == metapb.StoreState_Up
			},
			func(s *metapb.Store) bool {
				return true
			},
			0,
			nil,
			true,
			"",
		},
		// test all up stores with tiflash not support multi ingest
		{
			func(store *metapb.Store) bool {
				return store.State == metapb.StoreState_Up
			},
			func(s *metapb.Store) bool {
				return !engine.IsTiFlash(s)
			},
			0,
			nil,
			true,
			"",
		},
		// test all up stores with only tiflash support multi ingest
		{
			func(store *metapb.Store) bool {
				return store.State == metapb.StoreState_Up
			},
			func(s *metapb.Store) bool {
				return engine.IsTiFlash(s)
			},
			0,
			nil,
			false,
			"",
		},
		// test all up stores with some non-tiflash store support multi ingest
		{
			func(store *metapb.Store) bool {
				return store.State == metapb.StoreState_Up
			},
			func(s *metapb.Store) bool {
				return len(s.Labels) > 0
			},
			0,
			nil,
			false,
			"",
		},
		// test all stores with all states
		{
			func(store *metapb.Store) bool {
				return true
			},
			func(s *metapb.Store) bool {
				return true
			},
			0,
			nil,
			true,
			"",
		},
		// test all non-tiflash stores that support multi ingests
		{
			func(store *metapb.Store) bool {
				return !engine.IsTiFlash(store)
			},
			func(s *metapb.Store) bool {
				return !engine.IsTiFlash(s)
			},
			0,
			nil,
			true,
			"",
		},
		// test only up stores support multi ingest
		{
			func(store *metapb.Store) bool {
				return true
			},
			func(s *metapb.Store) bool {
				return s.State == metapb.StoreState_Up
			},
			0,
			nil,
			true,
			"",
		},
		// test only offline/tombstone stores support multi ingest
		{
			func(store *metapb.Store) bool {
				return true
			},
			func(s *metapb.Store) bool {
				return s.State != metapb.StoreState_Up
			},
			0,
			nil,
			false,
			"",
		},
		// test grpc return error but no tiflash
		{
			func(store *metapb.Store) bool {
				return !engine.IsTiFlash(store)
			},
			func(s *metapb.Store) bool {
				return true
			},
			math.MaxInt32,
			errors.New("mock error"),
			false,
			"",
		},
		// test grpc return error and contains offline tiflash
		{
			func(store *metapb.Store) bool {
				return !engine.IsTiFlash(store) || store.State != metapb.StoreState_Up
			},
			func(s *metapb.Store) bool {
				return true
			},
			math.MaxInt32,
			errors.New("mock error"),
			false,
			"",
		},
		// test grpc return error
		{
			func(store *metapb.Store) bool {
				return true
			},
			func(s *metapb.Store) bool {
				return true
			},
			math.MaxInt32,
			errors.New("mock error"),
			false,
			"mock error",
		},
		// test grpc return error only once
		{
			func(store *metapb.Store) bool {
				return true
			},
			func(s *metapb.Store) bool {
				return true
			},
			1,
			errors.New("mock error"),
			true,
			"",
		},
	}
	for _, testCase := range cases {
		stores := make([]*metapb.Store, 0, len(allStores))
		for _, s := range allStores {
			if testCase.filter(s) {
				stores = append(stores, s)
			}
		}
		importCli := &mockImportClient{
			cnt:                0,
			retry:              testCase.retry,
			err:                testCase.err,
			multiIngestCheckFn: testCase.multiIngestSupport,
		}
		pdCtl := &pdutil.PdController{}
		pdCtl.SetPDClient(&mockPdClient{stores: stores})
		local := &Backend{
			pdCtl: pdCtl,
			importClientFactory: &mockImportClientFactory{
				stores: allStores,
				createClientFn: func(store *metapb.Store) sst.ImportSSTClient {
					importCli.store = store
					return importCli
				},
			},
			logger: log.L(),
		}
		err := local.checkMultiIngestSupport(context.Background())
		if err != nil {
			require.Contains(t, err.Error(), testCase.retErr)
		} else {
			require.Equal(t, testCase.supportMultiIngest, local.supportMultiIngest)
		}
	}
}
func TestLocalWriteAndIngestPairsFailFast(t *testing.T) {
bak := Backend{}
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/WriteToTiKVNotEnoughDiskSpace", "return(true)"))
defer func() {
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/WriteToTiKVNotEnoughDiskSpace"))
}()
jobCh := make(chan *regionJob, 1)
jobCh <- ®ionJob{}
jobOutCh := make(chan *regionJob, 1)
err := bak.startWorker(context.Background(), jobCh, jobOutCh, nil)
require.Error(t, err)
require.Regexp(t, "the remaining storage capacity of TiKV.*", err.Error())
require.Len(t, jobCh, 0)
}
// TestGetRegionSplitSizeKeys checks that getRegionSplitSizeKeys returns the
// split config from the first store that answers, tolerating stores whose
// status endpoint fails.
func TestGetRegionSplitSizeKeys(t *testing.T) {
	allStores := []*metapb.Store{
		{
			Address:       "172.16.102.1:20160",
			StatusAddress: "0.0.0.0:20180",
		},
		{
			Address:       "172.16.102.2:20160",
			StatusAddress: "0.0.0.0:20180",
		},
		{
			Address:       "172.16.102.3:20160",
			StatusAddress: "0.0.0.0:20180",
		},
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cli := utils.FakePDClient{Stores: allStores}
	defer func() {
		getSplitConfFromStoreFunc = getSplitConfFromStore
	}()
	// Only the host derived from store .3 answers; the others simulate a dead
	// status endpoint. NOTE(review): the matched host combines the store
	// address host with the status port — confirm against the host derivation
	// inside getRegionSplitSizeKeys.
	getSplitConfFromStoreFunc = func(ctx context.Context, host string, tls *common.TLS) (int64, int64, error) {
		if strings.Contains(host, "172.16.102.3:20180") {
			return int64(1), int64(2), nil
		}
		return 0, 0, errors.New("invalid connection")
	}
	splitSize, splitKeys, err := getRegionSplitSizeKeys(ctx, cli, nil)
	require.NoError(t, err)
	require.Equal(t, int64(1), splitSize)
	require.Equal(t, int64(2), splitKeys)
}
// TestLocalIsRetryableTiKVWriteError checks that io.EOF — bare or wrapped —
// is classified as a retryable TiKV import error.
func TestLocalIsRetryableTiKVWriteError(t *testing.T) {
	l := Backend{}
	require.True(t, l.isRetryableImportTiKVError(io.EOF))
	require.True(t, l.isRetryableImportTiKVError(errors.Trace(io.EOF)))
}
// mockIngestData must be ordered on the first element of each [2][]byte.
// Element [0] is the key, element [1] is the value.
type mockIngestData [][2][]byte

// GetFirstAndLastKey returns the first and last key of m that fall inside
// [lowerBound, upperBound); a nil/empty bound means unbounded on that side.
// When no key is in range it returns (nil, nil, nil).
func (m mockIngestData) GetFirstAndLastKey(lowerBound, upperBound []byte) ([]byte, []byte, error) {
	i, j := m.getFirstAndLastKeyIdx(lowerBound, upperBound)
	if i == -1 {
		return nil, nil, nil
	}
	return m[i][0], m[j][0], nil
}

// getFirstAndLastKeyIdx returns the indices of the first and last entries
// whose KEY lies in [lowerBound, upperBound), or (-1, -1) if the range is
// empty.
func (m mockIngestData) getFirstAndLastKeyIdx(lowerBound, upperBound []byte) (int, int) {
	var first int
	if len(lowerBound) == 0 {
		first = 0
	} else {
		// Smallest index whose key >= lowerBound (inclusive lower bound).
		i, _ := sort.Find(len(m), func(i int) int {
			return bytes.Compare(lowerBound, m[i][0])
		})
		if i == len(m) {
			return -1, -1
		}
		first = i
	}
	var last int
	if len(upperBound) == 0 {
		last = len(m) - 1
	} else {
		// Smallest index whose key >= upperBound; the entry before it is the
		// last one inside the exclusive upper bound.
		// BUGFIX: compare against the key m[i][0], not the value m[i][1];
		// the old code only worked because all test data used key == value.
		i, _ := sort.Find(len(m), func(i int) int {
			return bytes.Compare(upperBound, m[i][0])
		})
		if i == 0 {
			return -1, -1
		}
		last = i - 1
	}
	if first > last {
		// Bounds fall strictly between two adjacent keys: nothing in range.
		// Previously this returned an inverted (first, last) pair.
		return -1, -1
	}
	return first, last
}
// mockIngestIter iterates a slice of mockIngestData between startIdx and
// endIdx. NOTE(review): Valid treats endIdx as exclusive while NewIter passes
// the inclusive last index from getFirstAndLastKeyIdx — confirm whether the
// final key is intentionally skipped.
type mockIngestIter struct {
	data                     mockIngestData
	startIdx, endIdx, curIdx int
}
// First rewinds the cursor to startIdx and always reports success.
func (m *mockIngestIter) First() bool {
	m.curIdx = m.startIdx
	return true
}
func (m *mockIngestIter) Valid() bool { return m.curIdx < m.endIdx }
// Next advances the cursor and reports whether it is still valid.
func (m *mockIngestIter) Next() bool {
	m.curIdx++
	return m.Valid()
}
func (m *mockIngestIter) Key() []byte { return m.data[m.curIdx][0] }
func (m *mockIngestIter) Value() []byte { return m.data[m.curIdx][1] }
func (m *mockIngestIter) Close() error { return nil }
func (m *mockIngestIter) Error() error { return nil }
// NewIter returns a forward iterator over the entries whose keys fall in
// [lowerBound, upperBound). NOTE(review): j is the inclusive last index but
// mockIngestIter.Valid treats endIdx as exclusive, so the last key appears
// unreachable — confirm intended.
func (m mockIngestData) NewIter(ctx context.Context, lowerBound, upperBound []byte) common.ForwardIter {
	i, j := m.getFirstAndLastKeyIdx(lowerBound, upperBound)
	return &mockIngestIter{data: m, startIdx: i, endIdx: j, curIdx: i}
}
func (m mockIngestData) GetTS() uint64 { return 0 }
func (m mockIngestData) Finish(_, _ int64) {}
func TestCheckPeersBusy(t *testing.T) {
backup := maxRetryBackoffSecond
maxRetryBackoffSecond = 300
t.Cleanup(func() {
maxRetryBackoffSecond = backup
})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
apiInvokeRecorder := map[string][]uint64{}
serverIsBusyResp := &sst.IngestResponse{
Error: &errorpb.Error{
ServerIsBusy: &errorpb.ServerIsBusy{},
}}
createTimeStore12 := 0
local := &Backend{
importClientFactory: &mockImportClientFactory{
stores: []*metapb.Store{
{Id: 11}, {Id: 12}, {Id: 13}, // region ["a", "b")
{Id: 21}, {Id: 22}, {Id: 23}, // region ["b", "")
},
createClientFn: func(store *metapb.Store) sst.ImportSSTClient {
importCli := newMockImportClient()
importCli.store = store
importCli.apiInvokeRecorder = apiInvokeRecorder
if store.Id == 12 {
createTimeStore12++
// the second time is checkWriteStall, we mock a busy response
if createTimeStore12 == 2 {
importCli.retry = 1
importCli.resp = serverIsBusyResp
}
}
return importCli
},
},
logger: log.L(),
writeLimiter: noopStoreWriteLimiter{},
bufferPool: membuf.NewPool(),
supportMultiIngest: true,
BackendConfig: BackendConfig{
ShouldCheckWriteStall: true,
},
tikvCodec: keyspace.CodecV1,
}
data := mockIngestData{{[]byte("a"), []byte("a")}, {[]byte("b"), []byte("b")}}
jobCh := make(chan *regionJob, 10)
retryJob := ®ionJob{
keyRange: Range{start: []byte("a"), end: []byte("b")},
region: &split.RegionInfo{
Region: &metapb.Region{
Id: 1,
Peers: []*metapb.Peer{
{Id: 1, StoreId: 11}, {Id: 2, StoreId: 12}, {Id: 3, StoreId: 13},
},
StartKey: []byte("a"),
EndKey: []byte("b"),
},
Leader: &metapb.Peer{Id: 1, StoreId: 11},
},
stage: regionScanned,
ingestData: data,
retryCount: 20,
waitUntil: time.Now().Add(-time.Second),
}
jobCh <- retryJob
jobCh <- ®ionJob{
keyRange: Range{start: []byte("b"), end: []byte("")},
region: &split.RegionInfo{
Region: &metapb.Region{
Id: 4,
Peers: []*metapb.Peer{
{Id: 4, StoreId: 21}, {Id: 5, StoreId: 22}, {Id: 6, StoreId: 23},
},
StartKey: []byte("b"),
EndKey: []byte(""),
},
Leader: &metapb.Peer{Id: 4, StoreId: 21},
},
stage: regionScanned,
ingestData: data,
retryCount: 20,
waitUntil: time.Now().Add(-time.Second),
}
retryJobs := make(chan *regionJob, 1)
var wg sync.WaitGroup
wg.Add(1)
jobOutCh := make(chan *regionJob)
go func() {
job := <-jobOutCh
job.retryCount++
retryJobs <- job
<-jobOutCh
wg.Done()
}()
wg.Add(1)
go func() {
defer wg.Done()
err := local.startWorker(ctx, jobCh, jobOutCh, nil)
require.NoError(t, err)
}()
require.Eventually(t, func() bool {
return len(retryJobs) == 1
}, 300*time.Second, time.Second)
j := <-retryJobs
require.Same(t, retryJob, j)
require.Equal(t, 21, retryJob.retryCount)
require.Equal(t, wrote, retryJob.stage)
cancel()
wg.Wait()
require.Equal(t, []uint64{11, 12, 13, 21, 22, 23}, apiInvokeRecorder["Write"])
// store 12 has a follower busy, so it will break the workflow for region (11, 12, 13)
require.Equal(t, []uint64{11, 12, 21, 22, 23, 21}, apiInvokeRecorder["MultiIngest"])
// region (11, 12, 13) has key range ["a", "b"), it's not finished.
require.Equal(t, []byte("a"), retryJob.keyRange.start)
require.Equal(t, []byte("b"), retryJob.keyRange.end)
}
func TestNotLeaderErrorNeedUpdatePeers(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// test lightning using stale region info (1,2,3), now the region is (11,12,13)
apiInvokeRecorder := map[string][]uint64{}
notLeaderResp := &sst.IngestResponse{
Error: &errorpb.Error{
NotLeader: &errorpb.NotLeader{Leader: &metapb.Peer{StoreId: 11}},
}}
local := &Backend{
splitCli: initTestSplitClient3Replica([][]byte{{}, {'a'}, {}}, nil),
importClientFactory: &mockImportClientFactory{
stores: []*metapb.Store{
{Id: 1}, {Id: 2}, {Id: 3},
{Id: 11}, {Id: 12}, {Id: 13},
},
createClientFn: func(store *metapb.Store) sst.ImportSSTClient {
importCli := newMockImportClient()
importCli.store = store
importCli.apiInvokeRecorder = apiInvokeRecorder
if store.Id == 1 {
importCli.retry = 1
importCli.resp = notLeaderResp
}
return importCli
},
},
logger: log.L(),
writeLimiter: noopStoreWriteLimiter{},
bufferPool: membuf.NewPool(),
supportMultiIngest: true,
BackendConfig: BackendConfig{
ShouldCheckWriteStall: true,
},
tikvCodec: keyspace.CodecV1,
}
data := mockIngestData{{[]byte("a"), []byte("a")}}
jobCh := make(chan *regionJob, 10)
staleJob := ®ionJob{
keyRange: Range{start: []byte("a"), end: []byte("")},
region: &split.RegionInfo{
Region: &metapb.Region{
Id: 1,
Peers: []*metapb.Peer{
{Id: 1, StoreId: 1}, {Id: 2, StoreId: 2}, {Id: 3, StoreId: 3},
},
StartKey: []byte("a"),
EndKey: []byte(""),
},
Leader: &metapb.Peer{Id: 1, StoreId: 1},
},
stage: regionScanned,
ingestData: data,
}
var jobWg sync.WaitGroup
jobWg.Add(1)
jobCh <- staleJob
var wg sync.WaitGroup
wg.Add(1)
jobOutCh := make(chan *regionJob)
go func() {
defer wg.Done()
for {
job := <-jobOutCh
if job.stage == ingested {
jobWg.Done()
return
}
jobCh <- job
}
}()
wg.Add(1)
go func() {
defer wg.Done()
err := local.startWorker(ctx, jobCh, jobOutCh, &jobWg)
require.NoError(t, err)
}()
jobWg.Wait()
cancel()
wg.Wait()
// "ingest" to test peers busy of stale region: 1,2,3
// then "write" to stale region: 1,2,3
// then "ingest" to stale leader: 1
// then meet NotLeader error, scanned new region (11,12,13)
// repeat above for 11,12,13
require.Equal(t, []uint64{1, 2, 3, 11, 12, 13}, apiInvokeRecorder["Write"])
require.Equal(t, []uint64{1, 2, 3, 1, 11, 12, 13, 11}, apiInvokeRecorder["MultiIngest"])
}
func TestPartialWriteIngestErrorWontPanic(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// let lightning meet any error that will call convertStageTo(needRescan)
apiInvokeRecorder := map[string][]uint64{}
notLeaderResp := &sst.IngestResponse{
Error: &errorpb.Error{
NotLeader: &errorpb.NotLeader{Leader: &metapb.Peer{StoreId: 11}},
}}
local := &Backend{
splitCli: initTestSplitClient3Replica([][]byte{{}, {'c'}}, nil),
importClientFactory: &mockImportClientFactory{
stores: []*metapb.Store{
{Id: 1}, {Id: 2}, {Id: 3},
},
createClientFn: func(store *metapb.Store) sst.ImportSSTClient {
importCli := newMockImportClient()
importCli.store = store
importCli.apiInvokeRecorder = apiInvokeRecorder
if store.Id == 1 {
importCli.retry = 1
importCli.resp = notLeaderResp
}
return importCli
},
},
logger: log.L(),
writeLimiter: noopStoreWriteLimiter{},
bufferPool: membuf.NewPool(),
supportMultiIngest: true,
tikvCodec: keyspace.CodecV1,
}
data := mockIngestData{{[]byte("a"), []byte("a")}, {[]byte("a2"), []byte("a2")}}
jobCh := make(chan *regionJob, 10)
partialWriteJob := ®ionJob{
keyRange: Range{start: []byte("a"), end: []byte("c")},
region: &split.RegionInfo{
Region: &metapb.Region{
Id: 1,
Peers: []*metapb.Peer{
{Id: 1, StoreId: 1}, {Id: 2, StoreId: 2}, {Id: 3, StoreId: 3},
},
StartKey: []byte("a"),
EndKey: []byte("c"),
},
Leader: &metapb.Peer{Id: 1, StoreId: 1},
},
stage: regionScanned,
ingestData: data,
// use small regionSplitSize to trigger partial write
regionSplitSize: 1,
}
var jobWg sync.WaitGroup
jobWg.Add(1)
jobCh <- partialWriteJob
var wg sync.WaitGroup
wg.Add(1)
jobOutCh := make(chan *regionJob)
go func() {
defer wg.Done()
for {
job := <-jobOutCh
if job.stage == regionScanned {
jobWg.Done()
return
}
require.Fail(t, "job stage %s is not expected", job.stage)
}
}()
wg.Add(1)
go func() {
defer wg.Done()
err := local.startWorker(ctx, jobCh, jobOutCh, &jobWg)
require.NoError(t, err)
}()
jobWg.Wait()
cancel()
wg.Wait()
require.Equal(t, []uint64{1, 2, 3}, apiInvokeRecorder["Write"])
require.Equal(t, []uint64{1}, apiInvokeRecorder["MultiIngest"])
}
func TestPartialWriteIngestBusy(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
apiInvokeRecorder := map[string][]uint64{}
notLeaderResp := &sst.IngestResponse{
Error: &errorpb.Error{
ServerIsBusy: &errorpb.ServerIsBusy{},
}}
onceResp := &atomic.Pointer[sst.IngestResponse]{}
onceResp.Store(notLeaderResp)
local := &Backend{
splitCli: initTestSplitClient3Replica([][]byte{{}, {'c'}}, nil),
importClientFactory: &mockImportClientFactory{
stores: []*metapb.Store{
{Id: 1}, {Id: 2}, {Id: 3},
},
createClientFn: func(store *metapb.Store) sst.ImportSSTClient {
importCli := newMockImportClient()
importCli.store = store
importCli.apiInvokeRecorder = apiInvokeRecorder
if store.Id == 1 {
importCli.retry = 1
importCli.onceResp = onceResp
}
return importCli
},
},
logger: log.L(),
writeLimiter: noopStoreWriteLimiter{},
bufferPool: membuf.NewPool(),
supportMultiIngest: true,
tikvCodec: keyspace.CodecV1,
}
db, tmpPath := makePebbleDB(t, nil)
_, engineUUID := backend.MakeUUID("ww", 0)
engineCtx, cancel2 := context.WithCancel(context.Background())
f := &Engine{
UUID: engineUUID,
sstDir: tmpPath,
ctx: engineCtx,
cancel: cancel2,
sstMetasChan: make(chan metaOrFlush, 64),
keyAdapter: common.NoopKeyAdapter{},
logger: log.L(),
}
f.db.Store(db)
err := db.Set([]byte("a"), []byte("a"), nil)
require.NoError(t, err)
err = db.Set([]byte("a2"), []byte("a2"), nil)
require.NoError(t, err)
jobCh := make(chan *regionJob, 10)
partialWriteJob := ®ionJob{
keyRange: Range{start: []byte("a"), end: []byte("c")},
region: &split.RegionInfo{
Region: &metapb.Region{
Id: 1,
Peers: []*metapb.Peer{
{Id: 1, StoreId: 1}, {Id: 2, StoreId: 2}, {Id: 3, StoreId: 3},
},
StartKey: []byte("a"),
EndKey: []byte("c"),
},
Leader: &metapb.Peer{Id: 1, StoreId: 1},
},
stage: regionScanned,
ingestData: f,
// use small regionSplitSize to trigger partial write
regionSplitSize: 1,
}
var jobWg sync.WaitGroup
jobWg.Add(1)
jobCh <- partialWriteJob
var wg sync.WaitGroup
wg.Add(1)
jobOutCh := make(chan *regionJob)
go func() {
defer wg.Done()
for {
job := <-jobOutCh
switch job.stage {
case wrote:
// mimic retry later
jobCh <- job
case ingested:
// partially write will change the start key
require.Equal(t, []byte("a2"), job.keyRange.start)
require.Equal(t, []byte("c"), job.keyRange.end)
jobWg.Done()
return
default:
require.Fail(t, "job stage %s is not expected, job: %v", job.stage, job)
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
err := local.startWorker(ctx, jobCh, jobOutCh, &jobWg)
require.NoError(t, err)
}()
jobWg.Wait()
cancel()
wg.Wait()
require.Equal(t, int64(2), f.importedKVCount.Load())
require.Equal(t, []uint64{1, 2, 3, 1, 2, 3}, apiInvokeRecorder["Write"])
require.Equal(t, []uint64{1, 1, 1}, apiInvokeRecorder["MultiIngest"])
}
// mockGetSizeProperties mocks that 50MB * 20 SST file.
func mockGetSizeProperties(log.Logger, *pebble.DB, common.KeyAdapter) (*sizeProperties, error) {
props := newSizeProperties()
// keys starts with 0 is meta keys, so we start with 1.
for i := byte(1); i <= 10; i++ {
rangeProps := &rangeProperty{
Key: []byte{i},
rangeOffsets: rangeOffsets{
Size: 50 * units.MiB,
Keys: 100_000,
},
}
props.add(rangeProps)
rangeProps = &rangeProperty{
Key: []byte{i, 1},
rangeOffsets: rangeOffsets{
Size: 50 * units.MiB,
Keys: 100_000,
},
}
props.add(rangeProps)
}
return props, nil
}
type panicSplitRegionClient struct{}
// BeforeSplitRegion panics: the test using this client must never split.
func (p panicSplitRegionClient) BeforeSplitRegion(context.Context, *split.RegionInfo, [][]byte) (*split.RegionInfo, [][]byte) {
	panic("should not be called")
}
// AfterSplitRegion panics: the test using this client must never split.
func (p panicSplitRegionClient) AfterSplitRegion(context.Context, *split.RegionInfo, [][]byte, []*split.RegionInfo, error) ([]*split.RegionInfo, error) {
	panic("should not be called")
}
// BeforeScanRegions passes the scan arguments through unchanged.
func (p panicSplitRegionClient) BeforeScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]byte, []byte, int) {
	return key, endKey, limit
}
// AfterScanRegions passes the scan result through unchanged.
func (p panicSplitRegionClient) AfterScanRegions(infos []*split.RegionInfo, err error) ([]*split.RegionInfo, error) {
	return infos, err
}
// TestSplitRangeAgain4BigRegion checks that generateAndSendJob re-splits a
// single oversized region range into per-key jobs using the (mocked) size
// properties, without ever invoking region split (panicSplitRegionClient
// guards that).
func TestSplitRangeAgain4BigRegion(t *testing.T) {
	backup := getSizePropertiesFn
	getSizePropertiesFn = mockGetSizeProperties
	t.Cleanup(func() {
		getSizePropertiesFn = backup
	})
	local := &Backend{
		splitCli: initTestSplitClient(
			[][]byte{{1}, {11}},      // we have one big region
			panicSplitRegionClient{}, // make sure no further split region
		),
	}
	local.BackendConfig.WorkerConcurrency = 1
	db, tmpPath := makePebbleDB(t, nil)
	_, engineUUID := backend.MakeUUID("ww", 0)
	ctx := context.Background()
	engineCtx, cancel := context.WithCancel(context.Background())
	f := &Engine{
		UUID:         engineUUID,
		sstDir:       tmpPath,
		ctx:          engineCtx,
		cancel:       cancel,
		sstMetasChan: make(chan metaOrFlush, 64),
		keyAdapter:   common.NoopKeyAdapter{},
		logger:       log.L(),
	}
	f.db.Store(db)
	// keys starts with 0 is meta keys, so we start with 1.
	for i := byte(1); i <= 10; i++ {
		err := db.Set([]byte{i}, []byte{i}, nil)
		require.NoError(t, err)
		err = db.Set([]byte{i, 1}, []byte{i, 1}, nil)
		require.NoError(t, err)
	}

	bigRegionRange := []Range{{start: []byte{1}, end: []byte{11}}}
	jobCh := make(chan *regionJob, 10)
	jobWg := sync.WaitGroup{}
	err := local.generateAndSendJob(
		ctx,
		f,
		bigRegionRange,
		10*units.GB,
		1<<30,
		jobCh,
		&jobWg,
	)
	require.NoError(t, err)
	require.Len(t, jobCh, 10)
	// expect one job per 100MB chunk, i.e. per leading key byte
	for i := 0; i < 10; i++ {
		job := <-jobCh
		require.Equal(t, []byte{byte(i + 1)}, job.keyRange.start)
		require.Equal(t, []byte{byte(i + 2)}, job.keyRange.end)
		jobWg.Done()
	}
	jobWg.Wait()
}
// getSuccessInjectedBehaviour returns an injected script for a job that
// writes completely (no remaining start key) and then ingests successfully.
func getSuccessInjectedBehaviour() []injectedBehaviour {
	return []injectedBehaviour{
		{
			write: injectedWriteBehaviour{
				result: &tikvWriteResult{
					remainingStartKey: nil,
				},
			},
		},
		{
			ingest: injectedIngestBehaviour{
				nextStage: ingested,
			},
		},
	}
}
// getNeedRescanWhenIngestBehaviour returns an injected script for a job that
// writes completely but whose ingest hits an epoch-not-match error and must
// rescan the region.
func getNeedRescanWhenIngestBehaviour() []injectedBehaviour {
	return []injectedBehaviour{
		{
			write: injectedWriteBehaviour{
				result: &tikvWriteResult{
					remainingStartKey: nil,
				},
			},
		},
		{
			ingest: injectedIngestBehaviour{
				nextStage: needRescan,
				err:       common.ErrKVEpochNotMatch,
			},
		},
	}
}
// TestDoImport drives Backend.doImport through fakeRegionJobs scripts and
// checks three scenarios: a normal run mixing rescan and retryable write
// errors, a failure in the first generateJobForRange call, a failure in a
// rescan-triggered generateJobForRange call, and finally an unretryable write
// error. After successful runs every injected behaviour must be consumed.
func TestDoImport(t *testing.T) {
	backup := maxRetryBackoffSecond
	maxRetryBackoffSecond = 1
	t.Cleanup(func() {
		maxRetryBackoffSecond = backup
	})

	_ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipSplitAndScatter", "return()")
	_ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/fakeRegionJobs", "return()")
	t.Cleanup(func() {
		_ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipSplitAndScatter")
		_ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/fakeRegionJobs")
	})

	// test that
	// - one job need rescan when ingest
	// - one job need retry when write
	initRanges := []Range{
		{start: []byte{'a'}, end: []byte{'b'}},
		{start: []byte{'b'}, end: []byte{'c'}},
		{start: []byte{'c'}, end: []byte{'d'}},
	}
	fakeRegionJobs = map[[2]string]struct {
		jobs []*regionJob
		err  error
	}{
		{"a", "b"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'a'}, end: []byte{'b'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
		{"b", "c"}: {
			// partial write first (remaining start key b2), then success
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'b'}, end: []byte{'c'}},
					ingestData: &Engine{},
					injected: []injectedBehaviour{
						{
							write: injectedWriteBehaviour{
								result: &tikvWriteResult{
									remainingStartKey: []byte{'b', '2'},
								},
							},
						},
						{
							ingest: injectedIngestBehaviour{
								nextStage: ingested,
							},
						},
						{
							write: injectedWriteBehaviour{
								result: &tikvWriteResult{
									remainingStartKey: nil,
								},
							},
						},
						{
							ingest: injectedIngestBehaviour{
								nextStage: ingested,
							},
						},
					},
				},
			},
		},
		{"c", "d"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'c'}, end: []byte{'c', '2'}},
					ingestData: &Engine{},
					injected:   getNeedRescanWhenIngestBehaviour(),
				},
				{
					keyRange:   Range{start: []byte{'c', '2'}, end: []byte{'d'}},
					ingestData: &Engine{},
					injected: []injectedBehaviour{
						{
							write: injectedWriteBehaviour{
								// a retryable error
								err: status.Error(codes.Unknown, "is not fully replicated"),
							},
						},
					},
				},
			},
		},
		{"c", "c2"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'c'}, end: []byte{'c', '2'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
		{"c2", "d"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'c', '2'}, end: []byte{'d'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
	}

	ctx := context.Background()
	l := &Backend{
		BackendConfig: BackendConfig{
			WorkerConcurrency: 2,
		},
	}
	e := &Engine{}
	err := l.doImport(ctx, e, initRanges, int64(config.SplitRegionSize), int64(config.SplitRegionKeys))
	require.NoError(t, err)
	// all injected behaviours must have been consumed
	for _, v := range fakeRegionJobs {
		for _, job := range v.jobs {
			require.Len(t, job.injected, 0)
		}
	}

	// test first call to generateJobForRange meet error
	fakeRegionJobs = map[[2]string]struct {
		jobs []*regionJob
		err  error
	}{
		{"a", "b"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'a'}, end: []byte{'b'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
		{"b", "c"}: {
			err: errors.New("meet error when generateJobForRange"),
		},
	}
	err = l.doImport(ctx, e, initRanges, int64(config.SplitRegionSize), int64(config.SplitRegionKeys))
	require.ErrorContains(t, err, "meet error when generateJobForRange")

	// test second call to generateJobForRange (needRescan) meet error
	fakeRegionJobs = map[[2]string]struct {
		jobs []*regionJob
		err  error
	}{
		{"a", "b"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'a'}, end: []byte{'a', '2'}},
					ingestData: &Engine{},
					injected:   getNeedRescanWhenIngestBehaviour(),
				},
				{
					keyRange:   Range{start: []byte{'a', '2'}, end: []byte{'b'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
		{"b", "c"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'b'}, end: []byte{'c'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
		{"c", "d"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'c'}, end: []byte{'d'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
		{"a", "a2"}: {
			err: errors.New("meet error when generateJobForRange again"),
		},
	}
	err = l.doImport(ctx, e, initRanges, int64(config.SplitRegionSize), int64(config.SplitRegionKeys))
	require.ErrorContains(t, err, "meet error when generateJobForRange again")

	// test write meet unretryable error
	maxRetryBackoffSecond = 100
	l.WorkerConcurrency = 1
	fakeRegionJobs = map[[2]string]struct {
		jobs []*regionJob
		err  error
	}{
		{"a", "b"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'a'}, end: []byte{'b'}},
					ingestData: &Engine{},
					retryCount: maxWriteAndIngestRetryTimes - 1,
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
		{"b", "c"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'b'}, end: []byte{'c'}},
					ingestData: &Engine{},
					retryCount: maxWriteAndIngestRetryTimes - 1,
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
		{"c", "d"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'c'}, end: []byte{'d'}},
					ingestData: &Engine{},
					retryCount: maxWriteAndIngestRetryTimes - 2,
					injected: []injectedBehaviour{
						{
							write: injectedWriteBehaviour{
								// unretryable error
								err: errors.New("fatal error"),
							},
						},
					},
				},
			},
		},
	}
	err = l.doImport(ctx, e, initRanges, int64(config.SplitRegionSize), int64(config.SplitRegionKeys))
	require.ErrorContains(t, err, "fatal error")
	for _, v := range fakeRegionJobs {
		for _, job := range v.jobs {
			require.Len(t, job.injected, 0)
		}
	}
}
// TestRegionJobResetRetryCounter checks that jobs regenerated after a
// needRescan keep importing successfully even when the original jobs already
// sat at maxWriteAndIngestRetryTimes, i.e. the retry counter is reset for
// regenerated jobs.
func TestRegionJobResetRetryCounter(t *testing.T) {
	backup := maxRetryBackoffSecond
	maxRetryBackoffSecond = 1
	t.Cleanup(func() {
		maxRetryBackoffSecond = backup
	})

	_ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipSplitAndScatter", "return()")
	_ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/fakeRegionJobs", "return()")
	t.Cleanup(func() {
		_ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipSplitAndScatter")
		_ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/fakeRegionJobs")
	})

	// test that job need rescan when ingest
	initRanges := []Range{
		{start: []byte{'c'}, end: []byte{'d'}},
	}
	fakeRegionJobs = map[[2]string]struct {
		jobs []*regionJob
		err  error
	}{
		{"c", "d"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'c'}, end: []byte{'c', '2'}},
					ingestData: &Engine{},
					injected:   getNeedRescanWhenIngestBehaviour(),
					retryCount: maxWriteAndIngestRetryTimes,
				},
				{
					keyRange:   Range{start: []byte{'c', '2'}, end: []byte{'d'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
					retryCount: maxWriteAndIngestRetryTimes,
				},
			},
		},
		{"c", "c2"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'c'}, end: []byte{'c', '2'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
	}

	ctx := context.Background()
	l := &Backend{
		BackendConfig: BackendConfig{
			WorkerConcurrency: 2,
		},
	}
	e := &Engine{}
	err := l.doImport(ctx, e, initRanges, int64(config.SplitRegionSize), int64(config.SplitRegionKeys))
	require.NoError(t, err)
	for _, v := range fakeRegionJobs {
		for _, job := range v.jobs {
			require.Len(t, job.injected, 0)
		}
	}
}
// TestCtxCancelIsIgnored checks that the error surfaced by doImport is the
// real cause (no disk space, via failpoint) rather than a context-cancellation
// error raised while later jobs were still being generated.
func TestCtxCancelIsIgnored(t *testing.T) {
	backup := maxRetryBackoffSecond
	maxRetryBackoffSecond = 1
	t.Cleanup(func() {
		maxRetryBackoffSecond = backup
	})

	_ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipSplitAndScatter", "return()")
	_ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/fakeRegionJobs", "return()")
	// slow down job generation so cancellation overlaps it
	_ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/beforeGenerateJob", "sleep(1000)")
	_ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/WriteToTiKVNotEnoughDiskSpace", "return()")
	t.Cleanup(func() {
		_ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipSplitAndScatter")
		_ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/fakeRegionJobs")
		_ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/beforeGenerateJob")
		_ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/WriteToTiKVNotEnoughDiskSpace")
	})

	initRanges := []Range{
		{start: []byte{'c'}, end: []byte{'d'}},
		{start: []byte{'d'}, end: []byte{'e'}},
	}
	fakeRegionJobs = map[[2]string]struct {
		jobs []*regionJob
		err  error
	}{
		{"c", "d"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'c'}, end: []byte{'d'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
		{"d", "e"}: {
			jobs: []*regionJob{
				{
					keyRange:   Range{start: []byte{'d'}, end: []byte{'e'}},
					ingestData: &Engine{},
					injected:   getSuccessInjectedBehaviour(),
				},
			},
		},
	}

	ctx := context.Background()
	l := &Backend{
		BackendConfig: BackendConfig{
			WorkerConcurrency: 1,
		},
	}
	e := &Engine{}
	err := l.doImport(ctx, e, initRanges, int64(config.SplitRegionSize), int64(config.SplitRegionKeys))
	require.ErrorContains(t, err, "the remaining storage capacity of TiKV")
}
// TestExternalEngineLoadIngestData feeds 100 sorted KV pairs through a mock
// external (storage-backed) engine and checks that generateAndSendJob splits
// the requested ranges on region boundaries (keys[0], keys[50], endKey) into
// exactly 5 region jobs whose iterators together yield every KV exactly
// once, in key order.
func TestExternalEngineLoadIngestData(t *testing.T) {
	ctx := context.Background()
	memstore := storage.NewMemStorage()
	// 100 zero-padded keys/values so lexicographic order == numeric order.
	keys := make([][]byte, 100)
	values := make([][]byte, 100)
	for i := range keys {
		keys[i] = []byte(fmt.Sprintf("key%06d", i))
		values[i] = []byte(fmt.Sprintf("value%06d", i))
	}
	// Exclusive upper bound, one past the last key.
	endKey := []byte(fmt.Sprintf("key%06d", 100))
	dataFiles, statFiles, err := external.MockExternalEngine(memstore, keys, values)
	require.NoError(t, err)
	externalEngine := external.NewExternalEngine(
		memstore,
		dataFiles,
		statFiles,
		common.NoopKeyAdapter{},
		false,
		nil,
		common.DupDetectOpt{},
		123,
	)
	local := &Backend{
		BackendConfig: BackendConfig{
			WorkerConcurrency: 2,
		},
		// Region boundaries at keys[0], keys[50] and endKey: the middle
		// boundary forces the [30,60) range to be split into two jobs.
		splitCli: initTestSplitClient([][]byte{
			keys[0], keys[50], endKey,
		}, nil),
	}
	ranges := []Range{
		{start: keys[0], end: keys[30]},
		{start: keys[30], end: keys[60]},
		{start: keys[60], end: keys[90]},
		{start: keys[90], end: endKey},
	}
	jobToWorkerCh := make(chan *regionJob, 10)
	jobWg := new(sync.WaitGroup)
	err = local.generateAndSendJob(
		ctx,
		externalEngine,
		ranges,
		1<<30,
		1<<20,
		jobToWorkerCh,
		jobWg,
	)
	require.NoError(t, err)
	// 4 input ranges + 1 extra job from the split at keys[50].
	require.Len(t, jobToWorkerCh, 5)
	jobs := make([]*regionJob, 0, 5)
	for i := 0; i < 5; i++ {
		jobs = append(jobs, <-jobToWorkerCh)
	}
	// Jobs may arrive in any order; sort by start key before comparing.
	sort.Slice(jobs, func(i, j int) bool {
		return bytes.Compare(jobs[i].keyRange.start, jobs[j].keyRange.start) < 0
	})
	expectedKeyRanges := []Range{
		{start: keys[0], end: keys[30]},
		{start: keys[30], end: keys[50]},
		{start: keys[50], end: keys[60]},
		{start: keys[60], end: keys[90]},
		{start: keys[90], end: endKey},
	}
	// Walk every job's iterator and verify the concatenation covers all
	// 100 KVs exactly once, in order.
	kvIdx := 0
	for i, job := range jobs {
		require.Equal(t, expectedKeyRanges[i], job.keyRange)
		iter := job.ingestData.NewIter(ctx, job.keyRange.start, job.keyRange.end)
		for iter.First(); iter.Valid(); iter.Next() {
			require.Equal(t, keys[kvIdx], iter.Key())
			require.Equal(t, values[kvIdx], iter.Value())
			kvIdx++
		}
		require.NoError(t, iter.Error())
		require.NoError(t, iter.Close())
	}
	require.Equal(t, 100, kvIdx)
}
|
package activitystream
import (
"strconv"
"time"
)
// MakeTimestamp returns the given time as unix milliseconds
func MakeTimestamp(t time.Time) int64 {
return t.UnixNano() / int64(time.Millisecond)
}
// CreateTokens generates previous and next page tokens from a page of
// activities.
//
// size is the page size; direction is the direction of the request that
// produced this page (needed to detect the first and last page);
// activities is the page content, assumed ordered by Published time.
//
// The returned tokens are query strings ("?s=<size>&before=<ts>" and
// "?s=<size>&after=<ts>") pivoting on the first and last activity's
// published time in unix milliseconds; either may be empty when there is
// no corresponding page.
func CreateTokens(size int, direction Direction, activities []Activity) (prev, next string) {
	count := len(activities)
	if count == 0 {
		return
	}
	// Format the pivots with FormatInt: the timestamps are int64 unix
	// milliseconds, and the original strconv.Itoa(int(...)) would truncate
	// them on 32-bit platforms.
	firstPivot := strconv.FormatInt(MakeTimestamp(activities[0].Published), 10)
	lastPivot := strconv.FormatInt(MakeTimestamp(activities[count-1].Published), 10)
	s := strconv.Itoa(size)
	// A previous page exists when we paged forward to get here, or the
	// page is full (more data may precede it); symmetrically for next.
	if direction == After || count >= size {
		prev = "?s=" + s + "&before=" + firstPivot
	}
	if direction == Before || count >= size {
		next = "?s=" + s + "&after=" + lastPivot
	}
	return
}
|
package main
//给定一个字符串 s 和一个整数 k,你需要对从字符串开头算起的每隔 2k 个字符的前 k 个字符进行反转。
//
//如果剩余字符少于 k 个,则将剩余字符全部反转。
//如果剩余字符小于 2k 但大于或等于 k 个,则反转前 k 个字符,其余字符保持原样。
// main is intentionally empty; this file only hosts reverseStr/reverse
// (LeetCode 541 "Reverse String II").
func main() {
}
// reverseStr reverses the first k characters of every 2k-character chunk of
// s. If fewer than k characters remain they are all reversed; if at least k
// but fewer than 2k remain, only the first k are reversed.
func reverseStr(s string, k int) string {
	// Reversing single characters (k == 1) or a string of length <= 1 is a
	// no-op, so return the input unchanged.
	if k == 1 || len(s) <= 1 {
		return s
	}
	buf := []byte(s)
	// Walk chunk by chunk; each chunk reverses at most its first k bytes.
	for start := 0; start < len(buf); start += 2 * k {
		end := start + k
		if end > len(buf) {
			end = len(buf)
		}
		// Reverse buf[start:end] in place.
		for i, j := start, end-1; i < j; i, j = i+1, j-1 {
			buf[i], buf[j] = buf[j], buf[i]
		}
	}
	return string(buf)
}
// reverse flips word in place. Safe for empty and single-byte slices.
func reverse(word []byte) {
	n := len(word)
	// Swap mirrored positions; n/2 iterations cover the whole slice, and
	// zero iterations happen naturally when n <= 1.
	for i := 0; i < n/2; i++ {
		word[i], word[n-1-i] = word[n-1-i], word[i]
	}
}
|
package common
//常用方法
import (
"crypto/md5"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"io"
"regexp"
"strings"
mr "math/rand"
"path/filepath"
"os"
"bufio"
"fmt"
)
// GetMd5String returns the hex-encoded MD5 digest of s.
func GetMd5String(s string) string {
	sum := md5.Sum([]byte(s))
	return hex.EncodeToString(sum[:])
}
//Guid方法
func GetGuid() string {
b := make([]byte, 48)
if _, err := io.ReadFull(rand.Reader, b); err != nil {
return ""
}
return GetMd5String(base64.URLEncoding.EncodeToString(b))
}
// SubString returns the substring of s starting at rune index pos with at
// most length runes. Out-of-range arguments are clamped instead of
// panicking: a negative pos is treated as 0, a pos past the end yields "",
// and the end index is truncated to the string length (a negative length
// yields "").
func SubString(s string, pos, length int) string {
	runes := []rune(s)
	// Clamp pos into [0, len(runes)]; the original panicked on values
	// outside that range.
	if pos < 0 {
		pos = 0
	}
	if pos > len(runes) {
		pos = len(runes)
	}
	end := pos + length
	if end < pos {
		// Negative length (or int overflow): return the empty string.
		end = pos
	}
	if end > len(runes) {
		end = len(runes)
	}
	return string(runes[pos:end])
}
// SliceContains reports whether value is present in src.
func SliceContains(src []string, value string) bool {
	for _, item := range src {
		if item == value {
			return true
		}
	}
	return false
}
// MapContains reports whether key is present in src.
func MapContains(src map[string]int, key string) bool {
	// The comma-ok form yields the presence flag directly; no branch needed.
	_, ok := src[key]
	return ok
}
// RemoveDuplicate returns the elements of *list with duplicates removed,
// preserving first-occurrence order. The input slice is not modified.
func RemoveDuplicate(list *[]int) []int {
	// Keep the original behavior of returning a non-nil empty slice for
	// empty input.
	result := []int{}
	// A seen-set replaces the original's O(n²) rescans of the result slice
	// (whose last-index bookkeeping was hard to follow) with one O(n) pass.
	seen := make(map[int]struct{}, len(*list))
	for _, v := range *list {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		result = append(result, v)
	}
	return result
}
// CheckRepeat reports whether id already occurs in list.
func CheckRepeat(list []int, id int) bool {
	// Ranging over a nil or empty slice simply does nothing, so no explicit
	// length check is needed.
	for _, existing := range list {
		if existing == id {
			return true
		}
	}
	return false
}
// Patterns used by TrimHtml, compiled once at package init instead of on
// every call. MustCompile panics on a bad pattern, which beats the silently
// ignored regexp.Compile errors of the original.
var (
	trimTagRe    = regexp.MustCompile(`\<[\S\s]+?\>`)
	trimStyleRe  = regexp.MustCompile(`\<style[\S\s]+?\</style\>`)
	trimScriptRe = regexp.MustCompile(`\<script[\S\s]+?\</script\>`)
	trimBlankRe  = regexp.MustCompile(`\s{2,}`)
	trimSpaceRe  = regexp.MustCompile(`\ `)
)

// TrimHtml strips HTML from src and returns the remaining plain text: tags
// are lower-cased, <style>/<script> blocks are deleted, remaining tags
// become newlines, whitespace runs collapse to one newline, single spaces
// are removed, and the result is trimmed.
func TrimHtml(src string) string {
	// Lower-case every tag so the style/script patterns below match
	// regardless of the original casing.
	src = trimTagRe.ReplaceAllStringFunc(src, strings.ToLower)
	// Remove STYLE blocks.
	src = trimStyleRe.ReplaceAllString(src, "")
	// Remove SCRIPT blocks.
	src = trimScriptRe.ReplaceAllString(src, "")
	// Replace any remaining tag with a newline.
	src = trimTagRe.ReplaceAllString(src, "\n")
	// Collapse consecutive whitespace into a single newline.
	src = trimBlankRe.ReplaceAllString(src, "\n")
	// NOTE(review): the original comment claimed this removes " ",
	// but the pattern `\ ` matches a literal space; behavior kept as-is.
	src = trimSpaceRe.ReplaceAllString(src, "")
	return strings.TrimSpace(src)
}
// RandGetArray returns a uniformly random element of a.
// It panics when a is empty (rand.Intn panics for n <= 0), matching the
// original behavior.
func RandGetArray(a []string) string {
	// Named n rather than len: the original shadowed the builtin len.
	n := len(a)
	return a[mr.Intn(n)]
}
// APP_ROOT caches the project root directory resolved by GetPath.
var APP_ROOT string

// GetPath returns the absolute directory containing the running executable,
// with backslashes normalized to forward slashes, caching the result in
// APP_ROOT for subsequent calls.
// NOTE(review): the cache is read and written without synchronization —
// assumed to be first called before any concurrent use; resolution errors
// are only printed, leaving APP_ROOT possibly set from a zero dir.
func GetPath() string {
	if APP_ROOT != "" {
		return APP_ROOT
	}
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		print(err.Error())
	}
	// Normalize Windows separators so callers can always join with "/".
	APP_ROOT = strings.Replace(dir, "\\", "/", -1)
	return APP_ROOT
}
// UpdateDic appends str (with default weight 3) to ./data/dictionary.txt
// unless the dictionary already contains it. The dictionary format is one
// whitespace-separated "word weight" pair per line. The existence check is
// case-insensitive. Panics if the dictionary cannot be opened for reading.
func UpdateDic(str string) {
	f, err := os.Open("./data/dictionary.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	rd := bufio.NewReader(f)
	found := false
	for {
		line, err := rd.ReadString('\n') // read one '\n'-terminated line
		fields := strings.Fields(line)
		// Guard against blank lines and the empty final read at EOF: the
		// original indexed fields[0] unconditionally and panicked there.
		if len(fields) > 0 && strings.EqualFold(fields[0], str) {
			fmt.Println(str + "字典中已存在")
			found = true
			break
		}
		if err != nil {
			// io.EOF or a genuine read error: stop scanning either way.
			break
		}
	}
	if !found {
		fmt.Println(str + "字典中不存在, 执行更新...")
		fd, err := os.OpenFile("./data/dictionary.txt", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
		if err != nil {
			// The original ignored this error and then called Write/Close
			// on a nil *os.File, which panics.
			fmt.Println(err)
			return
		}
		defer fd.Close()
		if _, err := fd.WriteString("\n" + str + " " + "3"); err != nil {
			fmt.Println(err)
		}
	}
}
// StrToSlice splits a comma-separated string into its non-empty fields.
func StrToSlice(str string) []string {
	// FieldsFunc (unlike strings.Split) drops the empty segments produced
	// by leading, trailing or doubled commas.
	return strings.FieldsFunc(str, func(c rune) bool {
		return c == ','
	})
}
//判断文件或文件夹是否存在
func IsExist(path string) bool {
_, err := os.Stat(path)
return err == nil || os.IsExist(err)
// 或者
//return err == nil || !os.IsNotExist(err)
// 或者
//return !os.IsNotExist(err)
}
// RangeAnswer is meant to pick a random fallback reply from
// ./data/noanswer.txt when no real answer was found.
// NOTE(review): the random selection is not implemented yet — the file is
// only scanned into words (and printed), and the literal "111" is always
// returned. Panics if the file cannot be opened.
func RangeAnswer() string {
	f, err := os.Open("./data/noanswer.txt")
	fmt.Println(err)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	rd := bufio.NewReader(f)
	words := []string{}
	for {
		line, err := rd.ReadString('\n') // read one '\n'-terminated line
		fields := strings.Fields(line)
		// Skip blank lines and the empty final read at EOF: the original
		// indexed fields[0] unconditionally and panicked on them. Collect
		// the words as the commented-out `words[i] = thisLine[0]` intended.
		if len(fields) > 0 {
			fmt.Println(fields[0])
			words = append(words, fields[0])
		}
		if err != nil {
			break
		}
	}
	fmt.Println(words)
	return "111"
}
// FaqType maps a FAQ entry type code to its display label:
// 1 = text, 2 = image, anything else = article.
func FaqType(typ int) string {
	if typ == 1 {
		return "[文本]"
	}
	if typ == 2 {
		return "[图片]"
	}
	return "[文章]"
}
package main
import "fmt"
var s string
// main sets the global s to "G", prints it, then hands off to f1; the
// program prints "G", "O", "O" in sequence.
func main() {
	s="G"
	fmt.Println(s)
	f1()
}
// f1 overwrites the global s with "O", prints it, and calls f2 (which sees
// the same value — demonstrating that s is shared, not copied).
func f1() {
	s="O"
	fmt.Println(s)
	f2()
}
// f2 prints the current value of the global s ("O" when called from f1).
func f2() {
	fmt.Println(s)
}
package configure
import (
"fmt"
log "github.com/mailgun/gotools-log"
"github.com/mailgun/vulcan"
"github.com/mailgun/vulcan/endpoint"
"github.com/mailgun/vulcan/loadbalance/roundrobin"
"github.com/mailgun/vulcan/location/httploc"
"github.com/mailgun/vulcan/route/pathroute"
. "github.com/mailgun/vulcand/adapter"
. "github.com/mailgun/vulcand/backend"
. "github.com/mailgun/vulcand/connwatch"
. "github.com/mailgun/vulcand/endpoint"
)
const ConnWatch = "_vulcanConnWatch"
// Configurator watches changes to the dynamic backends and applies those changes to the proxy in real time.
type Configurator struct {
	connWatcher *ConnectionWatcher // global per-connection observer shared by all locations
	proxy       *vulcan.Proxy      // the live proxy being reconfigured
	a           *Adapter           // adapter exposing the proxy's routers and locations
}
// NewConfigurator builds a Configurator around the given proxy, wiring up
// an adapter for it and a fresh connection watcher.
func NewConfigurator(proxy *vulcan.Proxy) (c *Configurator) {
	c = &Configurator{
		connWatcher: NewConnectionWatcher(),
		proxy:       proxy,
		a:           NewAdapter(proxy),
	}
	return c
}
// GetConnWatcher returns the shared connection watcher registered on every
// location this Configurator manages.
func (c *Configurator) GetConnWatcher() *ConnectionWatcher {
	return c.connWatcher
}
// WatchChanges consumes change events from the channel and applies each one
// to the proxy, logging (but not aborting on) individual failures. It
// returns nil once the channel is closed.
func (c *Configurator) WatchChanges(changes chan interface{}) error {
	// Ranging over the channel (instead of the original infinite
	// `for { <-changes }`) terminates cleanly when the sender closes it;
	// the old loop spun forever processing nil values after close, and its
	// trailing `return nil` was unreachable (flagged by go vet).
	for change := range changes {
		if err := c.processChange(change); err != nil {
			log.Errorf("Failed to process change %#v, err: %s", change, err)
		}
	}
	return nil
}
// processChange dispatches a single backend-change event to the matching
// handler. Upstream add/delete events need no proxy-side work here (the
// endpoint events carry the affected locations), so they return nil.
// Unknown event types are reported as an error.
func (c *Configurator) processChange(ch interface{}) error {
	switch change := ch.(type) {
	case *HostAdded:
		return c.upsertHost(change.Host)
	case *HostDeleted:
		return c.deleteHost(change.Name)
	case *LocationAdded:
		return c.upsertLocation(change.Host, change.Location)
	case *LocationDeleted:
		return c.deleteLocation(change.Host, change.LocationId)
	case *LocationUpstreamUpdated:
		return c.updateLocationUpstream(change.Host, change.Location)
	case *LocationPathUpdated:
		return c.updateLocationPath(change.Host, change.Location, change.Path)
	// Added and Updated rate/conn limits share the upsert handlers.
	case *LocationRateLimitAdded:
		return c.upsertLocationRateLimit(change.Host, change.Location, change.RateLimit)
	case *LocationRateLimitUpdated:
		return c.upsertLocationRateLimit(change.Host, change.Location, change.RateLimit)
	case *LocationRateLimitDeleted:
		return c.deleteLocationRateLimit(change.Host, change.Location, change.RateLimitEtcdKey)
	case *LocationConnLimitAdded:
		return c.upsertLocationConnLimit(change.Host, change.Location, change.ConnLimit)
	case *LocationConnLimitUpdated:
		return c.upsertLocationConnLimit(change.Host, change.Location, change.ConnLimit)
	case *LocationConnLimitDeleted:
		return c.deleteLocationConnLimit(change.Host, change.Location, change.ConnLimitEtcdKey)
	case *UpstreamAdded:
		return nil
	case *UpstreamDeleted:
		return nil
	case *EndpointAdded:
		return c.addEndpoint(change.Upstream, change.Endpoint, change.AffectedLocations)
	case *EndpointUpdated:
		return c.addEndpoint(change.Upstream, change.Endpoint, change.AffectedLocations)
	case *EndpointDeleted:
		return c.deleteEndpoint(change.Upstream, change.EndpointId, change.AffectedLocations)
	}
	return fmt.Errorf("Unsupported change: %#v", ch)
}
// upsertHost registers a path router for the host if one is not already
// present. It is idempotent, so location-level handlers call it freely.
func (c *Configurator) upsertHost(host *Host) error {
	if c.a.GetHostRouter().GetRouter(host.Name) != nil {
		return nil
	}
	router := pathroute.NewPathRouter()
	c.a.GetHostRouter().SetRouter(host.Name, router)
	log.Infof("Added %s", host)
	return nil
}
// deleteHost removes the host's router — and with it all its locations —
// from the proxy.
func (c *Configurator) deleteHost(hostname string) error {
	log.Infof("Removed host %s", hostname)
	c.a.GetHostRouter().RemoveRouter(hostname)
	return nil
}
// upsertLocation creates the location under the host's router: a
// round-robin load balancer, the HTTP location itself, the global
// connection watcher, any configured rate/conn limits, and finally the
// upstream's endpoints. Idempotent — if the location already exists it
// does nothing. Limit-installation failures are only logged.
func (c *Configurator) upsertLocation(host *Host, loc *Location) error {
	if err := c.upsertHost(host); err != nil {
		return err
	}
	// If location already exists, do nothing
	if loc := c.a.GetHttpLocation(host.Name, loc.Id); loc != nil {
		return nil
	}
	router := c.a.GetPathRouter(host.Name)
	if router == nil {
		return fmt.Errorf("Router not found for %s", host)
	}
	// Create a load balancer that handles all the endpoints within the given location
	rr, err := roundrobin.NewRoundRobin()
	if err != nil {
		return err
	}
	// Create a location itself
	location, err := httploc.NewLocation(loc.Id, rr)
	if err != nil {
		return err
	}
	// Always register a global connection watcher
	location.GetObserverChain().Upsert(ConnWatch, c.connWatcher)
	// Add the location to the router
	if err := router.AddLocation(loc.Path, location); err != nil {
		return err
	}
	// Add rate and connection limits
	for _, rl := range loc.RateLimits {
		if err := c.upsertLocationRateLimit(host, loc, rl); err != nil {
			log.Errorf("Failed to add rate limit: %s", err)
		}
	}
	for _, cl := range loc.ConnLimits {
		if err := c.upsertLocationConnLimit(host, loc, cl); err != nil {
			log.Errorf("Failed to add connection limit: %s", err)
		}
	}
	// Once the location added, configure all endpoints
	return c.syncLocationEndpoints(loc)
}
// deleteLocation removes the location with the given id from the host's
// router, failing if either the router or the location is missing.
func (c *Configurator) deleteLocation(host *Host, locationId string) error {
	router := c.a.GetPathRouter(host.Name)
	if router == nil {
		return fmt.Errorf("Router for %s not found", host)
	}
	location := router.GetLocationById(locationId)
	if location == nil {
		return fmt.Errorf("Location(id=%s) not found", locationId)
	}
	return router.RemoveLocation(location)
}
// upsertLocationConnLimit installs (or replaces) a connection-limit
// middleware on the location, creating the host and location on demand.
// The middleware is keyed by the limit's etcd key so updates overwrite the
// previous limiter.
func (c *Configurator) upsertLocationConnLimit(host *Host, loc *Location, cl *ConnLimit) error {
	if err := c.upsertLocation(host, loc); err != nil {
		return err
	}
	location := c.a.GetHttpLocation(host.Name, loc.Id)
	if location == nil {
		return fmt.Errorf("%s not found", loc)
	}
	limiter, err := NewConnLimiter(cl)
	if err != nil {
		return err
	}
	location.GetMiddlewareChain().Upsert(cl.EtcdKey, limiter)
	return nil
}
// upsertLocationRateLimit installs (or replaces) a rate-limit middleware on
// the location, creating the host and location on demand. The middleware
// is keyed by the limit's etcd key so updates overwrite the previous
// limiter.
func (c *Configurator) upsertLocationRateLimit(host *Host, loc *Location, rl *RateLimit) error {
	if err := c.upsertLocation(host, loc); err != nil {
		return err
	}
	location := c.a.GetHttpLocation(host.Name, loc.Id)
	if location == nil {
		return fmt.Errorf("%s not found", loc)
	}
	limiter, err := NewRateLimiter(rl)
	if err != nil {
		return err
	}
	location.GetMiddlewareChain().Upsert(rl.EtcdKey, limiter)
	return nil
}
// deleteLocationRateLimit removes the rate-limit middleware registered
// under limitId (the limit's etcd key) from the location's middleware
// chain.
func (c *Configurator) deleteLocationRateLimit(host *Host, loc *Location, limitId string) error {
	location := c.a.GetHttpLocation(host.Name, loc.Id)
	if location == nil {
		return fmt.Errorf("%s not found", loc)
	}
	return location.GetMiddlewareChain().Remove(limitId)
}
// deleteLocationConnLimit removes the connection-limit middleware
// registered under limitId (the limit's etcd key) from the location's
// middleware chain.
func (c *Configurator) deleteLocationConnLimit(host *Host, loc *Location, limitId string) error {
	location := c.a.GetHttpLocation(host.Name, loc.Id)
	if location == nil {
		return fmt.Errorf("%s not found", loc)
	}
	return location.GetMiddlewareChain().Remove(limitId)
}
// updateLocationPath re-registers a location to pick up a path change by
// deleting and re-adding it.
// NOTE(review): the path argument is unused — presumably location.Path
// already carries the new value when the event arrives; confirm with the
// event producer.
func (c *Configurator) updateLocationPath(host *Host, location *Location, path string) error {
	if err := c.deleteLocation(host, location.Id); err != nil {
		return err
	}
	return c.upsertLocation(host, location)
}
// updateLocationUpstream makes sure the location exists and then re-syncs
// its load balancer endpoints against the (possibly new) upstream.
func (c *Configurator) updateLocationUpstream(host *Host, location *Location) error {
	if err := c.upsertLocation(host, location); err != nil {
		return err
	}
	return c.syncLocationEndpoints(location)
}
// syncLocationEndpoints reconciles the location's load balancer with its
// upstream's endpoint list: endpoints missing from the LB are added and
// stale ones removed, keyed by URL. Individual add/remove failures are
// logged and skipped, not returned.
func (c *Configurator) syncLocationEndpoints(location *Location) error {
	rr := c.a.GetHttpLocationLb(location.Hostname, location.Id)
	if rr == nil {
		return fmt.Errorf("%s lb not found", location)
	}
	// First, collect and parse endpoints to add
	newEndpoints := map[string]endpoint.Endpoint{}
	for _, e := range location.Upstream.Endpoints {
		// NOTE(review): the endpoint id and url are both set to e.Url here,
		// while addEndpoint parses with (e.EtcdKey, e.Url) — confirm which
		// id the rest of the system expects.
		ep, err := EndpointFromUrl(e.Url, e.Url)
		if err != nil {
			return fmt.Errorf("Failed to parse endpoint url: %s", e)
		}
		newEndpoints[e.Url] = ep
	}
	// Memorize what endpoints exist in load balancer at the moment
	existingEndpoints := map[string]endpoint.Endpoint{}
	for _, e := range rr.GetEndpoints() {
		existingEndpoints[e.GetUrl().String()] = e
	}
	// First, add endpoints, that should be added and are not in lb
	for _, e := range newEndpoints {
		if _, exists := existingEndpoints[e.GetUrl().String()]; !exists {
			if err := rr.AddEndpoint(e); err != nil {
				log.Errorf("Failed to add %s, err: %s", e, err)
			} else {
				log.Infof("Added %s to %s", e, location)
			}
		}
	}
	// Second, remove endpoints that should not be there any more
	for _, e := range existingEndpoints {
		if _, exists := newEndpoints[e.GetUrl().String()]; !exists {
			if err := rr.RemoveEndpoint(e); err != nil {
				log.Errorf("Failed to remove %s, err: %s", e, err)
			} else {
				log.Infof("Removed %s from %s", e, location)
			}
		}
	}
	return nil
}
// addEndpoint validates the endpoint's URL and then re-syncs every location
// that references its upstream; the sync picks the new endpoint up.
// Per-location sync failures are logged, not returned.
func (c *Configurator) addEndpoint(upstream *Upstream, e *Endpoint, affectedLocations []*Location) error {
	// Validate the URL up front. The original bound the result to a local
	// named `endpoint` — shadowing the imported endpoint package — and on
	// failure formatted that nil result instead of the offending input.
	if _, err := EndpointFromUrl(e.EtcdKey, e.Url); err != nil {
		return fmt.Errorf("Failed to parse endpoint url %s: %s", e.Url, err)
	}
	for _, l := range affectedLocations {
		if err := c.syncLocationEndpoints(l); err != nil {
			log.Errorf("Failed to sync %s endpoints err: %s", l, err)
		}
	}
	return nil
}
// deleteEndpoint re-syncs each location affected by the removed endpoint;
// the sync itself drops endpoints no longer present in the upstream.
// Per-location failures are logged, not returned.
// NOTE(review): upstream and endpointId are unused — the reconciliation is
// driven entirely by affectedLocations.
func (c *Configurator) deleteEndpoint(upstream *Upstream, endpointId string, affectedLocations []*Location) error {
	for _, l := range affectedLocations {
		if err := c.syncLocationEndpoints(l); err != nil {
			log.Errorf("Failed to sync %s endpoints err: %s", l, err)
		}
	}
	return nil
}
|
package pxf
import (
"errors"
"github.com/greenplum-db/gp-common-go-libs/operating"
"os"
)
// CliInputs carries the validated environment and subcommand for one pxf
// CLI invocation.
type CliInputs struct {
	Gphome  string  // value of GPHOME (always required)
	PxfConf string  // value of PXF_CONF (required only for Init, else "")
	Cmd     Command // the pxf subcommand to run
}
// EnvVar names an environment variable consumed by the pxf CLI.
type EnvVar string

// Environment variables read by the CLI.
const (
	Gphome  EnvVar = "GPHOME"
	PxfConf EnvVar = "PXF_CONF"
)
// Command is a pxf subcommand name as passed to the pxf binary.
type Command string

// Supported pxf subcommands.
const (
	Init  Command = "init"
	Start Command = "start"
	Stop  Command = "stop"
	Sync  Command = "sync"
)
// User-facing, per-command message templates. SuccessMessage and
// ErrorMessage take (affected hosts, total hosts); StatusMessage takes the
// number of hosts being operated on.
var (
	// SuccessMessage is printed when a command succeeds on some hosts.
	SuccessMessage = map[Command]string{
		Init:  "PXF initialized successfully on %d out of %d hosts\n",
		Start: "PXF started successfully on %d out of %d hosts\n",
		Stop:  "PXF stopped successfully on %d out of %d hosts\n",
		Sync:  "PXF configs synced successfully on %d out of %d hosts\n",
	}
	// ErrorMessage is printed when a command fails on some hosts.
	ErrorMessage = map[Command]string{
		Init:  "PXF failed to initialize on %d out of %d hosts\n",
		Start: "PXF failed to start on %d out of %d hosts\n",
		Stop:  "PXF failed to stop on %d out of %d hosts\n",
		Sync:  "PXF configs failed to sync on %d out of %d hosts\n",
	}
	// StatusMessage is printed before a command starts running.
	StatusMessage = map[Command]string{
		Init:  "Initializing PXF on master and %d segment hosts...\n",
		Start: "Starting PXF on %d segment hosts...\n",
		Stop:  "Stopping PXF on %d segment hosts...\n",
		Sync:  "Syncing PXF configuration files to %d hosts...\n",
	}
)
// makeValidCliInputs resolves and validates the environment required for
// cmd: GPHOME is always mandatory, PXF_CONF only for init.
func makeValidCliInputs(cmd Command) (*CliInputs, error) {
	gphome, err := validateEnvVar(Gphome)
	if err != nil {
		return nil, err
	}
	inputs := &CliInputs{Cmd: cmd, Gphome: gphome}
	// Only init needs PXF_CONF; other commands leave it empty.
	if cmd == Init {
		pxfConf, err := validateEnvVar(PxfConf)
		if err != nil {
			return nil, err
		}
		inputs.PxfConf = pxfConf
	}
	return inputs, nil
}
// validateEnvVar returns the value of envVar, or an error when the
// variable is unset or set to the empty string.
func validateEnvVar(envVar EnvVar) (string, error) {
	name := string(envVar)
	value, found := os.LookupEnv(name)
	switch {
	case !found:
		return "", errors.New(name + " must be set")
	case value == "":
		return "", errors.New(name + " cannot be blank")
	}
	return value, nil
}
// RemoteCommandToRunOnSegments builds the shell command line that runs the
// given pxf subcommand on each segment host: an optional PXF_CONF=...
// prefix (init only), the pxf binary under GPHOME, and — for sync — this
// master's hostname appended as the sync source.
func RemoteCommandToRunOnSegments(command Command) (string, error) {
	inputs, err := makeValidCliInputs(command)
	if err != nil {
		return "", err
	}
	cmdLine := ""
	if inputs.PxfConf != "" {
		cmdLine = "PXF_CONF=" + inputs.PxfConf + " "
	}
	cmdLine += inputs.Gphome + "/pxf/bin/pxf" + " " + string(inputs.Cmd)
	if command == Sync {
		// Hostname errors are ignored, matching the original behavior.
		hostname, _ := operating.System.Hostname()
		cmdLine += " " + hostname
	}
	return cmdLine, nil
}
|
package server
import (
"context"
"flag"
"io/ioutil"
"net"
"os"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.opencensus.io/examples/exporter"
"go.uber.org/zap"
api "github.com/alexeyqian/proglog/api/v1"
"github.com/alexeyqian/proglog/internal/auth"
configx "github.com/alexeyqian/proglog/internal/config"
"github.com/alexeyqian/proglog/internal/log"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/status"
)
var debug = flag.Bool("debug", false, "Enable obervability for debugging")
// TestMain parses the test flags and, when -debug is set, installs a zap
// development logger as the global logger before running the suite.
func TestMain(m *testing.M) {
	flag.Parse()
	if *debug {
		logger, err := zap.NewDevelopment()
		if err != nil {
			panic(err)
		}
		zap.ReplaceGlobals(logger)
	}
	os.Exit(m.Run())
}
type fn func(*testing.T, api.LogClient, api.LogClient, *Config)
// TestServer runs every scenario as a subtest against a freshly set-up
// server, tearing the server down after each one.
func TestServer(t *testing.T) {
	scenarios := map[string]fn{
		"produce and consume":        testProduceConsume,
		"produce and consume stream": testProduceConsumeStream,
		"consume past log boundary":  testConsumePastBoundary,
		"unauthorized fails":         testUnauthorized,
	}
	for name, scenario := range scenarios {
		t.Run(name, func(t *testing.T) {
			rootClient, nobodyClient, cfg, teardown := setupTest(t, nil)
			defer teardown()
			scenario(t, rootClient, nobodyClient, cfg)
		})
	}
}
// setupTest builds a fully wired gRPC log server — commit log in a temp
// dir, casbin ACL authorizer, TLS credentials — listening on a random
// local port, plus two TLS clients: rootClient (full permissions) and
// nobodyClient (none). fn, when non-nil, may mutate the server Config
// before the server is created. The returned teardown stops the server and
// closes both client connections and the listener. With -debug it also
// starts OpenCensus metrics/traces log exporters.
func setupTest(t *testing.T, fn func(*Config)) (
	rootClient api.LogClient,
	nobodyClient api.LogClient,
	config *Config,
	tearndown func(),
) {
	t.Helper()
	// 1 setup server
	// 1.1 setup commit log
	dir, err := ioutil.TempDir("", "server-test")
	require.NoError(t, err)
	commitLog, err := log.NewLog(dir, log.Config{})
	require.NoError(t, err)
	authorizer := auth.New(configx.ACLModelFile, configx.ACLPolicyFile)
	var telemetryExporter *exporter.LogExporter
	if *debug {
		metricsLogFile, err := ioutil.TempFile("", "metrics-*.log")
		require.NoError(t, err)
		t.Logf("metrics log file: %s", metricsLogFile.Name())
		// NOTE(review): "trces" looks like a typo for "traces" in this
		// temp-file pattern; harmless, it only affects the temp filename.
		tracesLogFile, err := ioutil.TempFile("", "trces-*.log")
		require.NoError(t, err)
		t.Logf("traces log file: %s", tracesLogFile.Name())
		// NOTE(review): exporter.NewlogExporter — the opencensus example
		// exporter exposes NewLogExporter (capital L); confirm this
		// compiles against the vendored version.
		telemetryExporter, err = exporter.NewlogExporter(exporter.Options{
			MetricsLogFile:    metricsLogFile.Name(),
			TracesLogFile:     tracesLogFile.Name(),
			ReportingInterval: time.Second,
		})
		require.NoError(t, err)
		err = telemetryExporter.Start()
		require.NoError(t, err)
	}
	cfg := Config{
		CommitLog:  commitLog,
		Authorizer: authorizer,
	}
	if fn != nil {
		fn(&cfg)
	}
	// 1.2 setup listen
	// port :0 means auto assign a free port
	listen, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	// 1.3 setup new grpc server
	serverTLSConfig, err := configx.SetupTLSConfig(configx.TLSConfig{
		CAFile:        configx.CAFile,
		CertFile:      configx.ServerCertFile,
		KeyFile:       configx.ServerKeyFile,
		ServerAddress: listen.Addr().String(),
		Server:        true,
	})
	require.NoError(t, err)
	serverCreds := credentials.NewTLS(serverTLSConfig)
	server, err := NewGRPCServer(&cfg, grpc.Creds(serverCreds))
	require.NoError(t, err)
	// 1.4 run blocking serv in go routine
	// Note that in gRPC-Go, RPCs operate in a blocking/synchronous mode,
	// which means that the RPC call waits for the server to respond,
	//and will either return a response or an error.
	go func() {
		// Serve is a blocking call, has to run in go routine
		// otherwise any code below it wouldn't able to run.
		server.Serve(listen)
	}()
	// 2. setup client stub
	// 2.1 setup channel
	/*
		clientTLSConfig, err := configx.SetupTLSConfig(configx.TLSConfig{
			CAFile:   configx.CAFile,
			CertFile: configx.ClientCertFile,
			KeyFile:  configx.ClientKeyFile,
		})
		require.NoError(t, err)
		// use our CA as the client's root CA,will used to verify the server.
		clientCreds := credentials.NewTLS(clientTLSConfig)
		//clientOptions := []grpc.DialOption{grpc.WithInsecure()}
		clientConnection, err := grpc.Dial(listen.Addr().String(), grpc.WithTransportCredentials(clientCreds))
		require.NoError(t, err)
		// 2.2 setup client stub intance
		client = api.NewLogClient(clientConnection)
	*/
	// 2.3 set up new client
	// newClient dials the server with the given client cert/key and
	// returns the connection, stub and dial options.
	newClient := func(crtPath, keyPath string) (
		*grpc.ClientConn,
		api.LogClient,
		[]grpc.DialOption,
	) {
		tlsConfig, err := configx.SetupTLSConfig(configx.TLSConfig{
			CertFile: crtPath,
			KeyFile:  keyPath,
			CAFile:   configx.CAFile,
			Server:   false,
		})
		require.NoError(t, err)
		tlsCreds := credentials.NewTLS(tlsConfig)
		opts := []grpc.DialOption{grpc.WithTransportCredentials(tlsCreds)}
		conn, err := grpc.Dial(listen.Addr().String(), opts...)
		require.NoError(t, err)
		client := api.NewLogClient(conn)
		return conn, client, opts
	}
	var rootConn *grpc.ClientConn
	rootConn, rootClient, _ = newClient(configx.RootClientCertFile, configx.RootClientKeyFile)
	var nobodyConn *grpc.ClientConn
	nobodyConn, nobodyClient, _ = newClient(configx.NobodyClientCertFile, configx.NobodyClientKeyFile)
	// 3. return instances
	return rootClient, nobodyClient, &cfg, func() {
		server.Stop()
		rootConn.Close()
		nobodyConn.Close()
		listen.Close()
		//commitLog.Remove()
		if telemetryExporter != nil {
			// Give the exporter time to flush its last report.
			time.Sleep(1500 * time.Millisecond)
			telemetryExporter.Stop()
			telemetryExporter.Close()
		}
	}
}
// testUnauthorized verifies that a client without ACL permissions is denied
// both Produce and Consume with codes.PermissionDenied and receives no
// payload.
func testUnauthorized(
	t *testing.T,
	_,
	client api.LogClient,
	config *Config,
) {
	ctx := context.Background()
	// Produce must be rejected.
	produce, err := client.Produce(ctx, &api.ProduceRequest{
		Record: &api.Record{
			Value: []byte("hello world"),
		},
	})
	if produce != nil {
		t.Fatalf("produce response should be nil")
	}
	if gotCode, wantCode := status.Code(err), codes.PermissionDenied; gotCode != wantCode {
		t.Fatalf("got code: %d, want: %d", gotCode, wantCode)
	}
	// Consume must be rejected too.
	consume, err := client.Consume(ctx, &api.ConsumeRequest{Offset: 0})
	if consume != nil {
		t.Fatalf("consume response should be nil")
	}
	if gotCode, wantCode := status.Code(err), codes.PermissionDenied; gotCode != wantCode {
		t.Fatalf("got code: %d, want: %d", gotCode, wantCode)
	}
}
// testProduceConsume appends one record via Produce and reads it back via
// Consume at the returned offset, expecting an identical value and offset.
func testProduceConsume(t *testing.T, client, _ api.LogClient, config *Config) {
	ctx := context.Background()
	want := api.Record{
		Value: []byte("hello world"),
	}
	// The ctx argument lets callers time out or cancel the RPC in flight.
	produceResponse, err := client.Produce(ctx, &api.ProduceRequest{
		Record: &want,
	})
	require.NoError(t, err)
	consumeResponse, err := client.Consume(ctx, &api.ConsumeRequest{
		Offset: produceResponse.Offset,
	})
	require.NoError(t, err)
	require.Equal(t, want.Value, consumeResponse.Record.Value)
	require.Equal(t, want.Offset, consumeResponse.Record.Offset)
}
// testConsumePastBoundary checks that consuming one offset past the end of
// the log fails with ErrOffsetOutOfRange's status code and returns no
// record.
func testConsumePastBoundary(t *testing.T, client, _ api.LogClient, config *Config) {
	ctx := context.Background()
	record := api.Record{
		Value: []byte("hello world"),
	}
	prequest := api.ProduceRequest{
		Record: &record,
	}
	produce, err := client.Produce(ctx, &prequest)
	require.NoError(t, err)
	crequest := api.ConsumeRequest{
		Offset: produce.Offset + 1, // one past the only record
	}
	consume, err := client.Consume(ctx, &crequest)
	if consume != nil {
		t.Fatal("consume not nil")
	}
	// status.Code replaces the deprecated grpc.Code, matching how
	// testUnauthorized inspects gRPC status codes.
	got := status.Code(err)
	want := status.Code(api.ErrOffsetOutOfRange{}.GRPCStatus().Err())
	if got != want {
		t.Fatalf("got err:%v, want: %v", got, want)
	}
}
// testProduceConsumeStream streams two records to the server, checking that
// each ProduceStream ack carries the expected sequential offset, then
// replays them through ConsumeStream from offset 0 and verifies values and
// offsets match.
func testProduceConsumeStream(t *testing.T, client, _ api.LogClient, config *Config) {
	ctx := context.Background()
	records := []*api.Record{
		{
			Value:  []byte("first message"),
			Offset: 0,
		},
		{
			Value:  []byte("second message"),
			Offset: 1,
		},
	}
	// code block: produce both records over one stream, checking each ack.
	{
		stream, err := client.ProduceStream(ctx)
		require.NoError(t, err)
		for offset, record := range records {
			err := stream.Send(&api.ProduceRequest{
				Record: record,
			})
			require.NoError(t, err)
			res, err := stream.Recv()
			require.NoError(t, err)
			if res.Offset != uint64(offset) {
				t.Fatalf("got offset: %d, want: %d", res.Offset, offset)
			}
		}
	}
	// code block: consume them back from the start of the log.
	{
		stream, err := client.ConsumeStream(ctx, &api.ConsumeRequest{Offset: 0})
		require.NoError(t, err)
		for i, record := range records {
			res, err := stream.Recv()
			require.NoError(t, err)
			require.Equal(t, res.Record, &api.Record{
				Value:  record.Value,
				Offset: uint64(i),
			})
		}
	}
}
|
package main
import (
"fmt"
"golang.org/x/text/unicode/norm"
)
// main demonstrates Unicode normalization: the precomposed "à" (U+00E0) and
// the decomposed "a" + combining grave accent (U+0061 U+0300) render the
// same but are different byte sequences, so == reports false until both
// strings are normalized to the same form (NFD here).
func main() {
	fmt.Println("à" == "à")
	// Output: false
	// This line prints a literal label; the comparison follows on the next.
	fmt.Println("\u00E0 == \u0061\u0300")
	fmt.Println("\u00E0" == "\u0061\u0300")
	norm1 := norm.NFD.String("\u00E0")
	norm2 := norm.NFD.String("\u0061\u0300")
	fmt.Println(norm1 == norm2)
	// Output: true
}
|
package main
import "fmt"
// Property holds a single encapsulated integer value, accessed only through
// SetValue / getValue.
type Property struct {
	value int
}

// SetValue sets the property's value.
func (p *Property) SetValue(v int) {
	p.value = v
}

// getValue returns the property's value.
func (p *Property) getValue() int {
	return p.value
}
// main exercises Property: store 1001, read it back and print it.
func main() {
	// A composite literal is equivalent to new(Property): both yield a
	// zero-valued *Property.
	prop := &Property{}
	prop.SetValue(1001)
	fmt.Println(prop.getValue())
}
|
package users
import (
"encoding/json"
"net/http"
"cinemo.com/shoping-cart/framework/web/httpresponse"
"cinemo.com/shoping-cart/internal/errorcode"
"cinemo.com/shoping-cart/pkg/auth"
)
// LoginHandlers handles login functionality: it decodes and validates the
// JSON login request, checks the credentials against the user service, and
// on success responds with a signed JWT for the user.
func LoginHandlers(service Service) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		// unmarshal request; an empty (zero-value) body is rejected along
		// with malformed JSON.
		req := loginRequest{}
		if err := json.NewDecoder(r.Body).Decode(&req); (err != nil || req == loginRequest{}) {
			// NOTE(review): when only the zero-value check fails, err is nil
			// and err.Error() here would panic — confirm loginRequest can
			// decode to its zero value from valid JSON.
			httpresponse.ErrorResponseJSON(ctx, w, http.StatusBadRequest, errorcode.ErrorsInRequestData, err.Error())
			return
		}
		// validate request
		if err := req.Validate(); err != nil {
			httpresponse.ErrorResponseJSON(ctx, w, http.StatusBadRequest, errorcode.ErrorsInRequestData, err.Error())
			return
		}
		// validate User credentials; failures are reported as 403.
		user, err := service.Validate(ctx, req.Username, req.Password)
		if err != nil {
			httpresponse.ErrorResponseJSON(ctx, w, http.StatusForbidden, errorcode.LoginFailed, err.Error())
			return
		}
		// create jwt token for the authenticated username.
		token, err := auth.CreateJWT(user.Username)
		if err != nil {
			httpresponse.ErrorResponseJSON(ctx, w, http.StatusInternalServerError, errorcode.CreateTokenFailed, err.Error())
			return
		}
		httpresponse.RespondJSON(w, http.StatusOK, loginResponse{
			Token: string(token),
		}, nil)
	}
}
|
package flash
// 0_TreeNode

// TreeNode is a node in a labelled tree, identified by Id.
type TreeNode struct {
	Id       string
	Label    string
	Children []TreeNode
}

// Push appends nodes to t's children.
func (t *TreeNode) Push(nodes []TreeNode) {
	// A single variadic append replaces the original element-by-element
	// loop and, unlike it, leaves the existing children intact when nodes
	// is empty (the old code reset t.Children to nil in that case).
	t.Children = append(t.Children, nodes...)
}

// FindNodeById searches the subtree rooted at root depth-first for the node
// with the given id. When no node matches it returns a sentinel node with
// Id "-1" and Label "no result".
func FindNodeById(root TreeNode, id string) TreeNode {
	if root.Id == id {
		return root
	}
	// Recurse into children; the sentinel Id "-1" marks "not found" in a
	// subtree. Ranging over an empty Children slice does nothing, so no
	// explicit leaf check is needed.
	for _, child := range root.Children {
		if res := FindNodeById(child, id); res.Id != "-1" {
			return res
		}
	}
	return TreeNode{Id: "-1", Label: "no result"}
}
|
package main
import (
"fmt"
"github.com/hwdef/go-algorithm/sort/QuickSort"
)
// main demonstrates QuickSort on a fixed slice, printing it before and
// after sorting in place. The commented-out lines exercise the other sort
// implementations in this repo (bucket, selection, insertion, bubble).
func main() {
	var a = []int{6, 5, 3, 1, 8, 7, 2, 4}
	//fmt.Println(BucketSort.BucketSort(a))
	//a = []int{6, 5, 3, 1, 8, 7, 2, 4}
	//fmt.Println(SelectionSort.SelectionSort(a))
	//a = []int{6, 5, 3, 1, 8, 7, 2, 4}
	//fmt.Println(InsertionSort.InsertionSort(a))
	a = []int{6, 5, 3, 1, 8, 7, 2, 4}
	// fmt.Println(BubbleSort.BubbleSort(a))
	fmt.Println(a)
	QuickSort.QuickSort(a)
	fmt.Println(a)
}
|
// This file was generated for SObject FeedAttachment, API Version v43.0 at 2018-07-30 03:48:11.864160076 -0400 EDT m=+58.208950665
package sobjects
import (
"fmt"
"strings"
)
// FeedAttachment mirrors the Salesforce FeedAttachment SObject (API v43.0);
// common fields come from the embedded BaseSObject.
type FeedAttachment struct {
	BaseSObject
	FeedEntityId string `force:",omitempty"`
	Id           string `force:",omitempty"`
	IsDeleted    bool   `force:",omitempty"`
	RecordId     string `force:",omitempty"`
	Title        string `force:",omitempty"`
	Type         string `force:",omitempty"`
	Value        string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject.
func (t *FeedAttachment) ApiName() string {
	return "FeedAttachment"
}
// String renders the record header and every field as a multi-line,
// human-readable block.
func (t *FeedAttachment) String() string {
	builder := strings.Builder{}
	// NOTE(review): t.Name is not declared on FeedAttachment — presumably
	// it is promoted from the embedded BaseSObject; confirm it exists there.
	builder.WriteString(fmt.Sprintf("FeedAttachment #%s - %s\n", t.Id, t.Name))
	builder.WriteString(fmt.Sprintf("\tFeedEntityId: %v\n", t.FeedEntityId))
	builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id))
	builder.WriteString(fmt.Sprintf("\tIsDeleted: %v\n", t.IsDeleted))
	builder.WriteString(fmt.Sprintf("\tRecordId: %v\n", t.RecordId))
	builder.WriteString(fmt.Sprintf("\tTitle: %v\n", t.Title))
	builder.WriteString(fmt.Sprintf("\tType: %v\n", t.Type))
	builder.WriteString(fmt.Sprintf("\tValue: %v\n", t.Value))
	return builder.String()
}
// FeedAttachmentQueryResponse is the SOQL query envelope for
// FeedAttachment records.
type FeedAttachmentQueryResponse struct {
	BaseQuery
	Records []FeedAttachment `json:"Records" force:"records"`
}
|
package service
import (
"2019_2_IBAT/pkg/app/auth/session"
"2019_2_IBAT/pkg/app/notifs/notifsproto"
"2019_2_IBAT/pkg/app/recommends/recomsproto"
. "2019_2_IBAT/pkg/pkg/models"
"context"
"fmt"
"log"
"github.com/google/uuid"
)
// Service fans out new-vacancy notifications to the websocket connections
// of users selected by the recommendation service.
type Service struct {
	NotifChan    chan NotifStruct          // queue of pending notifications
	ConnectsPool WsConnects                // per-user websocket connections, guarded by ConsMu
	AuthService  session.ServiceClient     // auth RPC client
	RecomService recomsproto.ServiceClient // resolves tag ids to interested users
}
func (h Service) SendNotification(ctx context.Context,
msg *notifsproto.SendNotificationMessage) (*notifsproto.Bool, error) {
notif := NotifStruct{
VacancyId: uuid.MustParse(msg.VacancyID),
TagIDs: StringsToUuids(msg.TagIDs),
}
h.NotifChan <- notif
return ¬ifsproto.Bool{}, nil
}
// Notifications is the fan-out loop: for each queued notification it asks
// the recommendation service which users are interested in the vacancy's
// tags, then pushes the vacancy id into every open websocket connection of
// those users. Intended to run in its own goroutine; never returns.
func (h Service) Notifications() {
	for {
		notif := <-h.NotifChan
		fmt.Println("Notification accepted")
		fmt.Println(notif)
		ctx := context.Background()
		idsMsg, err := h.RecomService.GetUsersForTags(ctx,
			&recomsproto.IDsMessage{IDs: UuidsToStrings(notif.TagIDs)},
		)
		// Check the error before touching idsMsg: the original dereferenced
		// the (nil on failure) response first and only logged the error
		// afterwards, which would panic on any RPC failure.
		if err != nil {
			log.Printf("Notifications %s", err)
			continue
		}
		ids := StringsToUuids(idsMsg.IDs)
		fmt.Println("Users ids intrested in new vacancy")
		fmt.Println(ids)
		fmt.Println("connects.ConsMu.Lock()")
		h.ConnectsPool.ConsMu.Lock()
		for _, id := range ids {
			fmt.Println("Notification ready to be sent to user")
			fmt.Println(h.ConnectsPool.Connects[id])
			if cons, ok := h.ConnectsPool.Connects[id]; ok {
				fmt.Printf("Notification was sent to user %s\n", id.String())
				for _, con := range cons.Connects {
					con.Ch <- notif.VacancyId
				}
			}
		}
		h.ConnectsPool.ConsMu.Unlock()
		fmt.Println("connects.ConsMu.Unlock()")
		fmt.Println(ids)
	}
}
|
package requests
// KeyStruct wraps a single OpenLibrary-style "key" reference field.
type KeyStruct struct {
	Key string `json:"key"`
}
// type GetBookByISBN struct {
// Publisher []string `json:"publishers"`
// Title string `json:"title"`
// NumberOfPages uint `json:"number_of_pages"`
// PublishDate string `json:"publish_date"`
// AuthorId []KeyStruct `json:"authors"`
// WorkId []KeyStruct `json:"works"`
// BookId string `json:"key"`
// }
// type GetGoogleBookByISBN struct {
// Items []struct {
// Id string `json:"id"`
// VolumeInfo struct {
// Title string `json:"title"`
// Authors []string `json:"authors"`
// Publisher string `json:"publisher"`
// PublishedDate string `json:"publishedDate"`
// Description string `json:"description"`
// NumberOfPages uint `json:"pageCount"`
// Language string `json:"language"`
// ImageLinks struct {
// Thumbnail string `json:"thumbnail"`
// } `json:"imageLinks"`
// } `json:"volumeInfo"`
// } `json:"items"`
// }
// type GetBookByWorkId struct {
// Description string `json:"description"`
// Title string `json:"title"`
// }
// CreateOrder is the request payload for creating a book order, carrying
// the ordered book ids and an Indonesian-style destination address
// (provinsi/kota/kecamatan/desa = province/city/district/village).
type CreateOrder struct {
	Books          []string `json:"books" form:"books"`
	DestProvinsi   string   `json:"dest_provinsi" form:"dest_provinsi"`
	DestKota       string   `json:"dest_kota" form:"dest_kota"`
	DestKecamatan  string   `json:"dest_kecamatan" form:"dest_kecamatan"`
	DestDesa       string   `json:"dest_desa" form:"dest_desa"`
	DestAddress    string   `json:"dest_address" form:"dest_address"`
	DestPostalCode string   `json:"dest_postal_code" form:"dest_postal_code"`
	// ShippingCost is supplied by the client — presumably in the smallest
	// currency unit; TODO confirm it is validated server-side.
	ShippingCost uint `json:"shipping_cost" form:"shipping_cost"`
}
// UpdateOrderStatus is the request payload for toggling an order's status
// flag (exact semantics of true/false are defined by the handler — verify).
type UpdateOrderStatus struct {
	Status bool `json:"status" form:"status"`
}
|
package test
import (
"fmt"
"testing"
"ppgo"
)
// TestConfig verifies that a ppgo config file can be loaded and a value
// read back. It only prints the value; it does not assert on it.
func TestConfig(t *testing.T) {
	// NOTE(review): hard-coded absolute path — this breaks on any other
	// machine; TODO derive it from the test's working directory.
	ppgo.API_ROOT = "/Users/wangpp/Code/github/go/src/ppgo-sample"
	// Initialize the configuration file ("Config" name, "conf" directory).
	ppgo.NewConfig("Config", "conf")
	// Removed stray semicolons: the original was not gofmt-clean.
	fmt.Println(ppgo.Config.GetString("system.port"))
}
|
package client
// WrappableError is an error that exposes its underlying cause via Unwrap,
// making it usable with errors.Is / errors.As chains.
type WrappableError interface {
	error
	Unwrap() error
}
// CertificateReadError indicates a certificate could not be read; the
// underlying cause is retained in Err.
type CertificateReadError struct {
	Err error
}

// Error returns a fixed message; the cause is reachable via Unwrap.
func (e *CertificateReadError) Error() string {
	return "cannot read certificate"
}

// Unwrap returns the wrapped cause for errors.Is / errors.As.
func (e *CertificateReadError) Unwrap() error {
	return e.Err
}
// CertificateDecodeError indicates a certificate could not be decoded.
// Unlike the other errors in this file, it carries no underlying cause.
type CertificateDecodeError struct{}

// Error returns a fixed message describing the decode failure.
func (e *CertificateDecodeError) Error() string {
	return "cannot decode certificate"
}
// ConnectError indicates a connection could not be created; the underlying
// cause is retained in Err.
type ConnectError struct {
	Err error
}

// Error returns a fixed message; the cause is reachable via Unwrap.
func (e *ConnectError) Error() string {
	return "cannot create connection"
}

// Unwrap returns the wrapped cause for errors.Is / errors.As.
func (e *ConnectError) Unwrap() error {
	return e.Err
}
// DisconnectError indicates a connection could not be closed cleanly; the
// underlying cause is retained in Err.
type DisconnectError struct {
	Err error
}

// Error returns a fixed message; the cause is reachable via Unwrap.
func (e *DisconnectError) Error() string {
	return "cannot close connection"
}

// Unwrap returns the wrapped cause for errors.Is / errors.As.
func (e *DisconnectError) Unwrap() error {
	return e.Err
}
// InvalidConnectionError indicates the connection was in a state in which
// the requested operation is not allowed; the cause is retained in Err.
type InvalidConnectionError struct {
	Err error
}

// Error returns a fixed message; the cause is reachable via Unwrap.
func (e *InvalidConnectionError) Error() string {
	return "invalid connection state"
}

// Unwrap returns the wrapped cause for errors.Is / errors.As.
func (e *InvalidConnectionError) Unwrap() error {
	return e.Err
}
|
package class
import (
"github.com/zxh0/jvm.go/jvmgo/jutil"
)
// IsArray reports whether this object is an array instance
// (delegates to the object's class).
func (self *Obj) IsArray() bool {
	return self.class.IsArray()
}

// IsPrimitiveArray reports whether this object is an array of a
// primitive type (delegates to the object's class).
func (self *Obj) IsPrimitiveArray() bool {
	return self.class.IsPrimitiveArray()
}

// Refs returns the fields as a reference array ([]*Obj).
// Panics if the object is not a reference array.
func (self *Obj) Refs() []*Obj {
	return self.fields.([]*Obj)
}

// Booleans returns the fields as a boolean array. JVM booleans are
// stored as int8 here — note the same representation as Bytes.
func (self *Obj) Booleans() []int8 {
	return self.fields.([]int8)
}

// Bytes returns the fields as a byte array ([]int8, signed as in the JVM).
func (self *Obj) Bytes() []int8 {
	return self.fields.([]int8)
}

// Chars returns the fields as a char array (uint16, UTF-16 code units).
func (self *Obj) Chars() []uint16 {
	return self.fields.([]uint16)
}

// Shorts returns the fields as a short array.
func (self *Obj) Shorts() []int16 {
	return self.fields.([]int16)
}

// Ints returns the fields as an int array.
func (self *Obj) Ints() []int32 {
	return self.fields.([]int32)
}

// Longs returns the fields as a long array.
func (self *Obj) Longs() []int64 {
	return self.fields.([]int64)
}

// Floats returns the fields as a float array.
func (self *Obj) Floats() []float32 {
	return self.fields.([]float32)
}

// Doubles returns the fields as a double array.
func (self *Obj) Doubles() []float64 {
	return self.fields.([]float64)
}

// GoBytes returns the byte-array fields reinterpreted as Go []byte
// (unsigned) via jutil.CastInt8sToUint8s.
func (self *Obj) GoBytes() []byte {
	s := self.fields.([]int8)
	return jutil.CastInt8sToUint8s(s)
}
|
package main
import "fmt"
// unexpectedResponseErr reports a response (presumably HTTP, given the
// field name — verify at call sites) whose status code was not the one
// the caller expected; body keeps the raw response text for diagnostics.
type unexpectedResponseErr struct {
	statusCode int
	body       string
}

// Error formats the status code and body into one diagnostic line.
func (e *unexpectedResponseErr) Error() string {
	return fmt.Sprintf("error: unexpected response: %d %s", e.statusCode, e.body)
}
// invalidKeyTypeErr reports that the value stored under key has a dynamic
// type the caller cannot handle; val is retained so the message can show
// both the type and the offending value.
type invalidKeyTypeErr struct {
	key string
	val interface{}
}

// Error names the offending key together with the dynamic type and value
// found under it.
func (e *invalidKeyTypeErr) Error() string {
	return fmt.Sprintf("error: invalid type for key: type of %s is %T: %v", e.key, e.val, e.val)
}
|
package main
import (
"fmt"
tools "../tools"
)
// main prints the sum of 10 and 100 as computed by the tools package.
// NOTE(review): the file imports "../tools" by relative path, which is
// not supported under Go modules — consider a module-rooted import path.
func main() {
	fmt.Println(tools.Add(10, 100))
}
package downloader
import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"sync"
)
// job pairs a URL to fetch with the local filename to write it to.
type job struct {
	url      string
	filename string
}
// Downloader downloads files concurrently using a fixed pool of worker
// goroutines fed through the jobs channel. Construct with New; always use
// by pointer (contains a sync.WaitGroup, which must not be copied).
type Downloader struct {
	threadNum int          // number of worker goroutines started by New
	jobs      chan *job    // pending downloads; closed by Close
	waitGroup sync.WaitGroup // tracks running workers so Close can wait
}
// New builds a Downloader and starts threadNum worker goroutines that
// consume jobs from the job channel until it is closed. Call Close to
// stop the workers and wait for them to finish.
func New(threadNum int) *Downloader {
	d := &Downloader{
		threadNum: threadNum,
		// Unbuffered (was the unidiomatic make(chan *job, 0)):
		// AddJob blocks until a worker is free.
		jobs: make(chan *job),
	}
	for i := 0; i < threadNum; i++ {
		d.waitGroup.Add(1)
		go func() {
			defer d.waitGroup.Done()
			for j := range d.jobs {
				if err := download(j.url, j.filename); err != nil {
					log.Printf("Failed to download %s: %v", j.url, err)
				}
			}
		}()
	}
	return d
}
// AddJob enqueues a download of url into filename. It blocks until a
// worker picks the job up; it must not be called after Close (sending on
// a closed channel panics).
func (d *Downloader) AddJob(url, filename string) {
	d.jobs <- &job{url, filename}
}
// Close signals the workers that no more jobs will arrive and blocks
// until every in-flight download has finished.
func (d *Downloader) Close() {
	close(d.jobs)
	d.waitGroup.Wait()
}
// download fetches url over HTTP and writes the response body to
// filename. It returns an error for transport failures and for non-200
// status codes.
//
// BUG FIX: the original created the output file first and never checked
// the status code, so failed requests left empty files behind and error
// pages were saved as if they were the download.
func download(url, filename string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("downloading %s: unexpected status %s", url, resp.Status)
	}
	// Only create the file once we know the request succeeded.
	out, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer out.Close()
	if _, err = io.Copy(out, resp.Body); err != nil {
		return err
	}
	return nil
}
|
// test-for project doc.go
/*
Package main holds the package-level documentation for the test-for project.
*/
package main
|
package bulletproofs
import "incognito-chain/common"
// BulletproofsLogger wraps a common.Logger for use by the bulletproofs
// package; attach a concrete logger via Init.
type BulletproofsLogger struct {
	Log common.Logger
}
// Init installs inst as the logger implementation. Call once at startup
// before any logging happens in this package.
func (logger *BulletproofsLogger) Init(inst common.Logger) {
	logger.Log = inst
}
// Logger is the package-global logger instance (fixed typo: "instant");
// its Log field is nil until Init is called.
var Logger = BulletproofsLogger{}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.