text
stringlengths 11
4.05M
|
|---|
package password
import (
"golang.org/x/crypto/bcrypt"
)
// HashAndSalt hashes plaintextPassword with bcrypt and returns the encoded
// hash string (the salt and cost are embedded in the hash itself).
func HashAndSalt(plaintextPassword string) (string, error) {
	// bcrypt.MinCost (4) is far too weak for real password storage; use
	// DefaultCost (10) so the hash resists offline brute force. Existing
	// hashes remain verifiable because the cost is stored in each hash.
	hash, err := bcrypt.GenerateFromPassword([]byte(plaintextPassword), bcrypt.DefaultCost)
	if err != nil {
		return "", err
	}
	return string(hash), nil
}
// ComparePlaintextWithEncypted reports whether plaintextPassword matches the
// bcrypt hash in encryptedPassword.
func ComparePlaintextWithEncypted(plaintextPassword string, encryptedPassword string) bool {
	return bcrypt.CompareHashAndPassword([]byte(encryptedPassword), []byte(plaintextPassword)) == nil
}
|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "as IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package table
import (
"errors"
)
// ReleasedAppTemplate is the template of a service for which a release
// version has already been generated (original note: 已生成版本服务的模版).
type ReleasedAppTemplate struct {
	ID         uint32                         `json:"id" gorm:"primaryKey"`
	Spec       *ReleasedAppTemplateSpec       `json:"spec" gorm:"embedded"`
	Attachment *ReleasedAppTemplateAttachment `json:"attachment" gorm:"embedded"`
	Revision   *CreatedRevision               `json:"revision" gorm:"embedded"`
}

// TableName is the ReleasedAppTemplate's database table name.
func (r *ReleasedAppTemplate) TableName() string {
	return "released_app_templates"
}

// AppID implements the AuditRes interface; it reports the attached app id.
// Note: panics if Attachment is nil (callers validate via ValidateCreate).
func (r *ReleasedAppTemplate) AppID() uint32 {
	return r.Attachment.AppID
}

// ResID implements the AuditRes interface; it reports the record's primary key.
func (r *ReleasedAppTemplate) ResID() uint32 {
	return r.ID
}

// ResType implements the AuditRes interface.
func (r *ReleasedAppTemplate) ResType() string {
	return "released_app_template"
}
// RatiList is a list of ReleasedAppTemplate records.
type RatiList []*ReleasedAppTemplate

// AppID implements the AuditRes interface; an empty list reports 0,
// otherwise the first element's attached app id.
func (rs RatiList) AppID() uint32 {
	if len(rs) == 0 {
		return 0
	}
	return rs[0].Attachment.AppID
}

// ResID implements the AuditRes interface; an empty list reports 0,
// otherwise the first element's primary key.
func (rs RatiList) ResID() uint32 {
	if len(rs) == 0 {
		return 0
	}
	return rs[0].ID
}

// ResType implements the AuditRes interface.
func (rs RatiList) ResType() string {
	return "released_app_template"
}
// ValidateCreate validate ReleasedAppTemplate is valid or not when create it.
// ID must be unset (it is assigned by the database); Spec, Attachment and
// Revision must all be present, and Spec/Attachment must themselves validate.
func (r *ReleasedAppTemplate) ValidateCreate() error {
	if r.ID > 0 {
		return errors.New("id should not be set")
	}
	if r.Spec == nil {
		return errors.New("spec not set")
	}
	if err := r.Spec.ValidateCreate(); err != nil {
		return err
	}
	if r.Attachment == nil {
		return errors.New("attachment not set")
	}
	if err := r.Attachment.Validate(); err != nil {
		return err
	}
	if r.Revision == nil {
		return errors.New("revision not set")
	}
	// Note: only Revision's presence is checked, not its content.
	return nil
}
// ReleasedAppTemplateSpec defines all the specifics for ReleasedAppTemplate set by user.
type ReleasedAppTemplateSpec struct {
	ReleaseID            uint32 `json:"release_id" gorm:"column:release_id"`
	TemplateSpaceID      uint32 `json:"template_space_id" gorm:"column:template_space_id"`
	TemplateSpaceName    string `json:"template_space_name" gorm:"column:template_space_name"`
	TemplateSetID        uint32 `json:"template_set_id" gorm:"column:template_set_id"`
	TemplateSetName      string `json:"template_set_name" gorm:"column:template_set_name"`
	TemplateID           uint32 `json:"template_id" gorm:"column:template_id"`
	Name                 string `json:"name" gorm:"column:name"`
	Path                 string `json:"path" gorm:"column:path"`
	TemplateRevisionID   uint32 `json:"template_revision_id" gorm:"column:template_revision_id"`
	IsLatest             bool   `json:"is_latest" gorm:"column:is_latest"`
	TemplateRevisionName string `json:"template_revision_name" gorm:"column:template_revision_name"`
	TemplateRevisionMemo string `json:"template_revision_memo" gorm:"column:template_revision_memo"`
	FileType             string `json:"file_type" gorm:"column:file_type"`
	FileMode             string `json:"file_mode" gorm:"column:file_mode"`
	User                 string `json:"user" gorm:"column:user"`
	UserGroup            string `json:"user_group" gorm:"column:user_group"`
	Privilege            string `json:"privilege" gorm:"column:privilege"`
	Signature            string `json:"signature" gorm:"column:signature"`
	ByteSize             uint64 `json:"byte_size" gorm:"column:byte_size"`
}

// ValidateCreate validate ReleasedAppTemplate spec when it is created.
// All referenced ids must be non-zero and the naming/path fields non-empty;
// the first failing check determines the returned error.
func (r *ReleasedAppTemplateSpec) ValidateCreate() error {
	// The id fields are unsigned, so "< 0" is impossible; compare against 0
	// directly instead of the misleading "<= 0" (flagged by staticcheck).
	if r.ReleaseID == 0 {
		return errors.New("invalid release id")
	}
	if r.TemplateSpaceID == 0 {
		return errors.New("invalid template space id")
	}
	if r.TemplateSetID == 0 {
		return errors.New("invalid template set id")
	}
	if r.TemplateID == 0 {
		return errors.New("invalid template id")
	}
	if r.TemplateRevisionID == 0 {
		return errors.New("invalid template revision id")
	}
	if r.TemplateSpaceName == "" {
		return errors.New("template space name is empty")
	}
	if r.TemplateSetName == "" {
		return errors.New("template set name is empty")
	}
	if r.TemplateRevisionName == "" {
		return errors.New("template revision name is empty")
	}
	if r.Name == "" {
		return errors.New("template config name is empty")
	}
	if r.Path == "" {
		return errors.New("template config path is empty")
	}
	return nil
}
// ReleasedAppTemplateAttachment defines the ReleasedAppTemplate attachments.
type ReleasedAppTemplateAttachment struct {
	BizID uint32 `json:"biz_id" gorm:"column:biz_id"`
	AppID uint32 `json:"app_id" gorm:"column:app_id"`
}

// Validate whether ReleasedAppTemplate attachment is valid or not.
// Both ids must be non-zero; biz id is checked first.
func (r *ReleasedAppTemplateAttachment) Validate() error {
	// The ids are unsigned, so "unset" can only mean zero; compare against 0
	// directly instead of the misleading "<= 0".
	if r.BizID == 0 {
		return errors.New("invalid attachment biz id")
	}
	if r.AppID == 0 {
		return errors.New("invalid attachment app id")
	}
	return nil
}
|
package models
import (
"github.com/astaxie/beego/orm"
"time"
)
//`id` bigint(20) NOT NULL COMMENT '主键id',
//`current_price` varchar(50) DEFAULT NULL COMMENT '当前价格',
//`balance` varchar(50) DEFAULT NULL COMMENT '差额',
//`price_rate` varchar(50) DEFAULT NULL COMMENT '增长率',
//`begin_date` datetime DEFAULT NULL COMMENT '开始时间',
//`end_date` datetime DEFAULT NULL COMMENT '结束时间',
//`is_use` bit(1) DEFAULT b'1' COMMENT '是否使用,1-正在使用,0-停止使用',
//`remark` varchar(500) DEFAULT NULL COMMENT '备注',
//`create_user` varchar(100) DEFAULT NULL COMMENT '创建人',
//`create_time` datetime DEFAULT NULL COMMENT '创建时间',
//`update_user` varchar(100) DEFAULT NULL COMMENT '更新人',
//`update_time` datetime DEFAULT NULL COMMENT '更新时间',
//`ext1` varchar(100) DEFAULT NULL COMMENT '扩展1',
//`ext2` varchar(100) DEFAULT NULL COMMENT '扩展2',
//`ext3` varchar(100) DEFAULT NULL COMMENT '扩展3',
//`is_delete` int(2) DEFAULT '0' COMMENT '是否删除:0、否 1、是',
// PointFluctuate mirrors the point_fluctuate table (see the DDL comments
// above): a price point with its validity window, usage flag, audit fields
// and soft-delete marker.
type PointFluctuate struct {
	Id           int64     // primary key
	CurrentPrice string    // current price
	Balance      string    // difference
	PriceRate    string    // growth rate
	BeginDate    time.Time // start of validity
	EndDate      time.Time // end of validity
	IsUse        int       // 1 = in use, 0 = stopped (bit column is_use)
	Remark       string
	CreateUser   string
	CreateTime   time.Time
	UpdateUser   string
	UpdateTime   time.Time
	Ext1         string // spare columns ext1..ext3
	Ext2         string
	Ext3         string
	IsDelete     int // soft delete: 0 = no, 1 = yes
}

// TableName returns the ORM table name (via the project-level TableName helper).
func (p *PointFluctuate) TableName() string {
	return TableName("point_fluctuate")
}
// ViewPointFluctuate maps the view_point_fluctuate database view; it embeds
// PointFluctuate since the view exposes the same columns.
type ViewPointFluctuate struct {
	//Id int64
	PointFluctuate
}

// TableName returns the ORM name of the backing view.
func (p *ViewPointFluctuate) TableName() string {
	return TableName("view_point_fluctuate")
}
// Update persists the named fields of p via the given ormer (all fields when
// none are named); the affected-row count is discarded.
func (p *PointFluctuate) Update(ormer orm.Ormer, fields ...string) error {
	_, err := ormer.Update(p, fields...)
	return err
}
// Add inserts p and returns the id of the new row.
func (p *PointFluctuate) Add(ormer orm.Ormer) (int64, error) {
	return ormer.Insert(p)
}
// GetViewPointFluctuateByIsUse returns the single row of the
// view_point_fluctuate view whose in-use flag matches isUse
// (true -> 1, false -> 0).
func GetViewPointFluctuateByIsUse(isUse bool) (*ViewPointFluctuate, error) {
	flag := 0
	if isUse {
		flag = 1
	}
	pf := new(ViewPointFluctuate)
	err := orm.NewOrm().QueryTable(TableName("view_point_fluctuate")).Filter("isUse", flag).One(pf)
	// Early-return on error instead of the inverted if/else.
	if err != nil {
		return nil, err
	}
	return pf, nil
}
// GetPointFluctuateByIsUse returns the single PointFluctuate row whose in-use
// flag matches isUse (true -> 1, false -> 0).
func GetPointFluctuateByIsUse(isUse bool) (*PointFluctuate, error) {
	flag := 0
	if isUse {
		flag = 1
	}
	pf := new(PointFluctuate)
	// BUG FIX: the original queried "view_point_fluctuate", an apparent
	// copy-paste from GetViewPointFluctuateByIsUse; this accessor's model is
	// PointFluctuate, whose table is point_fluctuate.
	err := orm.NewOrm().QueryTable(TableName("point_fluctuate")).Filter("isUse", flag).One(pf)
	if err != nil {
		return nil, err
	}
	return pf, nil
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package runner provides utilities to execute commands.
package runner
import (
"bytes"
"fmt"
"io"
"log"
"os"
"os/exec"
)
// Runner is the interface to execute commands.
type Runner interface {
CmdRun(cmd *exec.Cmd) error
CmdOutput(cmd *exec.Cmd) ([]byte, error)
CmdCombinedOutput(cmd *exec.Cmd) ([]byte, error)
}
// Default is the Runner that executes the command by default.
type Default struct {
Quiet bool
}
// CmdRun executes the command.
func (d *Default) CmdRun(cmd *exec.Cmd) error {
if !d.Quiet {
log.Printf("Running: %v", cmd.Args)
}
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("%v: %s", err, stderr.String())
}
return nil
}
// CmdOutput executes the command and returns the command stdout.
func (d *Default) CmdOutput(cmd *exec.Cmd) ([]byte, error) {
if !d.Quiet {
log.Printf("Running: %v", cmd.Args)
}
var stderr bytes.Buffer
cmd.Stderr = &stderr
b, err := cmd.Output()
if err != nil {
return b, fmt.Errorf("%v: %s", err, stderr.String())
}
return b, nil
}
// CmdCombinedOutput executes the command and returns the command stdout and stderr.
func (d *Default) CmdCombinedOutput(cmd *exec.Cmd) ([]byte, error) {
if !d.Quiet {
log.Printf("Running: %v", cmd.Args)
}
return cmd.CombinedOutput()
}
// Multi will both print the output for the user and return it to callers.
// Useful for debugging, e.g. if the importer calls terraform import but it freezes without returning output.
// Inspired by https://blog.kowalczyk.info/article/wOYk/advanced-command-execution-in-go-with-osexec.html
type Multi struct {
Quiet bool
}
// CmdRun executes the command and prints stdout and stderr without returning either.
func (d *Multi) CmdRun(cmd *exec.Cmd) error {
if !d.Quiet {
log.Printf("Running: %v", cmd.Args)
}
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
// CmdOutput executes the command and prints stdout and stderr then returns just stdout.
func (d *Multi) CmdOutput(cmd *exec.Cmd) ([]byte, error) {
if !d.Quiet {
log.Printf("Running: %v", cmd.Args)
}
var stdout bytes.Buffer
cmd.Stdin = os.Stdin
cmd.Stdout = io.MultiWriter(os.Stdout, &stdout)
cmd.Stderr = os.Stderr
err := cmd.Run()
return stdout.Bytes(), err
}
// CmdCombinedOutput executes the command and prints stdout and stderr then returns them.
func (d *Multi) CmdCombinedOutput(cmd *exec.Cmd) ([]byte, error) {
if !d.Quiet {
log.Printf("Running: %v", cmd.Args)
}
var combined bytes.Buffer
cmd.Stdin = os.Stdin
cmd.Stdout = io.MultiWriter(os.Stdout, &combined)
cmd.Stderr = io.MultiWriter(os.Stderr, &combined)
err := cmd.Run()
return combined.Bytes(), err
}
// Dry is the Runner that only prints commands and does not execute them.
type Dry struct{}
// CmdRun prints the command.
func (*Dry) CmdRun(cmd *exec.Cmd) error {
log.Printf("%v", cmd.String())
return nil
}
// CmdOutput prints the command.
func (d *Dry) CmdOutput(cmd *exec.Cmd) ([]byte, error) {
return []byte(cmd.String()), d.CmdRun(cmd)
}
// CmdCombinedOutput prints the command.
func (d *Dry) CmdCombinedOutput(cmd *exec.Cmd) ([]byte, error) {
return []byte(cmd.String()), d.CmdRun(cmd)
}
|
package proto
// nolint:lll
// _ is a deliberately empty placeholder function; its only purpose is to make
// this file contain Go code (see the comment inside for why).
func _() {
	/*
		dummy file that fixes this error when build tag proto is not specified:
		$ go mod tidy
		[...]
		github.com/jwkohnen/airac imports
		github.com/jwkohnen/airac/proto: module github.com/jwkohnen/airac@latest found (v1.0.4), but does not contain package github.com/jwkohnen/airac/proto
	*/
}
|
package main
import "fmt"
// user is an empty type demonstrating method promotion through embedding.
type user struct{}

// manager embeds user as an anonymous field, promoting its methods.
type manager struct {
	user // anonymous field
}

// toString returns the base identifier for user.
func (user) toString() string {
	return "user"
}

// toString shadows the promoted user method and extends its result.
func (m manager) toString() string {
	base := m.user.toString()
	return base + "; manager"
}
// main prints the shadowing method first, then the promoted one explicitly.
func main() {
	m := manager{}
	fmt.Println(m.toString())      // "user; manager"
	fmt.Println(m.user.toString()) // "user"
}
|
package main
import "fmt"
// main demonstrates complex literals: explicit complex64, inferred
// complex128, and the real/imag built-ins.
func main() {
	var c complex64
	c = 2.1 + 3.14i
	fmt.Println(c)
	// Type inference: an untyped complex constant defaults to complex128.
	z := 3.3 + 4.4i
	fmt.Printf("%T\n", z)
	// Extract the real and imaginary parts with the built-in functions.
	fmt.Println(real(z), imag(z))
}
|
package bash
import (
"os/exec"
"github.com/swanwish/go-common/logs"
)
// ExecuteCmd runs the given command line through `bash -c` and returns its
// combined stdout+stderr. On failure the output is discarded and only the
// error is returned.
func ExecuteCmd(command string) (string, error) {
	logs.Debugf("Execute command %s", command)
	cmd := exec.Command("bash", "-c", command)
	cmdOutput, err := cmd.CombinedOutput()
	if err != nil {
		// Fixed typo in the log message ("combiled" -> "combined").
		logs.Errorf("Failed to get combined output, the error is %v", err)
		return "", err
	}
	// Early return above removes the redundant else branch.
	return string(cmdOutput), nil
}
|
//Example of how to use generic tool to pipe in workloads and use
//concurrency to process input
package main
import (
"bufio"
"flag"
"fmt"
"log"
"net/http"
"os"
"sync"
)
// n is the worker count, set from the -count flag in main.
var n int

// task is one unit of work: process does the work, output reports the result.
type task interface {
	process()
	output()
}

// factory turns one line of input into a task.
type factory interface {
	create(line string) task
}
// build program
//
//	echo "https://www.google.com" | ./program_name
//
// or from a file
//
//	./program_name < filename.listoflinks
//
// main parses the -count flag (degree of worker parallelism, default 10) and
// starts the pipeline with the HTTP task factory.
func main() {
	count := flag.Int("count", 10, "number of workers")
	flag.Parse()
	n = *count
	f := &Factory{}
	run(f)
}
// run wires a three-stage pipeline: one producer goroutine turns each stdin
// line into a task, n worker goroutines process tasks concurrently, and the
// main goroutine prints results as they complete (completion order, not input
// order).
func run(f factory) {
	var wg sync.WaitGroup
	in := make(chan task)
	wg.Add(1)
	go func() {
		// Producer: one task per stdin line.
		s := bufio.NewScanner(os.Stdin)
		for s.Scan() {
			in <- f.create(s.Text())
		}
		if s.Err() != nil {
			// NOTE(review): log.Fatalf exits the whole process here, so wg and
			// the workers are abandoned — acceptable for a CLI tool.
			log.Fatalf("Error reading STDIN: %s", s.Err())
		}
		// Closing in ends each worker's range loop below.
		close(in)
		wg.Done()
	}()
	out := make(chan task)
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			// Worker: drain in until the producer closes it.
			for t := range in {
				t.process()
				out <- t
			}
			wg.Done()
		}()
	}
	go func() {
		// Once the producer and all workers are done, no more results can
		// arrive, so out can be closed to end the print loop.
		wg.Wait()
		close(out)
	}()
	for t := range out {
		t.output()
	}
}
// HTTPTask holds the URL to probe and the outcome of the probe.
type HTTPTask struct {
	url string
	ok  bool
}

// process performs an HTTP GET on h.url and records whether it answered 200 OK.
func (h *HTTPTask) process() {
	resp, err := http.Get(h.url)
	if err != nil {
		h.ok = false
		return
	}
	// BUG FIX: the response body was never closed, leaking the connection and
	// preventing transport reuse.
	defer resp.Body.Close()
	h.ok = resp.StatusCode == http.StatusOK
}

// output prints the URL and whether the probe succeeded.
func (h *HTTPTask) output() {
	fmt.Printf("%s %t\n", h.url, h.ok)
}
// Factory builds HTTPTask values from input lines; it implements the factory
// interface.
type Factory struct {
}

// create wraps one input line (a URL) in a fresh HTTPTask.
func (f *Factory) create(line string) task {
	return &HTTPTask{url: line}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//82. Remove Duplicates from Sorted List II
//Given a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.
//Example 1:
//Input: 1->2->3->3->4->4->5
//Output: 1->2->5
//Example 2:
//Input: 1->1->1->2->3
//Output: 2->3
///**
// * Definition for singly-linked list.
// * type ListNode struct {
// * Val int
// * Next *ListNode
// * }
// */
//func deleteDuplicates(head *ListNode) *ListNode {
//}
// Time Is Money
|
package engine
import (
"math"
"regexp"
)
// IRoutes is the set of route-registration operations; each method returns an
// IRoutes so calls can be chained.
type IRoutes interface {
	Use(...HandlerFunc) IRoutes
	Handle(string, string, ...HandlerFunc) IRoutes
	Any(string, ...HandlerFunc) IRoutes
	GET(string, ...HandlerFunc) IRoutes
	POST(string, ...HandlerFunc) IRoutes
}

// IRouter adds sub-group creation on top of IRoutes.
type IRouter interface {
	IRoutes
	Group(string, ...HandlerFunc) *RouterGroup
}

// RouterGroup carries a shared middleware chain and base path for the routes
// registered through it.
type RouterGroup struct {
	Handlers HandlersChain // middleware applied to every route in this group
	basePath string        // prefix joined onto each relative path
	engine   *Engine
	root     bool // true only for the engine's top-level group
}

// Compile-time check that RouterGroup satisfies IRouter.
var _ IRouter = &RouterGroup{}
// Use appends middleware to the group's handler chain and returns the group
// (or the engine when this is the root group).
func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes {
	group.Handlers = append(group.Handlers, middleware...)
	return group.returnObj()
}
// httpMethodRE validates HTTP method names (all uppercase ASCII letters).
// Compiled once at package init instead of on every Handle call.
var httpMethodRE = regexp.MustCompile("^[A-Z]+$")

// Handle registers handlers for the given method and relative path, panicking
// if the method does not look like an HTTP verb.
func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes {
	if !httpMethodRE.MatchString(httpMethod) {
		panic("http method " + httpMethod + " is not valid")
	}
	return group.handle(httpMethod, relativePath, handlers)
}
// handle resolves the absolute path, prepends the group's middleware to the
// route handlers, and registers the route on the engine.
func (group *RouterGroup) handle(httpMethod, relativePath string, handlers HandlersChain) IRoutes {
	absolutePath := group.calculateAbsolutePath(relativePath)
	handlers = group.combineHandlers(handlers)
	group.engine.addRoute(httpMethod, absolutePath, handlers)
	return group.returnObj()
}
// Group creates a child group whose base path and handler chain extend this
// group's. Note the child's root field is left false, so returnObj yields the
// child itself, not the engine.
func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup {
	return &RouterGroup{
		Handlers: group.combineHandlers(handlers),
		basePath: group.calculateAbsolutePath(relativePath),
		engine:   group.engine,
	}
}

// Any registers the handlers for the methods this engine supports — here only
// GET and POST (unlike gin's Any, which covers all verbs).
func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes {
	group.handle("GET", relativePath, handlers)
	group.handle("POST", relativePath, handlers)
	return group.returnObj()
}

// POST is a shortcut for router.Handle("POST", path, handle).
func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes {
	return group.handle("POST", relativePath, handlers)
}

// GET is a shortcut for router.Handle("GET", path, handle).
func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes {
	return group.handle("GET", relativePath, handlers)
}

// returnObj lets chained calls on the root group return the engine itself.
func (group *RouterGroup) returnObj() IRoutes {
	if group.root {
		return group.engine
	}
	return group
}

// calculateAbsolutePath joins the group's base path with a relative path.
func (group *RouterGroup) calculateAbsolutePath(relativePath string) string {
	return joinPaths(group.basePath, relativePath)
}
// combineHandlers returns a new chain consisting of the group's handlers
// followed by the given handlers, panicking if the result would exceed the
// abort-index budget.
func (group *RouterGroup) combineHandlers(handlers HandlersChain) HandlersChain {
	const abortIndex int8 = math.MaxInt8 / 2
	total := len(group.Handlers) + len(handlers)
	if total >= int(abortIndex) {
		panic("too many handlers")
	}
	// Pre-size the destination and build it with append; the result is a
	// fresh slice so neither input chain is aliased.
	merged := make(HandlersChain, 0, total)
	merged = append(merged, group.Handlers...)
	merged = append(merged, handlers...)
	return merged
}
|
package main
import (
"log"
"os/exec"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestVersion runs the CLI via `go run` and checks the -v output string.
func TestVersion(t *testing.T) {
	version := executeChecked("go run main.go -v")
	assert.Equal(t, "oi version 0.0.1\n", version)
}
// executeChecked runs the given space-separated command line and returns its
// stdout; any execution error aborts the test binary via log.Fatalln.
// Note: naive splitting means arguments cannot contain spaces.
func executeChecked(command string) (output string) {
	log.Println("Executing:", command)
	parts := strings.Split(command, " ")
	stdout, err := exec.Command(parts[0], parts[1:]...).Output()
	output = string(stdout)
	if err != nil {
		log.Fatalln("Error:", err)
	}
	return
}
|
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TrilioVaultSpec defines the desired state of TrilioVault.
type TrilioVaultSpec struct {
	// Scope for the application which will be installed in the cluster:
	// NamespaceScope or ClusterScope.
	ApplicationScope string `json:"applicationScope,omitempty"`
	// Namespace in which the application will be installed.
	ApplicationNamespace string `json:"applicationNamespace,omitempty"`
	// Labels specifies the labels to attach to pods the operator creates for the
	// application.
	Labels map[string]string `json:"labels,omitempty"`
	// Additional annotations to be applied to pods during installation.
	Annotations map[string]string `json:"annotations,omitempty"`
	// Resources is the resource requirements for the containers.
	Resources v1.ResourceRequirements `json:"resources,omitempty"`
	// NodeSelector specifies a map of key-value pairs. For the pod to be eligible
	// to run on a node, the node must have each of the indicated key-value pairs as
	// labels.
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// The scheduling constraints on application pods.
	Affinity *v1.Affinity `json:"affinity,omitempty"`
}
// TrilioVaultStatus defines the observed state of TrilioVault.
// Currently empty: no status fields are reported yet.
type TrilioVaultStatus struct {
}

// +kubebuilder:object:root=true

// TrilioVault is the Schema for the triliovaults API.
type TrilioVault struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              TrilioVaultSpec   `json:"spec,omitempty"`
	Status            TrilioVaultStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// TrilioVaultList contains a list of TrilioVault.
type TrilioVaultList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []TrilioVault `json:"items"`
}

// init registers both types with the scheme builder so the API machinery can
// serve them.
func init() {
	SchemeBuilder.Register(&TrilioVault{}, &TrilioVaultList{})
}
|
// Copyright 2016-present, PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mockstore
import (
"bytes"
"context"
"math"
"strconv"
"testing"
"time"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/rowcodec"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/testutils"
"github.com/tikv/client-go/v2/tikv"
)
// TestClusterSplit inserts 1000 rows (plus one index entry each) into a mock
// TiKV cluster, splits the record and index keyspaces into 10 regions apiece,
// and verifies every key is still reachable with exactly 100 pairs per split
// region.
func TestClusterSplit(t *testing.T) {
	rpcClient, cluster, pdClient, err := testutils.NewMockTiKV("", nil)
	require.NoError(t, err)
	testutils.BootstrapWithSingleStore(cluster)
	mvccStore := rpcClient.MvccStore
	store, err := tikv.NewTestTiKVStore(rpcClient, pdClient, nil, nil, 0)
	require.NoError(t, err)
	defer store.Close()
	txn, err := store.Begin()
	require.NoError(t, err)
	// Mock inserting many rows in a table.
	tblID := int64(1)
	idxID := int64(2)
	colID := int64(3)
	handle := int64(1)
	sc := &stmtctx.StatementContext{TimeZone: time.UTC}
	for i := 0; i < 1000; i++ {
		// One row key plus one index key per handle.
		rowKey := tablecodec.EncodeRowKeyWithHandle(tblID, kv.IntHandle(handle))
		colValue := types.NewStringDatum(strconv.Itoa(int(handle)))
		// TODO: Should use session's TimeZone instead of UTC.
		rd := rowcodec.Encoder{Enable: true}
		rowValue, err1 := tablecodec.EncodeRow(sc, []types.Datum{colValue}, []int64{colID}, nil, nil, &rd)
		require.NoError(t, err1)
		txn.Set(rowKey, rowValue)
		encodedIndexValue, err1 := codec.EncodeKey(sc, nil, []types.Datum{colValue, types.NewIntDatum(handle)}...)
		require.NoError(t, err1)
		idxKey := tablecodec.EncodeIndexSeekKey(tblID, idxID, encodedIndexValue)
		txn.Set(idxKey, []byte{'0'})
		handle++
	}
	err = txn.Commit(context.Background())
	require.NoError(t, err)
	// Split Table into 10 regions.
	tableStart := tablecodec.GenTableRecordPrefix(tblID)
	cluster.SplitKeys(tableStart, tableStart.PrefixNext(), 10)
	// 10 table regions and first region and last region.
	regions := cluster.GetAllRegions()
	require.Len(t, regions, 12)
	allKeysMap := make(map[string]bool)
	recordPrefix := tablecodec.GenTableRecordPrefix(tblID)
	for _, region := range regions {
		startKey := toRawKey(region.Meta.StartKey)
		endKey := toRawKey(region.Meta.EndKey)
		if !bytes.HasPrefix(startKey, recordPrefix) {
			// Skip the boundary regions that hold no record keys.
			continue
		}
		pairs := mvccStore.Scan(startKey, endKey, math.MaxInt64, math.MaxUint64, kvrpcpb.IsolationLevel_SI, nil)
		if len(pairs) > 0 {
			// 1000 rows over 10 record regions => 100 per region.
			require.Len(t, pairs, 100)
		}
		for _, pair := range pairs {
			allKeysMap[string(pair.Key)] = true
		}
	}
	require.Len(t, allKeysMap, 1000)
	// Repeat the exercise for the index keyspace.
	indexStart := tablecodec.EncodeTableIndexPrefix(tblID, idxID)
	cluster.SplitKeys(indexStart, indexStart.PrefixNext(), 10)
	allIndexMap := make(map[string]bool)
	indexPrefix := tablecodec.EncodeTableIndexPrefix(tblID, idxID)
	regions = cluster.GetAllRegions()
	for _, region := range regions {
		startKey := toRawKey(region.Meta.StartKey)
		endKey := toRawKey(region.Meta.EndKey)
		if !bytes.HasPrefix(startKey, indexPrefix) {
			continue
		}
		pairs := mvccStore.Scan(startKey, endKey, math.MaxInt64, math.MaxUint64, kvrpcpb.IsolationLevel_SI, nil)
		if len(pairs) > 0 {
			require.Len(t, pairs, 100)
		}
		for _, pair := range pairs {
			allIndexMap[string(pair.Key)] = true
		}
	}
	require.Len(t, allIndexMap, 1000)
}
// toRawKey decodes an encoded region boundary key into its raw form. An empty
// key (the cluster's start/end sentinel) maps to nil; a decode failure is a
// broken test fixture, so it panics.
func toRawKey(encoded []byte) []byte {
	if len(encoded) == 0 {
		return nil
	}
	_, raw, err := codec.DecodeBytes(encoded, nil)
	if err != nil {
		panic(err)
	}
	return raw
}
|
package hello
func Hello() string {
return "Hello, world."
}
package hello
import "rsc.io/quote/v3"
func Hello() string {
return quote.HelloV3()
}
func Proverb() string {
return quote.Concurrency()
}
$ cat hello_test.go
package hello
import (
"testing"
)
func TestHello(t *testing.T) {
want := "Hello, world."
if got := Hello(); got != want {
t.Errorf("Hello() = %q, want %q", got, want)
}
}
func TestProverb(t *testing.T) {
want := "Concurrency is not parallelism."
if got := Proverb(); got != want {
t.Errorf("Proverb() = %q, want %q", got, want)
}
}
|
package main
import (
"testing"
"github.com/jackytck/projecteuler/tools"
)
// TestP76 checks the dp solution for Project Euler problem 76 against the
// known answer: input 100 must yield 190569291.
func TestP76(t *testing.T) {
	cases := []tools.TestCase{
		{In: 100, Out: 190569291},
	}
	tools.TestIntInt(t, cases, dp, "P76")
}
|
package fileutil
import (
"os"
"runtime"
)
// Exists returns whether the given file or directory exists or not.
// Note: stat errors other than "not exist" (e.g. permission denied) are
// treated as existing, matching the original behavior.
func Exists(path string) bool {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return false
	}
	return true
}
// HomeDir returns the path to the current user's home directory.
// On Windows it prefers HOMEDRIVE+HOMEPATH and falls back to USERPROFILE;
// everywhere else it reads HOME.
func HomeDir() string {
	if runtime.GOOS != "windows" {
		return os.Getenv("HOME")
	}
	if home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH"); home != "" {
		return home
	}
	return os.Getenv("USERPROFILE")
}
|
package types
import (
"fmt"
cmn "github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/common"
)
// VoteMessage is sent when voting for a proposal (or lack thereof).
type VoteMessage struct {
	Vote *Vote
}

// CommitStepMessage is sent when a block is committed.
type CommitStepMessage struct {
	Height           uint64        // height of the committed block
	BlockPartsHeader PartSetHeader // header describing the block's part set
	BlockParts       *cmn.BitArray // which parts this node has
}

// String returns a string representation of the commit step.
func (m *CommitStepMessage) String() string {
	return fmt.Sprintf("[CommitStep H:%v BP:%v BA:%v]", m.Height, m.BlockPartsHeader, m.BlockParts)
}
|
package main
import (
"fmt"
"os"
"github.com/hoop33/perm/cmd"
"github.com/hoop33/perm/config"
)
// main verifies the configuration directory is usable before handing control
// to the command dispatcher; a bad config dir aborts with exit code 1.
func main() {
	// Don't launch if we aren't going to be able to save our configuration.
	if err := verifyConfigDir(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	cmd.Execute()
}
// verifyConfigDir ensures the configuration directory exists and is a
// directory: it is created (mode 0700) when missing, and an error is returned
// when stat fails or the path is a regular file.
func verifyConfigDir() error {
	dir := config.Dir()
	info, err := os.Stat(dir)
	switch {
	case os.IsNotExist(err):
		// Missing: create it (and any parents).
		return os.MkdirAll(dir, 0700)
	case err != nil:
		// Any other stat failure is fatal.
		return err
	case !info.IsDir():
		// Present but not a directory.
		return fmt.Errorf("config directory '%s' is a file", dir)
	}
	return nil
}
|
/*
* Copyright 2018 Haines Chan
*
* This program is free software; you can redistribute and/or modify it
* under the terms of the standard MIT license. See LICENSE for more details
*/
package etcd
import (
"context"
"crypto/tls"
"fmt"
"net"
"strings"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/hainesc/anchor/pkg/store"
"github.com/hainesc/anchor/pkg/utils"
)
// Key prefixes partitioning the etcd keyspace used by this store.
const (
	ipsPrefix     = "/anchor/cn/" // container id -> "ip,pod,namespace,controller"
	gatewayPrefix = "/anchor/gw/" // subnet -> gateway IP
	userPrefix    = "/anchor/ns/" // namespace -> allocated IP ranges
	lockKey       = "/anchor/lock"
)

// Etcd is a simple etcd-backed store.
type Etcd struct {
	mutex *concurrency.Mutex // distributed lock on lockKey
	kv    clientv3.KV
}

// Store implements the Store interface.
var _ store.Store = &Etcd{}
// NewEtcdClient news a etcd client backed by the given endpoints, optionally
// secured with tlsConfig. It also creates the session and distributed mutex
// used for store-level locking. The network parameter is currently unused.
func NewEtcdClient(network string, endPoints []string, tlsConfig *tls.Config) (*Etcd, error) {
	// We don't check the config here since clientv3 will do.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endPoints,
		DialTimeout: 5 * time.Second,
		TLS:         tlsConfig,
	})
	if err != nil {
		return nil, err
	}
	session, err := concurrency.NewSession(cli)
	if err != nil {
		return nil, err
	}
	mutex := concurrency.NewMutex(session, lockKey)
	kv := clientv3.NewKV(cli)
	return &Etcd{mutex, kv}, nil
}
// NewEtcdClientWithoutSSl news a etcd client without ssl.
// It is exactly NewEtcdClient with a nil TLS config (clientv3 treats a nil
// TLS field as "no TLS"), so delegate instead of duplicating the
// connect/session/mutex setup logic.
func NewEtcdClientWithoutSSl(network string, endPoints []string) (*Etcd, error) {
	return NewEtcdClient(network, endPoints, nil)
}
// Lock locks the store via the distributed etcd mutex.
func (e *Etcd) Lock() error {
	return e.mutex.Lock(context.TODO())
}

// Unlock unlocks the store.
func (e *Etcd) Unlock() error {
	return e.mutex.Unlock(context.TODO())
}

// Close closes the store. Currently a no-op; the lock is left to expire with
// the session rather than being released here.
func (e *Etcd) Close() error {
	return nil
	// return s.Unlock()
}
// RetrieveGateway retrieves gateway for subnet. It returns nil on any etcd
// error or when no gateway is recorded; callers cannot distinguish the two.
func (e *Etcd) RetrieveGateway(subnet *net.IPNet) net.IP {
	resp, err := e.kv.Get(context.TODO(), gatewayPrefix+subnet.String())
	if err != nil || len(resp.Kvs) == 0 {
		return nil
	}
	return net.ParseIP(string(resp.Kvs[0].Value))
}
// RetrieveAllocated retrieves allocated IPs in subnet for namespace, parsing
// the stored range string into a RangeSet constrained to subnet.
func (e *Etcd) RetrieveAllocated(namespace string, subnet *net.IPNet) (*utils.RangeSet, error) {
	resp, err := e.kv.Get(context.TODO(), userPrefix+namespace)
	if err != nil {
		return nil, err
	}
	if len(resp.Kvs) == 0 {
		return nil, fmt.Errorf("no IP allocated for %s found in etcd", namespace)
	}
	// TODO:
	ret := utils.RangeSet{}
	return ret.Concat(string(resp.Kvs[0].Value), subnet)
}
// RetrieveUsed retrieves used IP in subnet for namespace. It scans every
// in-use record (value layout "ip,pod,namespace,controller") and collects the
// IPs whose namespace field matches, returned as a RangeSet.
func (e *Etcd) RetrieveUsed(namespace string, subnet *net.IPNet) (*utils.RangeSet, error) {
	resp, err := e.kv.Get(context.TODO(), ipsPrefix, clientv3.WithPrefix())
	if err != nil {
		return nil, err
	}
	// TODO: which return type is best? []string or RangeSet?
	s := make([]string, 0)
	for _, item := range resp.Kvs {
		row := strings.Split(string(item.Value), ",")
		// Guard against malformed records: indexing row[2] on a short split
		// would panic.
		if len(row) > 2 && row[2] == namespace {
			s = append(s, row[0])
		}
	}
	ret := utils.RangeSet{}
	// TODO:
	return ret.Concat(strings.Join(s, ","), subnet)
}
// Reserve writes the allocation record for container id to the store. The
// stored value layout is "ip,podName,podNamespace,controllerName". It returns
// false with the etcd error when the write fails.
func (e *Etcd) Reserve(id string, ip net.IP, podName string, podNamespace string, controllerName string) (bool, error) {
	// TODO: lock
	if _, err := e.kv.Put(context.TODO(), ipsPrefix+id,
		ip.String()+","+podName+","+podNamespace+","+controllerName); err != nil {
		// BUG FIX: the original returned (false, nil), silently swallowing the
		// etcd error; propagate it so callers can tell failure from success.
		return false, err
	}
	return true, nil
}
// Release releases the IP which allocated to the container identified by id,
// by deleting its record from the store.
func (e *Etcd) Release(id string) error {
	_, err := e.kv.Delete(context.TODO(), ipsPrefix+id)
	return err
}
// GatewayMap is the map of subnet and gateway, used by monkey.
type GatewayMap struct {
	Subnet  string `json:"subnet"`
	Gateway string `json:"gw"`
}

// AllocateMap is the map of dedicated IPs and the namespace, used by monkey.
type AllocateMap struct {
	Allocate  string `json:"ips"`
	Namespace string `json:"ns"`
	// TODO: Add label to support specifying which IPs to use for given pods.
	// Label string `json:"label"`
}

// InUsedMap is the map of ContainerID and its IP, used by monkey.
type InUsedMap struct {
	ContainerID string `json:"id"`
	IP          net.IP `json:"ip"`
	Pod         string `json:"pod"`
	Namespace   string `json:"ns"`
	App         string `json:"app,omitempty"`
	Service     string `json:"svc,omitempty"`
}
// AllGatewayMap gets all gateway map in the store, silently skipping entries
// whose subnet is not valid CIDR or whose gateway is not a valid IP.
func (e *Etcd) AllGatewayMap() (*[]GatewayMap, error) {
	gms := make([]GatewayMap, 0)
	resp, err := e.kv.Get(context.TODO(), gatewayPrefix, clientv3.WithPrefix())
	if err != nil {
		return nil, err
	}
	for _, item := range resp.Kvs {
		// The key is gatewayPrefix + subnet; the value is the gateway IP.
		subnet := strings.TrimPrefix(string(item.Key), gatewayPrefix)
		gw := string(item.Value)
		_, _, err := net.ParseCIDR(subnet)
		if err != nil {
			// invalid format, just omit.
			continue
		}
		if net.ParseIP(gw) == nil {
			// invalid format, just omit.
			continue
		}
		gms = append(gms, GatewayMap{
			Subnet:  subnet,
			Gateway: gw,
		})
	}
	return &gms, nil
}
// InsertGatewayMap stores the gateway for gm.Subnet.
func (e *Etcd) InsertGatewayMap(gm GatewayMap) error {
	_, err := e.kv.Put(context.TODO(), gatewayPrefix+gm.Subnet, gm.Gateway)
	return err
}
// DeleteGatewayMap deletes a gateway map. It stops at the first failing
// delete, so earlier entries may already be removed when an error is returned.
func (e *Etcd) DeleteGatewayMap(gms []GatewayMap) error {
	for _, gm := range gms {
		// if _, err := e.kv.Delete(context.TODO(), gm.Subnet.String()); err != nil {
		if _, err := e.kv.Delete(context.TODO(), gatewayPrefix+gm.Subnet); err != nil {
			// TODO: error when delete one item, should we just stop and return error?
			// If we omit one error, maybe all errors are omitted.
			return err
		}
	}
	return nil
}
// RetrieveUsedbyNamespace retrieves in-use IP entries. Admin callers see
// every entry; other callers only entries whose namespace field matches
// namespace. Each returned string is the raw stored value
// "ip,pod,namespace,controller".
func (e *Etcd) RetrieveUsedbyNamespace(namespace string, adminRole bool) (*[]string, error) {
	resp, err := e.kv.Get(context.TODO(), ipsPrefix, clientv3.WithPrefix())
	if err != nil {
		return nil, err
	}
	// TODO: which return type is best? []string or RangeSet?
	s := make([]string, 0)
	for _, item := range resp.Kvs {
		value := string(item.Value)
		if adminRole {
			s = append(s, value)
			continue
		}
		// Guard against malformed values: the original indexed row[2]
		// unconditionally and would panic on entries with fewer than
		// three comma-separated fields.
		row := strings.Split(value, ",")
		if len(row) > 2 && row[2] == namespace {
			s = append(s, value)
		}
	}
	return &s, nil
}
// AllAllocate returns every namespace->allocation mapping in the store.
func (e *Etcd) AllAllocate() (*[]AllocateMap, error) {
	resp, err := e.kv.Get(context.TODO(), userPrefix, clientv3.WithPrefix())
	if err != nil {
		return nil, err
	}
	ams := make([]AllocateMap, 0, len(resp.Kvs))
	for _, item := range resp.Kvs {
		ams = append(ams, AllocateMap{
			Allocate:  string(item.Value),
			Namespace: strings.TrimPrefix(string(item.Key), userPrefix),
		})
	}
	return &ams, nil
}
// InsertAllocateMap stores am's allocation string under its namespace key.
func (e *Etcd) InsertAllocateMap(am AllocateMap) error {
	_, err := e.kv.Put(context.TODO(), userPrefix+am.Namespace, am.Allocate)
	return err
}

// DeleteAllocateMap deletes each given allocate map, stopping at the first
// failure so that no delete error goes unnoticed.
func (e *Etcd) DeleteAllocateMap(ams []AllocateMap) error {
	for _, am := range ams {
		key := userPrefix + am.Namespace
		// TODO: error when delete one item, should we just stop and return error?
		// If we omit one error, maybe all errors are omitted.
		if _, err := e.kv.Delete(context.TODO(), key); err != nil {
			return err
		}
	}
	return nil
}
|
package cmd
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// RootCmd is the root command of tankerctl.
// NOTE(review): the Short text mentions "gasoline data as sensision
// metrics", which looks copy-pasted from another tool — confirm it
// describes tankerctl.
var RootCmd = &cobra.Command{
	Use:   "tankerctl",
	Short: "Export gasoline data as sensision metrics",
}
// init wires the configuration loader and registers the persistent
// --verbose/-v flag with viper.
func init() {
	cobra.OnInitialize(configure)
	RootCmd.PersistentFlags().BoolP("verbose", "v", false, "enable verbose output")
	// BindPFlags can fail; the error was previously discarded silently.
	if err := viper.BindPFlags(RootCmd.PersistentFlags()); err != nil {
		log.Warn(err)
	}
}
func configure() {
if viper.GetBool("verbose") {
log.SetLevel(log.DebugLevel)
} else {
log.SetLevel(log.InfoLevel)
}
viper.AddConfigPath("/etc/tankerctl")
viper.AddConfigPath("$HOME/.tankerctl")
viper.AddConfigPath(".")
viper.SetConfigName("config")
if err := viper.MergeInConfig(); err != nil {
log.Warn(err)
}
}
|
package main
import (
"fmt"
"os"
"github.com/Cloud-Foundations/Dominator/lib/log"
)
// showImageSubcommand implements the "show image" subcommand: it lists the
// contents of the image named by args[0].
func showImageSubcommand(args []string, logger log.DebugLogger) error {
	// Guard against a missing argument; the original indexed args[0]
	// unconditionally and would panic on an empty argument list.
	if len(args) < 1 {
		return fmt.Errorf("error showing image: no image name specified")
	}
	if err := showImage(args[0]); err != nil {
		return fmt.Errorf("error showing image: %s", err)
	}
	return nil
}
// showImage fetches the typed image and writes its file listing to stdout
// using the globally configured selector and filter.
func showImage(image string) error {
	fs, _, err := getTypedImage(image)
	if err == nil {
		err = fs.Listf(os.Stdout, listSelector, listFilter)
	}
	return err
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//132. Palindrome Partitioning II
//Given a string s, partition s such that every substring of the partition is a palindrome.
//Return the minimum cuts needed for a palindrome partitioning of s.
//Example:
//Input: "aab"
//Output: 1
//Explanation: The palindrome partitioning ["aa","b"] could be produced using 1 cut.
//func minCut(s string) int {
//}
// Time Is Money
|
package ntp
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/authelia/authelia/v4/internal/configuration/schema"
"github.com/authelia/authelia/v4/internal/configuration/validator"
)
// TestShouldCheckNTPV4 verifies the startup check succeeds against a public
// NTPv4 server with a 3s desync tolerance.
func TestShouldCheckNTPV4(t *testing.T) {
	config := &schema.Configuration{
		NTP: schema.NTP{
			Address:       &schema.AddressUDP{Address: schema.NewAddressFromNetworkValues(schema.AddressSchemeUDP, "time.cloudflare.com", 123)},
			Version:       4,
			MaximumDesync: time.Second * 3,
		},
	}

	validator.ValidateNTP(config, schema.NewStructValidator())

	provider := NewProvider(&config.NTP)
	assert.NoError(t, provider.StartupCheck())
}
// TestShouldCheckNTPV3 verifies the startup check succeeds against a public
// NTPv3 server with a 3s desync tolerance.
func TestShouldCheckNTPV3(t *testing.T) {
	config := &schema.Configuration{
		NTP: schema.NTP{
			Address:       &schema.AddressUDP{Address: schema.NewAddressFromNetworkValues(schema.AddressSchemeUDP, "time.cloudflare.com", 123)},
			Version:       3,
			MaximumDesync: time.Second * 3,
		},
	}

	validator.ValidateNTP(config, schema.NewStructValidator())

	provider := NewProvider(&config.NTP)
	assert.NoError(t, provider.StartupCheck())
}
|
package meda
import (
"context"
"database/sql"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// checksumWarningsTableNameBase is the unprefixed checksum warnings table name.
const checksumWarningsTableNameBase = "checksum_warnings"

// ChecksumWarningsTableName returns the checksum warnings table name with the
// configured table prefix applied.
func (d *DB) ChecksumWarningsTableName() string {
	name := d.Config.TablePrefix + checksumWarningsTableNameBase
	return name
}
// checksumWarningsCreateTableQuery creates the checksum warnings table if it
// does not yet exist. {CHECKSUM_WARNINGS} is replaced with the prefixed table
// name by GenericQuery's SubstituteAll.
const checksumWarningsCreateTableQuery = GenericQuery(`
CREATE TABLE IF NOT EXISTS {CHECKSUM_WARNINGS} (
id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
file_id bigint(20) unsigned NOT NULL,
path varbinary(4096) NOT NULL,
modification_time datetime(6) NOT NULL,
file_size bigint(20) unsigned NOT NULL,
expected_checksum varbinary(64) NOT NULL,
actual_checksum varbinary(64) NOT NULL,
discovered bigint(20) unsigned NOT NULL,
last_read bigint(20) unsigned NOT NULL,
created datetime(6) NOT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
`)
// checksumWarningsCreateTable creates the checksum warnings table if needed.
func (d *DB) checksumWarningsCreateTable(ctx context.Context) error {
	query := checksumWarningsCreateTableQuery.SubstituteAll(d)
	_, err := d.ExecContext(ctx, query)
	return errors.Wrap(err, "(*DB).checksumWarningsCreateTable")
}
// ChecksumWarning is one row of the checksum warnings table: a file whose
// actual checksum did not match the expected one when it was read.
type ChecksumWarning struct {
	ID               uint64 `db:"id"`
	FileID           uint64 `db:"file_id"`
	Path             string `db:"path"`
	ModificationTime Time   `db:"modification_time"`
	FileSize         uint64 `db:"file_size"`
	ExpectedChecksum []byte `db:"expected_checksum"`
	ActualChecksum   []byte `db:"actual_checksum"`
	Discovered       uint64 `db:"discovered"` // run id during which the mismatch was found
	LastRead         uint64 `db:"last_read"`
	Created          Time   `db:"created"`
}
// checksumWarningsPrepareInsertQuery inserts one checksum warning using
// named parameters matching the ChecksumWarning db tags (id auto-increments).
const checksumWarningsPrepareInsertQuery = GenericQuery(`
INSERT INTO {CHECKSUM_WARNINGS} (
file_id,
path,
modification_time,
file_size,
expected_checksum,
actual_checksum,
discovered,
last_read,
created
) VALUES (
:file_id,
:path,
:modification_time,
:file_size,
:expected_checksum,
:actual_checksum,
:discovered,
:last_read,
:created
)
;
`)
// ChecksumWarningsPrepareInsert prepares the named insert statement for
// checksum warnings. A nil preparer falls back to the DB itself.
func (d *DB) ChecksumWarningsPrepareInsert(ctx context.Context, preparer NamedPreparerContext) (*sqlx.NamedStmt, error) {
	if preparer == nil {
		preparer = &d.DB
	}

	query := checksumWarningsPrepareInsertQuery.SubstituteAll(d)
	stmt, err := preparer.PrepareNamedContext(ctx, query)
	if err != nil {
		return nil, errors.Wrap(err, "(*DB).ChecksumWarningsPrepareInsert")
	}
	return stmt, nil
}
// checksumWarningsAppendFromRows scans every row of rows into ChecksumWarning
// values appended to checksumWarnings and returns the extended slice. On any
// error the slice is truncated back to its original length, so callers never
// see partially scanned entries. rows is closed on every return path.
func checksumWarningsAppendFromRows(checksumWarnings []ChecksumWarning, rows *sqlx.Rows) ([]ChecksumWarning, error) {
	// baseInd remembers the incoming length for error-path truncation.
	baseInd := len(checksumWarnings)
	var err error
	i := baseInd
	for rows.Next() {
		// Grow by one element, reusing spare capacity when available
		// instead of allocating a temporary and copying.
		if i == cap(checksumWarnings) {
			checksumWarnings = append(checksumWarnings, ChecksumWarning{})
		} else {
			checksumWarnings = checksumWarnings[:len(checksumWarnings)+1]
		}
		// Scan directly into the just-added slot to avoid a copy.
		err = rows.StructScan(&checksumWarnings[i])
		if err != nil {
			_ = rows.Close()
			return checksumWarnings[:baseInd], errors.Wrap(err, "checksumWarningsAppendFromRows: scan row into struct")
		}
		i += 1
	}
	if err = rows.Err(); err != nil {
		_ = rows.Close()
		return checksumWarnings[:baseInd], errors.Wrap(err, "checksumWarningsAppendFromRows: iterate over rows")
	}
	if err = rows.Close(); err != nil {
		return checksumWarnings[:baseInd], errors.Wrap(err, "checksumWarningsAppendFromRows: close rows")
	}
	return checksumWarnings, nil
}
// checksumWarningsQueryAllQuery selects every checksum warning, ordered by
// ascending id.
const checksumWarningsQueryAllQuery = GenericQuery(`
SELECT
id,
file_id,
path,
modification_time,
file_size,
expected_checksum,
actual_checksum,
discovered,
last_read,
created
FROM {CHECKSUM_WARNINGS}
ORDER BY id ASC
;
`)
// ChecksumWarningsQueryAll runs the query returning every checksum warning,
// ordered by id. A nil querier falls back to the DB itself.
func (d *DB) ChecksumWarningsQueryAll(ctx context.Context, querier sqlx.QueryerContext) (*sqlx.Rows, error) {
	if querier == nil {
		querier = &d.DB
	}

	query := checksumWarningsQueryAllQuery.SubstituteAll(d)
	rows, err := querier.QueryxContext(ctx, query)
	if err != nil {
		return nil, errors.Wrap(err, "(*DB).ChecksumWarningsQueryAll")
	}
	return rows, nil
}

// ChecksumWarningsFetchAll loads every checksum warning into a fresh slice.
func (d *DB) ChecksumWarningsFetchAll(ctx context.Context, querier sqlx.QueryerContext) ([]ChecksumWarning, error) {
	warnings, err := d.ChecksumWarningsAppendAll(nil, ctx, querier)
	if err == nil {
		return warnings, nil
	}
	return nil, errors.Wrap(err, "(*DB).ChecksumWarningsFetchAll")
}

// ChecksumWarningsAppendAll appends every checksum warning to
// checksumWarnings and returns the extended slice.
func (d *DB) ChecksumWarningsAppendAll(checksumWarnings []ChecksumWarning, ctx context.Context, querier sqlx.QueryerContext) ([]ChecksumWarning, error) {
	const wrapMsg = "(*DB).ChecksumWarningsAppendAll"

	rows, err := d.ChecksumWarningsQueryAll(ctx, querier)
	if err != nil {
		return checksumWarnings, errors.Wrap(err, wrapMsg)
	}

	appended, err := checksumWarningsAppendFromRows(checksumWarnings, rows)
	if err != nil {
		return appended, errors.Wrap(err, wrapMsg)
	}
	return appended, nil
}
// checksumWarningsQueryFromLastNRunsQuery selects the checksum warnings whose
// discovered run id is among the ? most recent runs, ordered by ascending id.
const checksumWarningsQueryFromLastNRunsQuery = GenericQuery(`
SELECT
{CHECKSUM_WARNINGS}.id,
{CHECKSUM_WARNINGS}.file_id,
{CHECKSUM_WARNINGS}.path,
{CHECKSUM_WARNINGS}.modification_time,
{CHECKSUM_WARNINGS}.file_size,
{CHECKSUM_WARNINGS}.expected_checksum,
{CHECKSUM_WARNINGS}.actual_checksum,
{CHECKSUM_WARNINGS}.discovered,
{CHECKSUM_WARNINGS}.last_read,
{CHECKSUM_WARNINGS}.created
FROM {CHECKSUM_WARNINGS}
INNER JOIN (
SELECT
id
FROM {RUNS}
ORDER BY id DESC
LIMIT ?
) AS last_runs
ON {CHECKSUM_WARNINGS}.discovered = last_runs.id
ORDER BY id ASC
;
`)
// ChecksumWarningsQueryFromLastNRuns runs the query returning the checksum
// warnings discovered during the n most recent runs. A nil querier falls
// back to the DB itself.
func (d *DB) ChecksumWarningsQueryFromLastNRuns(ctx context.Context, querier sqlx.QueryerContext, n uint64) (*sqlx.Rows, error) {
	if querier == nil {
		querier = &d.DB
	}

	query := checksumWarningsQueryFromLastNRunsQuery.SubstituteAll(d)
	rows, err := querier.QueryxContext(ctx, query, n)
	if err != nil {
		return nil, errors.Wrapf(err, "(*DB).ChecksumWarningsQueryFromLastNRuns: n = %d", n)
	}
	return rows, nil
}

// ChecksumWarningsFetchFromLastNRuns loads the checksum warnings discovered
// during the n most recent runs into a fresh slice.
func (d *DB) ChecksumWarningsFetchFromLastNRuns(ctx context.Context, querier sqlx.QueryerContext, n uint64) ([]ChecksumWarning, error) {
	warnings, err := d.ChecksumWarningsAppendFromLastNRuns(nil, ctx, querier, n)
	if err != nil {
		return warnings, errors.Wrap(err, "(*DB).ChecksumWarningsFetchFromLastNRuns")
	}
	return warnings, nil
}

// ChecksumWarningsAppendFromLastNRuns appends the checksum warnings
// discovered during the n most recent runs to checksumWarnings.
func (d *DB) ChecksumWarningsAppendFromLastNRuns(checksumWarnings []ChecksumWarning, ctx context.Context, querier sqlx.QueryerContext, n uint64) ([]ChecksumWarning, error) {
	const wrapMsg = "(*DB).ChecksumWarningsAppendFromLastNRuns"

	rows, err := d.ChecksumWarningsQueryFromLastNRuns(ctx, querier, n)
	if err != nil {
		return checksumWarnings, errors.Wrap(err, wrapMsg)
	}

	appended, err := checksumWarningsAppendFromRows(checksumWarnings, rows)
	if err != nil {
		return appended, errors.Wrap(err, wrapMsg)
	}
	return appended, nil
}
// checksumWarningsQueryFromRunByIDQuery selects the checksum warnings
// discovered during one specific run (?), ordered by ascending id.
const checksumWarningsQueryFromRunByIDQuery = GenericQuery(`
SELECT
id,
file_id,
path,
modification_time,
file_size,
expected_checksum,
actual_checksum,
discovered,
last_read,
created
FROM {CHECKSUM_WARNINGS}
WHERE discovered = ?
ORDER BY id ASC
;
`)
// ChecksumWarningsQueryFromRunByID runs the query returning the checksum
// warnings discovered during run runID. A nil querier falls back to the DB
// itself.
func (d *DB) ChecksumWarningsQueryFromRunByID(ctx context.Context, querier sqlx.QueryerContext, runID uint64) (*sqlx.Rows, error) {
	if querier == nil {
		querier = &d.DB
	}

	query := checksumWarningsQueryFromRunByIDQuery.SubstituteAll(d)
	rows, err := querier.QueryxContext(ctx, query, runID)
	if err != nil {
		return nil, errors.Wrapf(err, "(*DB).ChecksumWarningsQueryFromRunByID: runID = %d", runID)
	}
	return rows, nil
}

// ChecksumWarningsFetchFromRunByID loads the checksum warnings discovered
// during run runID into a fresh slice.
func (d *DB) ChecksumWarningsFetchFromRunByID(ctx context.Context, querier sqlx.QueryerContext, runID uint64) ([]ChecksumWarning, error) {
	checksumWarnings, err := d.ChecksumWarningsAppendFromRunByID(nil, ctx, querier, runID)
	if err != nil {
		return checksumWarnings, errors.Wrap(err, "(*DB).ChecksumWarningsFetchFromRunByID")
	}
	return checksumWarnings, nil
}

// ChecksumWarningsAppendFromRunByID appends the checksum warnings discovered
// during run runID to checksumWarnings.
func (d *DB) ChecksumWarningsAppendFromRunByID(checksumWarnings []ChecksumWarning, ctx context.Context, querier sqlx.QueryerContext, runID uint64) ([]ChecksumWarning, error) {
	const wrapMsg = "(*DB).ChecksumWarningsAppendFromRunByID"

	rows, err := d.ChecksumWarningsQueryFromRunByID(ctx, querier, runID)
	if err != nil {
		return checksumWarnings, errors.Wrap(err, wrapMsg)
	}

	appended, err := checksumWarningsAppendFromRows(checksumWarnings, rows)
	if err != nil {
		return appended, errors.Wrap(err, wrapMsg)
	}
	return appended, nil
}
// checksumWarningsDeleteByIDQuery deletes checksum warnings by id; the (?)
// list is expanded with sqlx.In before execution.
const checksumWarningsDeleteByIDQuery = GenericQuery(`
DELETE FROM {CHECKSUM_WARNINGS}
WHERE id IN (?)
;
`)
// ChecksumWarningsDeleteByID deletes the checksum warnings with the given
// ids. A nil execer falls back to the DB itself.
func (d *DB) ChecksumWarningsDeleteByID(ctx context.Context, execer RebindExecerContext, checksumWarningIDs []uint64) (sql.Result, error) {
	if execer == nil {
		execer = &d.DB
	}

	query, args, err := sqlx.In(checksumWarningsDeleteByIDQuery.SubstituteAll(d), checksumWarningIDs)
	if err != nil {
		return nil, errors.Wrap(err, "(*DB).ChecksumWarningsDeleteByID: expand query")
	}

	// sqlx.In produces generic `?` placeholders; rebind them to match the
	// backend in use before executing.
	res, err := execer.ExecContext(ctx, execer.Rebind(query), args...)
	if err != nil {
		return nil, errors.Wrap(err, "(*DB).ChecksumWarningsDeleteByID: exec query")
	}
	return res, nil
}
// Compile-time check that rowsAffectedResult satisfies sql.Result.
var _ sql.Result = rowsAffectedResult(0)

// rowsAffectedResult is a sql.Result carrying only a rows-affected count,
// used to aggregate results across several delete statements.
type rowsAffectedResult int64

// LastInsertId always returns 0; no inserts are performed.
func (r rowsAffectedResult) LastInsertId() (int64, error) {
	return 0, nil
}

// RowsAffected returns the aggregated rows-affected count.
func (r rowsAffectedResult) RowsAffected() (int64, error) {
	return int64(r), nil
}
// ChecksumWarningsDeleteChecksumWarnings deletes the given checksum warnings
// in batches of at most MaxPlaceholders ids per statement. The returned
// sql.Result carries the total rows affected (LastInsertId is always 0).
func (d *DB) ChecksumWarningsDeleteChecksumWarnings(ctx context.Context, execer RebindExecerContext, checksumWarnings []ChecksumWarning) (sql.Result, error) {
	var checksumWarningIDs []uint64
	var totalRowsAffected int64
	for i := 0; i < len(checksumWarnings); {
		rangeEnd := i + MaxPlaceholders
		if rangeEnd >= len(checksumWarnings) {
			rangeEnd = len(checksumWarnings)
		}
		// Reuse the id buffer across batches.
		checksumWarningIDs = append(checksumWarningIDs[:0], make([]uint64, rangeEnd-i)...)
		for ind := range checksumWarnings[i:rangeEnd] {
			checksumWarningIDs[ind] = checksumWarnings[i+ind].ID
		}
		res, err := d.ChecksumWarningsDeleteByID(ctx, execer, checksumWarningIDs)
		// BUG fix: the original called res.RowsAffected() before checking
		// err, dereferencing a nil res whenever the delete failed (it also
		// shadowed err, masking the RowsAffected error handling).
		if err != nil {
			return rowsAffectedResult(totalRowsAffected), errors.Wrap(err, "(*DB).ChecksumWarningsDeleteChecksumWarnings")
		}
		if rowsAffected, raErr := res.RowsAffected(); raErr == nil {
			totalRowsAffected += rowsAffected
		}
		i = rangeEnd
	}
	return rowsAffectedResult(totalRowsAffected), nil
}
|
/*
Challenge
Premise
Bob is a novice pianist who can only play sequences of single notes. In addition, he does quite an interesting thing: for every note after the first, if it's higher than the previous note was, he uses the finger directly to the right of the previous finger used; if lower, to the left; if the same pitch, well, the same finger.
Let's take Auld Lang Syne as an example, and arbitrarily suppose, only for the sake of this example, that Bob uses the very right side of his right hand.
Pitch: Should < auld = ac- = quain- < tance > be > for- < got
Digit: mid ring ring ring pinky ring mid ring
Alice wants to convince him of the stupidity of his playing...
Task
Input: a sequence of n
MIDI note numbers (which are integers between 0 and 127 inclusive), where 2≤n≤10000.
Output: the number of fingers required to finish the melody with the playing style outlined in 'Premise'.
Please note that the answer may be more than 5.
No consideration of the starting finger is needed. Assume that the choice is optimal for playable melodies and has nothing (else) to do with the number of fingers required.
Example 1
Input: 0 0 0 0
Output: 1
Example 2
Input: 43 48 48 48 52 50 48 50
Output: 3
Example 3
Input: 86 88 84 81 83 79 74 76 72 69 71 67 62 64 60 57 59 57 56 55
Output: 9
Example 4
Input: 82 79 78 76 78 76 74 73 70 67 66 64 66 64 62 61
Output: 12
Remarks
This is code-golf, so fewest bytes wins.
Standard rules, I/O rules and loophole rules apply.
If possible, link an online demo of your code.
Please explain your code.
*/
package main
// main exercises fingers against the challenge examples plus two edge cases.
func main() {
	cases := []struct {
		notes []int
		want  int
	}{
		{[]int{0, 0, 0, 0}, 1},
		{[]int{43, 48, 48, 48, 52, 50, 48, 50}, 3},
		{[]int{86, 88, 84, 81, 83, 79, 74, 76, 72, 69, 71, 67, 62, 64, 60, 57, 59, 57, 56, 55}, 9},
		{[]int{82, 79, 78, 76, 78, 76, 74, 73, 70, 67, 66, 64, 66, 64, 62, 61}, 12},
		{[]int{2}, 1},
		{[]int{}, 0},
	}
	for _, c := range cases {
		assert(fingers(c.notes) == c.want)
	}
}
// sign reports the direction of movement from x to y: 1 when y is higher,
// -1 when lower, 0 when equal.
func sign(x, y int) int {
	switch {
	case y > x:
		return 1
	case y < x:
		return -1
	default:
		return 0
	}
}
// fingers returns the number of distinct fingers Bob needs to play melody p:
// starting on an arbitrary finger, each higher note moves one finger right,
// each lower note one finger left, and equal pitch stays put. The answer is
// the number of distinct finger positions visited (0 for an empty melody).
func fingers(p []int) int {
	if len(p) == 0 {
		return 0
	}
	cur := 1
	visited := map[int]struct{}{cur: {}}
	for i := 1; i < len(p); i++ {
		switch {
		case p[i] > p[i-1]:
			cur++
		case p[i] < p[i-1]:
			cur--
		}
		visited[cur] = struct{}{}
	}
	return len(visited)
}
// assert panics when x is false; a minimal test harness for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
|
package logger
import (
"errors"
"fmt"
_ "github.com/mailru/easyjson/gen"
"path/filepath"
"runtime"
"strings"
"time"
)
// ISO8601 is the timestamp layout for log entries. Go layouts are written
// against the reference time "2006-01-02 15:04:05" (Jan 2); the original
// value "2006-02-03 15:04:05" mixed up the month/day fields and used "03"
// (the 12-hour clock field) as the day, producing wrong timestamps.
const ISO8601 = "2006-01-02 15:04:05"
// Log severity levels; lower values are more severe. A logger configured at
// level N emits messages whose severity value is <= N.
const (
	LevelError = iota
	LevelWarning
	LevelInformational
	LevelDebug
)

//easyjson
type logger struct {
	// level is the configured verbosity threshold (one of the Level*
	// constants). NOTE(review): the field is unexported, so encoding/json
	// ignores it regardless of the tag, and the tag has no field name —
	// confirm against the easyjson-generated MarshalJSON whether this tag
	// has any effect.
	level   uint8  `json:"omitempty"`
	Level   string `json:"level"`   // severity name: "INFO", "WARN", "ERROR" or "DEBUG"
	Date    string `json:"time"`    // timestamp formatted with ISO8601
	Queue   string `json:"queue"`   // queue name set once at construction
	ID      string `json:"id"`      // correlation id set via SetID
	Message string `json:"message"` // the log message itself
}
// SetLevel configures the verbosity threshold from its string name
// ("DEBUG", "ERROR", "WARN" or "INFO"); any other name yields an error.
func (l *logger) SetLevel(level string) error {
	names := map[string]uint8{
		"DEBUG": LevelDebug,
		"ERROR": LevelError,
		"WARN":  LevelWarning,
		"INFO":  LevelInformational,
	}
	v, ok := names[level]
	if !ok {
		return errors.New("level undefined")
	}
	l.level = v
	return nil
}
// NewLogger returns a logger tagged with the given queue name.
func NewLogger(Queue string) *logger {
	l := &logger{}
	l.Queue = Queue
	return l
}

// SetID tags subsequent log entries with the given correlation ID.
func (l *logger) SetID(ID string) {
	l.ID = ID
}
// Info logs msg at INFO level as one JSON line on stdout. NOTE(review): no
// level check is performed here, matching the original behavior — INFO
// messages are always emitted.
func (l *logger) Info(msg string) {
	l.Level = "INFO"
	l.Message = msg
	l.Date = time.Now().Format(ISO8601)
	fmt.Println(l.toJson())
}
// Warn logs msg at WARN level as one JSON line on stdout. The message is
// suppressed only when the configured level is stricter than LevelWarning.
func (l *logger) Warn(msg string) {
	// BUG fix: the original tested l.level <= LevelWarning, which also
	// suppressed warnings when the logger was configured AT warning level.
	if l.level < LevelWarning {
		return
	}
	l.Level = "WARN"
	l.Message = msg
	// Consistency fix: Info/Error/Debug stamp Date; Warn previously left a
	// stale timestamp from the prior entry.
	l.Date = time.Now().Format(ISO8601)
	fmt.Println(l.toJson())
}
// Error logs err at ERROR level, decorated with caller information, as one
// JSON line on stdout. Errors are always emitted regardless of level.
func (l *logger) Error(err error) {
	msg := createMessageLog(err)
	l.Message = msg.Error()
	// BUG fix: the original labeled error entries "WARN".
	l.Level = "ERROR"
	l.Date = time.Now().Format(ISO8601)
	fmt.Println(l.toJson())
}
// Debug logs msg at DEBUG level as one JSON line on stdout, but only when
// the logger is configured at LevelDebug.
func (l *logger) Debug(msg string) {
	// BUG fix: the original tested l.level < LevelError, which is never
	// true (LevelError is 0), so debug output was emitted at every level.
	if l.level < LevelDebug {
		return
	}
	l.Level = "DEBUG"
	l.Message = msg
	l.Date = time.Now().Format(ISO8601)
	fmt.Println(l.toJson())
}
// toJson renders the entry via the easyjson-generated MarshalJSON; on
// failure the error text itself is returned so something still gets logged.
func (l *logger) toJson() string {
	data, err := l.MarshalJSON()
	if err == nil {
		return string(data)
	}
	return err.Error()
}
// createMessageLog decorates err with the calling function's short name,
// file and line. NOTE(review): runtime.Caller(1) resolves to this
// function's direct caller (e.g. (*logger).Error), not the user code that
// logged the error — confirm whether Caller(2) was intended.
func createMessageLog(err error) error {
	pc, fn, line, ok := runtime.Caller(1)
	if !ok {
		return fmt.Errorf("in function:unknown file:unknown line:unknown message: %v", err)
	}
	// Full names look like "pkg/path.(*Type).Method"; filepath.Ext grabs the
	// final ".Method" segment and the dot is trimmed off.
	nameFull := runtime.FuncForPC(pc).Name()
	nameEnd := filepath.Ext(nameFull)
	name := strings.TrimPrefix(nameEnd, ".")
	// Anonymous/init functions end in a numeric suffix; "0" is shown as "init".
	if name == "0" {
		name = "init"
	}
	return fmt.Errorf("in function:%s file:%s line:%d message:%v", name, filepath.Base(fn), line, err)
}
|
package qiwi
import (
"fmt"
"strconv"
)
// currency is a currency code string accepted by the QIWI API.
type currency string

// Supported currency codes.
const (
	RUB currency = "RUB"
	USD currency = "USD"
	EUR currency = "EUR"
	//GBP Currency = "GBP"
)

// kopeeksInRuble used to make float amount value from int
const kopeeksInRuble float64 = 100

// money is an amount in whole currency units, converted from integer
// kopeks/cents via toMoney.
type money float64
// toMoney converts an amount of kopeks/cents into whole currency units.
func toMoney(a int) money {
	units := float64(a) / kopeeksInRuble
	return money(units)
}

// Int returns the amount in kopeks/cents.
func (m money) Int() int {
	return int(kopeeksInRuble * float64(m))
}

// String renders the amount with two decimal places.
func (m money) String() string {
	return fmt.Sprintf("%.2f", m)
}
// MarshalJSON encodes the amount as a bare JSON number with two decimals.
func (m money) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf("%.2f", float64(m))), nil
}
// UnmarshalJSON parses a JSON number — bare or wrapped in double quotes —
// into money. Invalid input yields ErrBadJSON.
func (m *money) UnmarshalJSON(data []byte) error {
	// BUG fix: the original compared data[0] against byte 32 (a space)
	// while its comment claimed a double quote ('"' is byte 34), so quoted
	// values such as "12.34" were never unquoted and always failed to
	// parse. It also never checked the length before indexing.
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1]
	}
	am, err := strconv.ParseFloat(string(data), 64)
	if err != nil {
		return fmt.Errorf("[QIWI] Amount JSON error: %w", ErrBadJSON)
	}
	*m = money(am)
	return nil
}
// Amount is a monetary value together with its currency, as exchanged with
// the QIWI API.
type Amount struct {
	Value    money    `json:"value"`
	Currency currency `json:"currency"`
}
// newAmount builds an Amount from a kopeks/cents in the given currency.
func newAmount(a int, cur currency) Amount {
	return Amount{
		Value:    toMoney(a),
		Currency: cur,
	}
}

// NewAmountInRubles returns an Amount of a kopeks in RUB.
func NewAmountInRubles(a int) Amount { return newAmount(a, RUB) }

// NewAmountInDollars returns an Amount of a cents in USD.
func NewAmountInDollars(a int) Amount { return newAmount(a, USD) }

// NewAmountInEuros returns an Amount of a cents in EUR.
func NewAmountInEuros(a int) Amount { return newAmount(a, EUR) }
|
package ping
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// Test_ping_unit covers the pure parsing functions: single reply lines from
// both BSD and GNU ping, full command output with a reply, and full output
// with no reply (destination unreachable).
func Test_ping_unit(t *testing.T) {
	Convey("ping", t, func() {
		Convey("ParsePingResponseLine()", func() {
			// One BSD-style line and two GNU-style lines (with hostnames).
			tests := []string{
				"64 bytes from 127.0.0.1: icmp_seq=0 ttl=64 time=0.052 ms", // bsd
				"64 bytes from iad23s06-in-f0.1e100.net (74.125.228.32): icmp_seq=1 ttl=46 time=1.76 ms", // gnu
				"64 bytes from ip-23-229-234-162.ip.secureserver.net (23.229.234.162): icmp_seq=2 ttl=45 time=47.8 ms", // gnu
			}
			results := []PingResponse{
				PingResponse{Host: "", IP: "127.0.0.1", ICMPSeq: 0, TTL: 64, Time: 0.052},
				PingResponse{Host: "", IP: "74.125.228.32", ICMPSeq: 1, TTL: 46, Time: 1.76},
				PingResponse{Host: "", IP: "23.229.234.162", ICMPSeq: 2, TTL: 45, Time: 47.8},
			}
			for i, test := range tests {
				pr, err := ParsePingResponseLine(test)
				So(pr, ShouldNotBeNil)
				So(err, ShouldBeNil)
				So(pr.Host, ShouldEqual, results[i].Host)
				So(pr.IP, ShouldEqual, results[i].IP)
				So(pr.ICMPSeq, ShouldEqual, results[i].ICMPSeq)
				So(pr.Time, ShouldEqual, results[i].Time)
			}
		})
		Convey("ParsePingOutput()", func() {
			Convey("Should return PingResponse given a valid reply", func() {
				// Full ping(8) output fixtures; whitespace inside the raw
				// strings is significant to the parser.
				tests := [][]byte{
					// BSD ping with reply
					[]byte(`PING google.com (167.206.252.108): 56 data bytes
64 bytes from 167.206.252.108: icmp_seq=0 ttl=59 time=13.886 ms
--- google.com ping statistics ---
1 packets transmitted, 1 packets received, 0.0% packet loss
round-trip min/avg/max/stddev = 13.886/13.886/13.886/0.000 ms`),
					// GNU ping with reply
					[]byte(`PING google.com (74.125.228.40) 56(84) bytes of data.
64 bytes from iad23s06-in-f8.1e100.net (74.125.228.40): icmp_seq=1 ttl=46 time=1.67 ms
--- google.com ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.674/1.674/1.674/0.000 ms`),
				}
				results := []PingResponse{
					PingResponse{Host: "", IP: "167.206.252.108", ICMPSeq: 0, TTL: 59, Time: 13.886},
					PingResponse{Host: "", IP: "74.125.228.40", ICMPSeq: 1, TTL: 46, Time: 1.67},
				}
				for i, test := range tests {
					pr, err := ParsePingOutput(test)
					So(pr, ShouldNotBeNil)
					So(err, ShouldBeNil)
					So(pr.Host, ShouldEqual, results[i].Host)
					So(pr.IP, ShouldEqual, results[i].IP)
					So(pr.ICMPSeq, ShouldEqual, results[i].ICMPSeq)
					So(pr.Time, ShouldEqual, results[i].Time)
				}
			})
			Convey("Should return destination uncreachable error given no reply", func() {
				// 100% packet loss fixtures must yield a nil response and
				// the DestinationUnreachableError message.
				tests := [][]byte{
					// BSD timeout
					[]byte(`PING msn.com (23.101.196.141): 56 data bytes
--- msn.com ping statistics ---
1 packets transmitted, 0 packets received, 100.0% packet loss
`),
					// GNU timeout
					[]byte(` PING msn.com (23.101.196.141) 56(84) bytes of data.
--- msn.com ping statistics ---
1 packets transmitted, 0 received, 100% packet loss, time 0ms`),
				}
				for _, test := range tests {
					pr, err := ParsePingOutput(test)
					So(pr, ShouldBeNil)
					So(err.Error(), ShouldEqual, DestinationUnreachableError)
				}
			})
		})
	})
}
// Test_ping_integration runs the real ping binary against localhost,
// 127.0.0.1 and google.com, plus one invalid host. It requires network
// access and a working ping(8) on the machine running the tests.
func Test_ping_integration(t *testing.T) {
	Convey("ping", t, func() {
		Convey("Ping()", func() {
			Convey("Should ping localhost", func() {
				pr, err := Ping("localhost")
				// So(pr.Host, ShouldEqual, "localhost")
				So(err, ShouldBeNil)
				So(pr, ShouldNotBeNil)
			})
			Convey("Should ping 127.0.0.1", func() {
				pr, err := Ping("127.0.0.1")
				// So(pr.Host, ShouldEqual, "localhost")
				// So(pr.IP, ShouldEqual, "localhost")
				So(err, ShouldBeNil)
				So(pr, ShouldNotBeNil)
			})
			Convey("Should ping google", func() {
				pr, err := Ping("google.com")
				So(err, ShouldBeNil)
				So(pr, ShouldNotBeNil)
				// So(pr.Host, ShouldEqual, "google.com")
			})
			Convey("Should return error with invalid host", func() {
				pr, err := Ping("=2lsakjf2k34")
				So(err, ShouldNotBeNil)
				So(pr, ShouldBeNil)
			})
		})
	})
}
|
/*
* @lc app=leetcode.cn id=999 lang=golang
*
* [999] 可以被一步捕获的棋子数
*/
package main
// @lc code=start
// numRookCaptures returns how many pawns ('p') the white rook ('R') can
// capture in one move on the given board. The rook moves along its row and
// column; a bishop ('B') blocks the path, and at most one pawn per
// direction can be captured.
//
// The original duplicated the scan four times (up/down/left/right); here a
// single helper walks one direction vector, which keeps the four scans in
// one loop.
func numRookCaptures(board [][]byte) int {
	// Locate the rook (first one in row-major order, as before).
	var rookX, rookY int
found:
	for i := 0; i < len(board); i++ {
		for j := 0; j < len(board[0]); j++ {
			if board[i][j] == 'R' {
				rookX, rookY = i, j
				break found
			}
		}
	}

	pawnCount := 0
	// Direction vectors: up, down, left, right.
	for _, d := range [4][2]int{{-1, 0}, {1, 0}, {0, -1}, {0, 1}} {
		pawnCount += scanDirection(board, rookX, rookY, d[0], d[1])
	}
	return pawnCount
}

// scanDirection walks from (x, y) in steps of (dx, dy) and returns 1 if the
// first non-empty square reached is a pawn, 0 if it is a bishop or the edge
// of the board.
func scanDirection(board [][]byte, x, y, dx, dy int) int {
	for i, j := x+dx, y+dy; i >= 0 && i < len(board) && j >= 0 && j < len(board[0]); i, j = i+dx, j+dy {
		switch board[i][j] {
		case 'B':
			return 0
		case 'p':
			return 1
		}
	}
	return 0
}
// func main() {
// fmt.Println(numRookCaptures([][]byte{
// {'.', '.', '.', '.', '.', '.', '.', '.'},
// {'.', '.', '.', 'p', '.', '.', '.', '.'},
// {'.', '.', '.', 'R', '.', '.', '.', 'p'},
// {'.', '.', '.', '.', '.', '.', '.', '.'},
// {'.', '.', '.', '.', '.', '.', '.', '.'},
// {'.', '.', '.', 'p', '.', '.', '.', '.'},
// {'.', '.', '.', '.', '.', '.', '.', '.'},
// {'.', '.', '.', '.', '.', '.', '.', '.'},
// }))
// }
// @lc code=end
|
package main
import (
"log"
)
// getMaxProfit returns the best profit from buying at one price in
// stockPrices and selling at a strictly later price. With fewer than two
// prices no trade is possible and 0 is returned; if every trade loses
// money the least-bad (negative) profit is returned.
//
// BUG fix: the original reset the running high whenever a new low appeared,
// discarding earlier, better buy/sell windows (e.g. [1,10,0,1] returned 1
// instead of 9), and its `lowestPrice == 0` sentinel broke on a price of 0.
func getMaxProfit(stockPrices []int) int {
	if len(stockPrices) < 2 {
		return 0
	}
	lowestPrice := stockPrices[0]
	lowestPriceIndex := 0
	bestBuy, bestSell := 0, 1
	profit := stockPrices[1] - stockPrices[0]
	for i := 1; i < len(stockPrices); i++ {
		price := stockPrices[i]
		// Selling today against the lowest price seen so far is the best
		// trade ending at index i.
		if price-lowestPrice > profit {
			profit = price - lowestPrice
			bestBuy, bestSell = lowestPriceIndex, i
		}
		if price < lowestPrice {
			lowestPrice = price
			lowestPriceIndex = i
		}
	}
	log.Printf("Lowest[%v]: %v, Highest[%v]: %v Profit: %v\n", bestBuy, stockPrices[bestBuy], bestSell, stockPrices[bestSell], profit)
	return profit
}
|
package main
import (
"os"
"fmt"
"time"
"errors"
"math/rand"
ioutil "io/ioutil"
http "net/http"
)
// HealthBadCount counts /healthBad requests; once it exceeds 15, that
// endpoint starts returning HTTP 500 (see healthBadHandle).
var HealthBadCount = 0
// statusServer registers the test HTTP handlers and serves on :8080,
// blocking until the server stops. Read/write timeouts keep connections
// from staying open forever.
func statusServer() {
	http.HandleFunc("/ping", pingHandle)
	http.HandleFunc("/health", healthHandle)
	http.HandleFunc("/healthBad", healthBadHandle)
	http.HandleFunc("/pvDataReturn", pvDataReturn)
	http.HandleFunc("/pvDataSet", pvDataSet)
	http.HandleFunc("/apiTest", downwardAPITester)
	// create server that doesn't leave things open forever
	s := &http.Server{
		Addr:         ":8080",
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	// BUG fix: the ListenAndServe error was silently discarded; report why
	// the server stopped (e.g. the port is already in use).
	if err := s.ListenAndServe(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
// pingHandle replies "PONG" to GET requests; other methods get an empty 200.
func pingHandle(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		return
	}
	fmt.Fprintf(w, "PONG\n")
}
// healthHandle always reports healthy for GET requests; other methods get
// an empty 200.
func healthHandle(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		return
	}
	fmt.Fprintf(w, "health\n")
}
// pvDataReturn serves the contents of the persistent-volume test file, or
// "PV not present" when it cannot be read.
func pvDataReturn(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		return
	}
	pvData, err := ioutil.ReadFile("/mnt/cephfs/pvData")
	if err != nil {
		fmt.Fprintf(w, "PV not present\n")
		return
	}
	fmt.Fprintf(w, string(pvData)+"\n")
}
// pvDataSet stores up to 100 bytes of the PUT body into the
// persistent-volume test file; other methods are ignored.
func pvDataSet(w http.ResponseWriter, r *http.Request) {
	if r.Method != "PUT" {
		return
	}
	payload := make([]byte, 100)
	if _, err := r.Body.Read(payload); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "Failed to read PUT data\n")
	}
	if err := ioutil.WriteFile("/mnt/cephfs/pvData", payload, os.ModePerm); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "PV not present\n")
		return
	}
	fmt.Fprintf(w, "Data saved")
}
// letters is the alphabet randSeq draws from when generating file names.
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
// randSeq returns a random string of n ASCII letters drawn from letters.
func randSeq(n int) string {
	out := make([]rune, n)
	for i := 0; i < n; i++ {
		out[i] = letters[rand.Intn(len(letters))]
	}
	return string(out)
}
// downwardAPITester writes up to 100 bytes of the request body to a
// randomly named file under the downward-API log directory.
func downwardAPITester(w http.ResponseWriter, r *http.Request) {
	payload := make([]byte, 100)
	if _, err := r.Body.Read(payload); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "Failed to read PUT data\n")
	}
	target := "/var/log/containers/sample-go/" + randSeq(10)
	if err := ioutil.WriteFile(target, payload, os.ModePerm); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "PV not present\n")
		return
	}
	fmt.Fprintf(w, "Data saved")
}
// healthBadHandle reports healthy for the first 15 GET requests, then
// starts answering HTTP 500 — used to exercise liveness-probe failures.
func healthBadHandle(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		return
	}
	HealthBadCount++
	if HealthBadCount > 15 {
		w.WriteHeader(http.StatusInternalServerError)
	}
	fmt.Fprintf(w, "health count %d next\n", HealthBadCount)
}
// namedOutput writes a counting heartbeat line to filehandle once per
// second, forever.
func namedOutput(filehandle *os.File) {
	for counter := 0; ; counter++ {
		time.Sleep(time.Second)
		fmt.Fprintf(filehandle, "oh look! a message on %s! id: %d\n", filehandle.Name(), counter)
	}
}
// forTesting multiplies number by multiplier, returning an error (and a
// zero result) when the product exceeds 50.
func forTesting(number, multiplier int) (calculated int, err error) {
	product := number * multiplier
	if product > 50 {
		// Error strings are lowercase per Go convention (was "Number too
		// high"); the naked return is replaced with explicit returns.
		return 0, errors.New("number too high")
	}
	return product, nil
}
// main starts three heartbeat writers (a custom file, stderr and stdout)
// and then serves the status HTTP endpoints, blocking forever.
func main() {
	if custom, err := os.Create("/lorst"); err != nil {
		fmt.Fprintln(os.Stderr, "Failed to open custom log handle")
	} else {
		go namedOutput(custom)
	}
	go namedOutput(os.Stderr)
	go namedOutput(os.Stdout)
	statusServer()
}
|
package parser
import (
"errors"
"fmt"
"regexp"
"time"
"github.com/alecthomas/participle"
"github.com/alecthomas/participle/lexer"
"github.com/flynn/biscuit-go"
"github.com/flynn/biscuit-go/datalog"
)
var (
	// ErrVariableInFact is returned when a fact's predicate contains a
	// variable atom; facts must be fully ground.
	ErrVariableInFact = errors.New("parser: a fact cannot contain any variables")
)

// defaultParserOptions configure participle with the default lexer and a
// 3-token lookahead.
var defaultParserOptions = []participle.Option{
	participle.Lexer(lexer.DefaultDefinition),
	participle.UseLookahead(3),
}

// Parser converts textual datalog into biscuit values, returning an error
// on invalid input.
type Parser interface {
	Fact(fact string) (biscuit.Fact, error)
	Rule(rule string) (biscuit.Rule, error)
	Caveat(caveat string) (biscuit.Caveat, error)
	// Must returns a variant of this parser that panics instead of
	// returning errors.
	Must() MustParser
}

// MustParser mirrors Parser but panics on parse errors.
type MustParser interface {
	Fact(fact string) biscuit.Fact
	Rule(rule string) biscuit.Rule
	Caveat(caveat string) biscuit.Caveat
}

// parser is the default Parser, backed by three pre-built participle
// grammars (one per construct).
type parser struct {
	factParser   *participle.Parser
	ruleParser   *participle.Parser
	caveatParser *participle.Parser
}

// Compile-time interface conformance check.
var _ Parser = (*parser)(nil)

// mustParser wraps a Parser and panics on any parse error.
type mustParser struct {
	parser Parser
}

// Compile-time interface conformance check.
var _ MustParser = (*mustParser)(nil)
// New returns the default Parser with grammars for facts, rules and caveats
// pre-built (MustBuild panics only if a grammar definition is invalid).
func New() Parser {
	p := &parser{}
	p.factParser = participle.MustBuild(&Predicate{}, defaultParserOptions...)
	p.ruleParser = participle.MustBuild(&Rule{}, defaultParserOptions...)
	p.caveatParser = participle.MustBuild(&Caveat{}, defaultParserOptions...)
	return p
}
// Fact parses a single ground predicate into a biscuit.Fact, rejecting any
// predicate containing variables with ErrVariableInFact.
func (p *parser) Fact(fact string) (biscuit.Fact, error) {
	parsed := &Predicate{}
	if err := p.factParser.ParseString(fact, parsed); err != nil {
		return biscuit.Fact{}, err
	}

	pred, err := convertPredicate(parsed)
	if err != nil {
		return biscuit.Fact{}, err
	}

	for _, atom := range pred.IDs {
		if atom.Type() == biscuit.AtomTypeVariable {
			return biscuit.Fact{}, ErrVariableInFact
		}
	}

	return biscuit.Fact{Predicate: *pred}, nil
}
// Rule parses a datalog rule string into a biscuit.Rule.
func (p *parser) Rule(rule string) (biscuit.Rule, error) {
	parsed := &Rule{}
	if err := p.ruleParser.ParseString(rule, parsed); err != nil {
		return biscuit.Rule{}, err
	}

	converted, err := convertRule(parsed)
	if err != nil {
		return biscuit.Rule{}, err
	}
	return *converted, nil
}
// Caveat parses a caveat string into a biscuit.Caveat, converting each of
// its queries in turn; the first failing query aborts the conversion.
func (p *parser) Caveat(caveat string) (biscuit.Caveat, error) {
	parsed := &Caveat{}
	if err := p.caveatParser.ParseString(caveat, parsed); err != nil {
		return biscuit.Caveat{}, err
	}

	queries := make([]biscuit.Rule, len(parsed.Queries))
	for i, q := range parsed.Queries {
		converted, err := convertRule(q)
		if err != nil {
			return biscuit.Caveat{}, err
		}
		queries[i] = *converted
	}
	return biscuit.Caveat{Queries: queries}, nil
}
// Must returns a MustParser view of this parser that panics on errors.
func (p *parser) Must() MustParser {
	m := &mustParser{parser: p}
	return m
}
// Fact parses fact, panicking on any parse error.
func (m *mustParser) Fact(fact string) biscuit.Fact {
	fct, err := m.parser.Fact(fact)
	if err != nil {
		panic(err)
	}
	return fct
}

// Rule parses rule, panicking on any parse error.
func (m *mustParser) Rule(rule string) biscuit.Rule {
	rl, err := m.parser.Rule(rule)
	if err != nil {
		panic(err)
	}
	return rl
}

// Caveat parses caveat, panicking on any parse error.
func (m *mustParser) Caveat(caveat string) biscuit.Caveat {
	cv, err := m.parser.Caveat(caveat)
	if err != nil {
		panic(err)
	}
	return cv
}
// convertPredicate maps a parsed Predicate AST node onto a
// biscuit.Predicate, converting each ID into the matching atom type.
func convertPredicate(p *Predicate) (*biscuit.Predicate, error) {
	var atoms []biscuit.Atom
	for _, a := range p.IDs {
		var atom biscuit.Atom
		switch {
		case a.Integer != nil:
			atom = biscuit.Integer(*a.Integer)
		case a.String != nil:
			atom = biscuit.String(*a.String)
		case a.Symbol != nil:
			atom = biscuit.Symbol(*a.Symbol)
		case a.Variable != nil:
			atom = biscuit.Variable(*a.Variable)
		case a.Bytes != nil:
			decoded, err := a.Bytes.Decode()
			if err != nil {
				return nil, fmt.Errorf("parser: failed to decode hex string: %v", err)
			}
			atom = biscuit.Bytes(decoded)
		default:
			return nil, errors.New("parser: unsupported predicate, must be one of integer, string, symbol, variable, or bytes")
		}
		atoms = append(atoms, atom)
	}

	return &biscuit.Predicate{Name: p.Name, IDs: atoms}, nil
}
// convertConstraint dispatches a parsed Constraint to the matching
// variable- or function-constraint converter.
func convertConstraint(c *Constraint) (*biscuit.Constraint, error) {
	switch {
	case c.VariableConstraint != nil:
		return convertVariableConstraint(c.VariableConstraint)
	case c.FunctionConstraint != nil:
		return convertFunctionConstraint(c.FunctionConstraint)
	default:
		return nil, errors.New("parser: unsupported constraint, must be one of variable or function")
	}
}
// convertVariableConstraint lowers a parsed variable constraint into a
// biscuit.Constraint, selecting a checker based on which of the
// mutually exclusive fields (Date, Int, String, Bytes, Set) is present.
func convertVariableConstraint(c *VariableConstraint) (*biscuit.Constraint, error) {
	constraint := &biscuit.Constraint{
		Name: biscuit.Variable(*c.Variable),
	}
	switch {
	case c.Date != nil:
		// Date comparison: RFC3339 target, "<" (before) or ">" (after).
		date, err := time.Parse(time.RFC3339, *c.Date.Target)
		if err != nil {
			return nil, err
		}
		switch *c.Date.Operation {
		case "<":
			constraint.Checker = biscuit.DateComparisonChecker{
				Comparison: datalog.DateComparisonBefore,
				Date:       biscuit.Date(date),
			}
		case ">":
			constraint.Checker = biscuit.DateComparisonChecker{
				Comparison: datalog.DateComparisonAfter,
				Date:       biscuit.Date(date),
			}
		default:
			return nil, fmt.Errorf("parser: unsupported date operation: %s", *c.Date.Operation)
		}
	case c.Int != nil:
		// Integer comparison: <, <=, ==, >, >=.
		switch *c.Int.Operation {
		case "<":
			constraint.Checker = biscuit.IntegerComparisonChecker{
				Comparison: datalog.IntegerComparisonLT,
				Integer:    biscuit.Integer(*c.Int.Target),
			}
		case "<=":
			constraint.Checker = biscuit.IntegerComparisonChecker{
				Comparison: datalog.IntegerComparisonLTE,
				Integer:    biscuit.Integer(*c.Int.Target),
			}
		case "==":
			constraint.Checker = biscuit.IntegerComparisonChecker{
				Comparison: datalog.IntegerComparisonEqual,
				Integer:    biscuit.Integer(*c.Int.Target),
			}
		case ">":
			constraint.Checker = biscuit.IntegerComparisonChecker{
				Comparison: datalog.IntegerComparisonGT,
				Integer:    biscuit.Integer(*c.Int.Target),
			}
		case ">=":
			constraint.Checker = biscuit.IntegerComparisonChecker{
				Comparison: datalog.IntegerComparisonGTE,
				Integer:    biscuit.Integer(*c.Int.Target),
			}
		default:
			return nil, fmt.Errorf("parser: unsupported int operation: %s", *c.Int.Operation)
		}
	case c.String != nil:
		// Strings only support equality here; prefix/suffix/regexp go
		// through function constraints instead.
		constraint.Checker = biscuit.StringComparisonChecker{
			Comparison: datalog.StringComparisonEqual,
			Str:        biscuit.String(*c.String.Target),
		}
	case c.Bytes != nil:
		// Bytes only support equality; the target is hex-encoded.
		switch *c.Bytes.Operation {
		case "==":
			b, err := c.Bytes.Target.Decode()
			if err != nil {
				return nil, fmt.Errorf("parser: failed to decode hex string: %v", err)
			}
			constraint.Checker = biscuit.BytesComparisonChecker{
				Comparison: datalog.BytesComparisonEqual,
				Bytes:      biscuit.Bytes(b),
			}
		default:
			return nil, fmt.Errorf("parser: unsupported bytes operation: %s", *c.Bytes.Operation)
		}
	case c.Set != nil:
		// Set membership (or non-membership when Not is set), keyed by
		// element type.
		switch {
		case c.Set.Symbols != nil:
			set := make(map[biscuit.Symbol]struct{}, len(c.Set.Symbols))
			for _, s := range c.Set.Symbols {
				set[biscuit.Symbol(s)] = struct{}{}
			}
			constraint.Checker = biscuit.SymbolInChecker{
				Set: set,
				Not: c.Set.Not,
			}
		case c.Set.Int != nil:
			set := make(map[biscuit.Integer]struct{}, len(c.Set.Int))
			for _, i := range c.Set.Int {
				set[biscuit.Integer(i)] = struct{}{}
			}
			constraint.Checker = biscuit.IntegerInChecker{
				Set: set,
				Not: c.Set.Not,
			}
		case c.Set.String != nil:
			set := make(map[biscuit.String]struct{}, len(c.Set.String))
			for _, s := range c.Set.String {
				set[biscuit.String(s)] = struct{}{}
			}
			constraint.Checker = biscuit.StringInChecker{
				Set: set,
				Not: c.Set.Not,
			}
		case c.Set.Bytes != nil:
			// Byte slices are not comparable map keys, so the set is
			// keyed by the string conversion of the decoded bytes.
			set := make(map[string]struct{}, len(c.Set.Bytes))
			for _, s := range c.Set.Bytes {
				b, err := s.Decode()
				if err != nil {
					return nil, fmt.Errorf("parser: failed to decode hex string: %v", err)
				}
				set[string(b)] = struct{}{}
			}
			constraint.Checker = biscuit.BytesInChecker{
				Set: set,
				Not: c.Set.Not,
			}
		default:
			return nil, errors.New("parser: unsupported set type, must be one of symbols, int, string, or bytes")
		}
	default:
		return nil, errors.New("parser: unsupported variable constraint, must be one of date, int, string, bytes, or set")
	}
	return constraint, nil
}
// convertFunctionConstraint maps a parsed function-style constraint
// (prefix / suffix / match) onto the corresponding biscuit string
// checker.
func convertFunctionConstraint(c *FunctionConstraint) (*biscuit.Constraint, error) {
	out := &biscuit.Constraint{
		Name: biscuit.Variable(*c.Variable),
	}
	switch *c.Function {
	case "prefix":
		out.Checker = biscuit.StringComparisonChecker{
			Comparison: datalog.StringComparisonPrefix,
			Str:        biscuit.String(*c.Argument),
		}
	case "suffix":
		out.Checker = biscuit.StringComparisonChecker{
			Comparison: datalog.StringComparisonSuffix,
			Str:        biscuit.String(*c.Argument),
		}
	case "match":
		re, err := regexp.Compile(*c.Argument)
		if err != nil {
			return nil, err
		}
		out.Checker = biscuit.StringRegexpChecker(*re)
	default:
		return nil, fmt.Errorf("parser: unsupported function: %s", *c.Function)
	}
	return out, nil
}
// convertRule lowers a parsed Rule AST node (head, body predicates,
// constraints) into a biscuit.Rule.
func convertRule(r *Rule) (*biscuit.Rule, error) {
	body := make([]biscuit.Predicate, 0, len(r.Body))
	for _, p := range r.Body {
		pred, err := convertPredicate(p)
		if err != nil {
			return nil, err
		}
		body = append(body, *pred)
	}
	constraints := make([]biscuit.Constraint, 0, len(r.Constraints))
	for _, c := range r.Constraints {
		converted, err := convertConstraint(c)
		if err != nil {
			return nil, err
		}
		constraints = append(constraints, *converted)
	}
	head, err := convertPredicate(r.Head)
	if err != nil {
		return nil, err
	}
	return &biscuit.Rule{
		Head:        *head,
		Body:        body,
		Constraints: constraints,
	}, nil
}
|
package main
import (
"testing"
)
// TestCalcScore checks calcScore against two answer sheets where every
// answer matches, so the score must equal the number of questions.
func TestCalcScore(t *testing.T) {
	userAns := []int{1, 2, 3, 45, 6}
	examAns := []int{1, 2, 3, 45, 6}
	score := calcScore(&userAns, &examAns)
	if score != 5 {
		t.Errorf("should be 5 by given: %v and %v", userAns, examAns)
	}
	userAns = []int{1, 2}
	examAns = []int{1, 2}
	score = calcScore(&userAns, &examAns)
	if score != 2 {
		// Fixed message: the assertion expects 2, but the message said 1.
		t.Errorf("should be 2 by given: %v and %v", userAns, examAns)
	}
}
|
package main
import "fmt"
// main runs two sanity checks of permutation via the test helper and
// prints "pass"/"fail" for each.
func main() {
	results := []string{
		test("abc", "cba", true),
		test("abc", "abd", false),
	}
	for _, r := range results {
		fmt.Println(r)
	}
}
// permutation reports whether b is a permutation of a, i.e. both
// strings contain exactly the same multiset of runes.
func permutation(a, b string) bool {
	// Different lengths can never be permutations. This also fixes the
	// count-based check below, which on its own accepts b being a
	// sub-multiset of a (e.g. permutation("ab", "a") returned true).
	if len(a) != len(b) {
		return false
	}
	counts := make(map[rune]int, len(a))
	for _, r := range a {
		counts[r]++
	}
	for _, r := range b {
		counts[r]--
		if counts[r] < 0 {
			return false
		}
	}
	return true
}
// test compares permutation(a, b) against the expected result and
// returns "pass" or "fail".
func test(a, b string, expected bool) string {
	if permutation(a, b) == expected {
		return "pass"
	}
	return "fail"
}
|
package entity
import (
"time"
"github.com/fatih/structs"
)
// NetworkFee is the entity for a network (chain) fee charged against a
// transaction. Field semantics are inferred from names where noted.
type NetworkFee struct {
	Id            int64
	TransactionId int64
	MerchantId    int64
	FromAccountId int64
	AddressId     int64
	Chain         string // blockchain identifier
	Token         string
	Address       string
	Tag           string
	// CalculatedAmount vs ActualAmount: presumably estimated vs charged
	// fee — TODO confirm against the fee pipeline.
	CalculatedAmount string
	ActualAmount     string
	HasChanged       int64 // NOTE(review): looks like a boolean flag stored as int — confirm
	Hash             string
	CreatedAt        *time.Time
	UpdatedAt        *time.Time
}
// Map converts the NetworkFee into a field-name-keyed map via
// github.com/fatih/structs. The pointer return type is kept for
// API compatibility.
func (p *NetworkFee) Map() *map[string]interface{} {
	result := structs.Map(p)
	return &result
}
|
package models
import "time"
type User struct {
ID int `gorm:"primary_key" json:"id"`
Username string `gorm:"column:username;type:varchar(40);unique;not null" json:”username” `
Email string `gorm:"column:email;type:varchar(40);unique;not null" json:”email” `
Password string `gorm:"column:password;type:varchar(200);not null" json:”password”`
//UserBalance []UserBalance
CreatedAt time.Time
UpdatedAt time.Time
}
|
package auth
import (
"context"
"net/http"
"github.com/google/uuid"
"github.com/micro/go-micro/v2/metadata"
)
// GenerateToken creates a fresh random credential (a UUID string) for a
// user session.
func GenerateToken() string {
	token := uuid.New()
	return token.String()
}
// contextKey is a private key type for context values, preventing
// collisions with keys set by other packages.
type contextKey string

// AccessTokenKey is the metadata key for the user session access token.
const AccessTokenKey = "Access-Token"

// ZoneKey is the metadata key for the client zone.
const ZoneKey = "ClientZone"

// NameKey is the metadata key for the client name.
const NameKey = "ClientName"

// ClientIDKey is the metadata key for the client ID.
const ClientIDKey = "ClientID"

// CustomizedCodeKey is the metadata key for the client customized code.
const CustomizedCodeKey = "ClientCustomizedCode"

// RemoteClientIPKey is the metadata key for the remote client's IP.
const RemoteClientIPKey = "RemoteClientIP"

// AccessTokenType is the metadata key for the access-token type.
const AccessTokenType = "Access-Token-Type"

var (
	// userIDKey stores/retrieves the user ID on a context.
	userIDKey contextKey = "UserID"
	// accountKey stores/retrieves the account on a context.
	accountKey contextKey = "Account"
	// machineUUID stores/retrieves the machine UUID on a context.
	machineUUID contextKey = "MachineUUID"
)
// TokenFromContext reads the access token from the metadata carried by
// ctx. The bool reports whether it was present.
func TokenFromContext(ctx context.Context) (string, bool) {
	if md, found := metadata.FromContext(ctx); found {
		token, present := md[http.CanonicalHeaderKey(AccessTokenKey)]
		return token, present
	}
	return "", false
}
// AccessTokenTypeFromContext reads the access-token type from the
// metadata carried by ctx.
func AccessTokenTypeFromContext(ctx context.Context) (string, bool) {
	if md, found := metadata.FromContext(ctx); found {
		tokenType, present := md[http.CanonicalHeaderKey(AccessTokenType)]
		return tokenType, present
	}
	return "", false
}
// ZoneFromContext reads the client zone from the metadata carried by ctx.
func ZoneFromContext(ctx context.Context) (string, bool) {
	if md, found := metadata.FromContext(ctx); found {
		zone, present := md[http.CanonicalHeaderKey(ZoneKey)]
		return zone, present
	}
	return "", false
}
// NameFromContext reads the client name from the metadata carried by ctx.
func NameFromContext(ctx context.Context) (string, bool) {
	if md, found := metadata.FromContext(ctx); found {
		name, present := md[http.CanonicalHeaderKey(NameKey)]
		return name, present
	}
	return "", false
}
// ClientIDFromContext reads the client ID from the metadata carried by ctx.
func ClientIDFromContext(ctx context.Context) (string, bool) {
	if md, found := metadata.FromContext(ctx); found {
		clientID, present := md[http.CanonicalHeaderKey(ClientIDKey)]
		return clientID, present
	}
	return "", false
}
// CustomizedCodeFromContext reads the client customized code from the
// metadata carried by ctx.
func CustomizedCodeFromContext(ctx context.Context) (string, bool) {
	if md, found := metadata.FromContext(ctx); found {
		code, present := md[http.CanonicalHeaderKey(CustomizedCodeKey)]
		return code, present
	}
	return "", false
}
// RemoteClientIPFromContext reads the remote client IP from the
// metadata carried by ctx.
func RemoteClientIPFromContext(ctx context.Context) (string, bool) {
	if md, found := metadata.FromContext(ctx); found {
		ip, present := md[http.CanonicalHeaderKey(RemoteClientIPKey)]
		return ip, present
	}
	return "", false
}
// UserIDFromContext extracts the user ID previously stored with
// AddContextUserID. The bool reports whether an int32 value was present.
func UserIDFromContext(ctx context.Context) (int32, bool) {
	if id, ok := ctx.Value(userIDKey).(int32); ok {
		return id, true
	}
	return 0, false
}
// AccountFromContext extracts the account previously stored with
// AddContextAccount.
func AccountFromContext(ctx context.Context) (string, bool) {
	if account, ok := ctx.Value(accountKey).(string); ok {
		return account, true
	}
	return "", false
}
// AddContextUserID returns a child context carrying userID.
func AddContextUserID(ctx context.Context, userID int32) context.Context {
	child := context.WithValue(ctx, userIDKey, userID)
	return child
}
// AddContextAccount returns a child context carrying the account.
func AddContextAccount(ctx context.Context, account string) context.Context {
	child := context.WithValue(ctx, accountKey, account)
	return child
}
// AddContextToken 把account放入 context 的 metadata
func AddContextToken(ctx context.Context, token string) context.Context {
return metadata.NewContext(ctx, map[string]string{
AccessTokenKey: token,
})
}
// AddContextMachineUUID returns a child context carrying the machine UUID.
func AddContextMachineUUID(ctx context.Context, uuid string) context.Context {
	child := context.WithValue(ctx, machineUUID, uuid)
	return child
}
// MachineUUIDFromContext extracts the machine UUID previously stored
// with AddContextMachineUUID.
func MachineUUIDFromContext(ctx context.Context) (string, bool) {
	if id, ok := ctx.Value(machineUUID).(string); ok {
		return id, true
	}
	return "", false
}
|
package main
/*
#include "httpd_cb.h"
#include <string.h>
#include <stdio.h>
static fn_client_accepted client_accepted = NULL;
static void set_httpd_cb(void* cb) {
client_accepted = (fn_client_accepted)cb;
}
static void request_coming(int client_id) {
client_accepted(client_id);
}
static void iter_env(void* iter_cb, void* udd, char* key, int keyLen, char* val, int valLen) {
fn_iter_env iter_env_cb = (fn_iter_env*)iter_cb;
iter_env_cb(udd, key, keyLen, val, valLen);
}
*/
import "C"
import (
"fmt"
"net"
"net/http"
"strings"
"io/ioutil"
"time"
"os"
"reflect"
"unsafe"
)
// Client bundles one in-flight HTTP exchange so the C side can refer to
// it by an integer object id (see NewObjId in serve_http).
type Client struct {
	w         http.ResponseWriter
	r         *http.Request
	status    int // last status code written (for access logging)
	bytesSent int // response bytes written so far (for access logging)
}
// ListenParam is the request payload sent to listener_starter: either a
// "unix:<path>" host (port ignored) or a TCP host/port pair.
type ListenParam struct {
	host string
	port int32
}
var (
	// server_started flags an active listener.
	// NOTE(review): read/written from both the starter goroutine and
	// exported C entry points without synchronization — data race to
	// confirm/fix.
	server_started bool
	listener       net.Listener
	starter_req    chan ListenParam // start requests to listener_starter
	starter_resp   chan error       // start results back to StartHttpd
	show_log       bool             // emit an access-log line per request
)
// serve_http is the single handler for the embedded server: it registers
// the exchange as a Client object, invokes the C-side callback with the
// object id, and optionally writes an access-log line to stderr.
func serve_http(w http.ResponseWriter, r *http.Request) {
	// fmt.Printf("client accepted in Golang\n")
	var startTime, endTime time.Time
	if show_log {
		startTime = time.Now()
	}
	// Register the client so C code can address it by integer id; the id
	// is released when the request completes.
	client := &Client{w, r, http.StatusOK, 0}
	clientId := NewObjId(client)
	defer FreeObjId(clientId)
	// Synchronously hand the request to the C callback.
	C.request_coming(C.int(clientId))
	if !show_log {
		return
	}
	endTime = time.Now()
	// Access-log format, e.g.:
	// 127.0.0.1 - - [06/Oct/2018 16:28:23] "POST / HTTP/1.1" 200 8
	// Mon Jan 2 15:04:05 -0700 MST 2006
	duration := endTime.Sub(startTime)
	fmt.Fprintf(os.Stderr, "%s - - [%s] %v \"%s %s %s\" %d %d\n",
		r.RemoteAddr,
		startTime.Format("2/Jan/2006 15:04:05 -0700 MST"),
		duration,
		r.Method,
		r.RequestURI,
		r.Proto,
		client.status,
		client.bytesSent,
	)
}
// listener_starter is a long-lived goroutine (started by init) that
// waits for ListenParam requests, opens the requested unix or TCP
// listener, reports the result on starter_resp, and then serves HTTP on
// it until the listener is closed.
func listener_starter() {
	// fmt.Printf("listener_starter is running\n")
	http.HandleFunc("/", serve_http)
	for {
		param := <-starter_req
		// Ignore duplicate start requests while a server is running.
		// NOTE(review): this drops the request without replying on
		// starter_resp, so a concurrent StartHttpd would block — the
		// server_started pre-check in StartHttpd is what normally
		// prevents reaching here.
		if server_started {
			continue
		}
		var e error
		if strings.HasPrefix(param.host, "unix:") {
			// "unix:<path>" selects a unix-domain socket.
			fn := param.host[5:]
			listener, e = net.Listen("unix", fn)
			fmt.Fprintf(os.Stderr, "I am listening at %s\n", param.host)
		} else {
			server := fmt.Sprintf("%s:%d", param.host, param.port)
			listener, e = net.Listen("tcp", server)
			fmt.Fprintf(os.Stderr, "I am listening at %s\n", server)
		}
		if e != nil {
			starter_resp <- e
			continue
		}
		server_started = true
		starter_resp <- nil
		// Blocks until the listener is closed (see StopHttpd).
		err := http.Serve(listener, nil)
		if err != nil {
			server_started = false
			fmt.Fprintf(os.Stderr, "I was closed: %v\n", err)
		}
	}
}
// init wires up the starter channels and launches the background
// goroutine that owns the listener lifecycle.
func init() {
	starter_req = make(chan ListenParam)
	starter_resp = make(chan error)
	go listener_starter()
}
// zero is retained for compatibility with earlier revisions; null no
// longer needs it.
var zero uint64 = uint64(0)

// null returns a C NULL pointer (*C.char)(nil).
func null() *C.char {
	// A plain nil return is the idiomatic way to produce NULL; the old
	// uintptr(0) -> unsafe.Pointer round-trip was unnecessary and trips
	// `go vet`'s unsafeptr check.
	return nil
}
// StartHttpd asks the background goroutine to open a listener on
// host:port (or a "unix:<path>" socket) and installs the C callback
// invoked per request. Returns 0 on success, -1 if already started,
// -2 if listening failed.
//export StartHttpd
func StartHttpd(host *C.char, port C.int, httpd_cb unsafe.Pointer, showLog C.int) C.int {
	if server_started {
		fmt.Fprintf(os.Stderr, "httpd server already started\n")
		return C.int(-1)
	}
	show_log = (showLog != 0)
	param := ListenParam{C.GoString(host), int32(port)}
	// Hand off to listener_starter and wait for its verdict.
	starter_req <- param
	err := <-starter_resp
	if err != nil {
		fmt.Println(err.Error())
		return C.int(-2)
	}
	C.set_httpd_cb(httpd_cb);
	return C.int(0);
}
// StopHttpd closes the active listener (if any), which unblocks
// http.Serve in listener_starter and allows a later StartHttpd.
//export StopHttpd
func StopHttpd() {
	if server_started {
		listener.Close()
		server_started = false
	}
}
// set_env exposes the bytes of *goVal to C without copying: it writes
// the string's data pointer and length into *val / *valLen. The Go
// string must stay reachable while C holds the pointer.
// NOTE(review): reflect.StringHeader is deprecated in modern Go —
// consider unsafe.StringData when the toolchain allows.
func set_env(goVal *string, val **C.char, valLen *C.int) {
	v := (*reflect.StringHeader)(unsafe.Pointer(goVal))
	*val = (*C.char)(unsafe.Pointer(v.Data))
	*valLen = C.int(v.Len)
}
// GetReqEnv looks up a CGI-style variable (PATH_INFO, QUERY_STRING,
// REQUEST_METHOD, SERVER_PROTOCOL, REMOTE_ADDR) or, failing that, a
// request header by exact name, and exposes its value to C zero-copy.
// Returns 0 on success, -1 for an unknown client id, -2 when the name
// is not found.
//export GetReqEnv
func GetReqEnv(clientId C.int, name *C.char, val **C.char, valLen *C.int) C.int {
	c := GetObjById(int32(clientId))
	if c == nil {
		return C.int(-1);
	}
	client := c.(*Client)
	r := client.r
	n := C.GoString(name)
	var goVal *string
	switch n {
	case "PATH_INFO":
		goVal = &(r.URL.Path)
	case "QUERY_STRING":
		goVal = &(r.URL.RawQuery)
	case "REQUEST_METHOD":
		goVal = &(r.Method)
	case "SERVER_PROTOCOL":
		goVal = &(r.Proto)
	case "REMOTE_ADDR":
		goVal = &(r.RemoteAddr)
	default:
		// Fall back to a header lookup; only the first value is exposed.
		// NOTE(review): lookup is by exact map key, not canonicalized —
		// confirm callers pass canonical header names.
		vv, ok := r.Header[n]
		if !ok {
			return C.int(-2)
		}
		goVal = &(vv[0])
	}
	set_env(goVal, val, valLen)
	return C.int(0)
}
// iter_env invokes the C iteration callback with zero-copy views of the
// key and value strings (pointers into Go string data).
func iter_env(iter_cb unsafe.Pointer, udd unsafe.Pointer, key, val string) {
	k := (*reflect.StringHeader)(unsafe.Pointer(&key))
	v := (*reflect.StringHeader)(unsafe.Pointer(&val))
	C.iter_env(iter_cb, udd,
		(*C.char)(unsafe.Pointer(k.Data)), C.int(k.Len),
		(*C.char)(unsafe.Pointer(v.Data)), C.int(v.Len))
}
// IterReqEnvs streams the CGI-style variables followed by every request
// header (all values) to the C callback iter_cb, passing udd through
// unchanged. Unknown client ids are silently ignored.
//export IterReqEnvs
func IterReqEnvs(clientId C.int, iter_cb unsafe.Pointer, udd unsafe.Pointer) {
	c := GetObjById(int32(clientId))
	if c == nil {
		return
	}
	client := c.(*Client)
	r := client.r
	iter_env(iter_cb, udd, "PATH_INFO", r.URL.Path)
	iter_env(iter_cb, udd, "QUERY_STRING", r.URL.RawQuery)
	iter_env(iter_cb, udd, "REQUEST_METHOD", r.Method)
	iter_env(iter_cb, udd, "SERVER_PROTOCOL", r.Proto)
	iter_env(iter_cb, udd, "REMOTE_ADDR", r.RemoteAddr)
	for k, vs := range r.Header {
		for _, v := range vs {
			iter_env(iter_cb, udd, k, v);
		}
	}
}
// set_body exposes goBody's bytes to C without copying by writing the
// slice's data pointer and length into *body / *bodyLen. The slice must
// stay reachable while C holds the pointer.
func set_body(goBody []byte, body **C.char, bodyLen *C.int) {
	p := (*reflect.SliceHeader)(unsafe.Pointer(&goBody))
	*body = (*C.char)(unsafe.Pointer(p.Data))
	*bodyLen = C.int(p.Len)
}
// ReadBody reads the full request body and exposes it to C zero-copy.
// Returns 0 on success (body may be NULL/empty for bodyless requests),
// -1 for an unknown client id, or an HTTP status code describing the
// failure (411 missing length, 400 no body, 500 read error).
//export ReadBody
func ReadBody(clientId C.int, body **C.char, bodyLen *C.int) C.int {
	c := GetObjById(int32(clientId))
	if c == nil {
		return C.int(-1)
	}
	client := c.(*Client)
	r := client.r
	switch {
	case r.Method == "" || r.Method == "GET" || r.Method == "HEAD" || r.ContentLength == 0:
		// No body expected: hand back NULL with zero length.
		*body = null();
		*bodyLen = C.int(0)
		return C.int(0)
	case r.ContentLength < 0:
		return C.int(http.StatusLengthRequired)
	case r.Body == nil:
		return C.int(http.StatusBadRequest)
	default:
		b, err := ioutil.ReadAll(r.Body)
		r.Body.Close()
		if err != nil {
			return C.int(http.StatusInternalServerError)
		}
		set_body(b, body, bodyLen)
		return C.int(0)
	}
}
// SetStatus writes the HTTP status code for the response and records it
// for access logging. Returns 0 on success, -1 for an unknown client id.
//export SetStatus
func SetStatus(clientId C.int, code C.int) C.int {
	c := GetObjById(int32(clientId))
	if c == nil {
		return C.int(-1)
	}
	client := c.(*Client)
	w := client.w
	status := int(code)
	w.WriteHeader(status)
	client.status = status
	return C.int(0)
}
// SetRespHeader sets (replaces) a response header. Must be called before
// SetStatus/OutputChunk to take effect. Returns 0 on success, -1 for an
// unknown client id.
//export SetRespHeader
func SetRespHeader(clientId C.int, name, val *C.char) C.int {
	c := GetObjById(int32(clientId))
	if c == nil {
		return C.int(-1)
	}
	client := c.(*Client)
	w := client.w
	w.Header().Set(C.GoString(name), C.GoString(val))
	return C.int(0)
}
// AddRespHeader appends a response header value (keeping existing ones).
// Returns 0 on success, -1 for an unknown client id.
//export AddRespHeader
func AddRespHeader(clientId C.int, name, val *C.char) C.int {
	c := GetObjById(int32(clientId))
	if c == nil {
		return C.int(-1)
	}
	client := c.(*Client)
	w := client.w
	w.Header().Add(C.GoString(name), C.GoString(val))
	return C.int(0)
}
// OutputChunk writes length bytes from the C buffer chunk to the
// response body; a negative length means chunk is NUL-terminated.
// Returns the number of bytes written, or -1 on error/unknown id.
//export OutputChunk
func OutputChunk(clientId C.int, chunk *C.char, length C.int) C.int {
	c := GetObjById(int32(clientId))
	if c == nil {
		return C.int(-1)
	}
	client := c.(*Client)
	w := client.w
	if length < 0 {
		length = C.int(C.strlen(chunk))
	}
	// construct []byte with the same memory of chunk (zero-copy view of
	// the C buffer; valid only for the duration of this call)
	var b []byte
	var bs = (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bs.Data = uintptr(unsafe.Pointer(chunk))
	bs.Len = int(length)
	bs.Cap = int(length)
	bytesSent, err := w.Write(b)
	if err != nil {
		return C.int(-1)
	}
	client.bytesSent += bytesSent
	return C.int(bytesSent)
}
// main is required for a c-shared/c-archive build; all real entry
// points are the exported C functions above.
func main() {
}
|
package server
import (
"github.com/gin-gonic/gin"
"github.com/sanguohot/medichain/service"
"github.com/sanguohot/medichain/util"
"github.com/sanguohot/medichain/zap"
uberZap "go.uber.org/zap"
"io/ioutil"
"net/http"
"time"
)
// PongHandler answers a health-check ping with {"message": "pong"}.
func PongHandler(c *gin.Context) {
	resp := gin.H{"message": "pong"}
	c.JSON(http.StatusOK, resp)
}
// PingSleepOneSecHandler is PongHandler with an artificial one-second
// delay, useful for latency/timeout testing.
func PingSleepOneSecHandler(c *gin.Context) {
	time.Sleep(time.Second)
	resp := gin.H{"message": "pong"}
	c.JSON(http.StatusOK, resp)
}
// DoJsonResponse writes the uniform JSON envelope: on success a 200
// with Code "SUCC" and payload p under Data; on error a 400 with Code
// "FAIL" plus a structured log entry describing the failed request.
func DoJsonResponse(c *gin.Context, err error, p interface{}) {
	if err == nil {
		c.JSON(http.StatusOK, gin.H{
			"Code": "SUCC",
			"Data": p,
		})
		return
	}
	c.JSON(http.StatusBadRequest, gin.H{
		"Code":    "FAIL",
		"Message": err.Error(),
	})
	zap.Logger.Error(
		err.Error(),
		uberZap.String("remote", c.Request.RemoteAddr),
		uberZap.String("method", c.Request.Method),
		uberZap.String("url", c.Request.RequestURI),
		uberZap.Any("form", c.Request.Form),
		uberZap.Any("param", c.Params),
	)
}
// AddUserHandler creates a user from the posted form fields.
func AddUserHandler(c *gin.Context) {
	err, obj := service.AddUser(
		c.PostForm("orgUuid"),
		c.PostForm("idCartNo"),
		c.PostForm("password"),
	)
	DoJsonResponse(c, err, obj)
}
// AddOrgHandler creates an organization from the posted form fields.
func AddOrgHandler(c *gin.Context) {
	err, obj := service.AddOrg(c.PostForm("name"), c.PostForm("password"))
	DoJsonResponse(c, err, obj)
}
// AddFileHandler stores a file record from posted form fields plus an
// optional multipart "file" upload. When no file part is supplied,
// fileBytes stays nil and only the metadata (including the caller-
// provided sha256Hash) is passed along.
func AddFileHandler(c *gin.Context) {
	var (
		err       error
		fileBytes []byte
	)
	addressStr := c.PostForm("address")
	password := c.PostForm("password")
	ownerUuidStr := c.PostForm("ownerUuid")
	orgUuidStr := c.PostForm("orgUuid")
	fileType := c.PostForm("fileType")
	fileUrl := c.PostForm("fileUrl")
	fileDesc := c.PostForm("fileDesc")
	sha256Hash := c.PostForm("sha256Hash")
	// The upload error is deliberately ignored: a missing file part is
	// acceptable and leaves file == nil.
	file, fileHeader , _ := c.Request.FormFile("file")
	if file != nil && fileHeader != nil && fileHeader.Size > 0 {
		fileBytes, err = ioutil.ReadAll(file)
		if err != nil {
			DoJsonResponse(c, err, nil)
			return
		}
	}
	err, obj := service.AddFile(ownerUuidStr, orgUuidStr, addressStr, password, fileType, fileDesc, fileUrl, fileBytes, sha256Hash)
	DoJsonResponse(c, err, obj)
}
// AddFileSignHandler attaches a signature to the file identified by the
// :fileUuid path parameter.
func AddFileSignHandler(c *gin.Context) {
	err, obj := service.AddFileSign(
		c.Param("fileUuid"),
		c.PostForm("address"),
		c.PostForm("password"),
		c.PostForm("keccak256Hash"),
	)
	DoJsonResponse(c, err, obj)
}
// GetFileHandler streams the stored file identified by :fileUuid as an
// attachment, with a content type auto-detected from the file bytes.
func GetFileHandler(c *gin.Context) {
	fileUuidStr := c.Param("fileUuid")
	err, file := service.GetFile(fileUuidStr)
	if err != nil {
		DoJsonResponse(c, err, nil)
		return
	}
	// Download as attachment named after the file UUID.
	c.Header("content-disposition", `attachment; filename=` + fileUuidStr)
	// charset=utf-8
	// application/octet-stream
	defaultDetect := util.DetectContentType(file)
	zap.Sugar.Infof("file %s content type auto detect ===> %s", fileUuidStr, defaultDetect)
	c.Data(http.StatusOK, defaultDetect, file)
	//c.Data(http.StatusOK, "text/plain;charset=utf8;gbk", file)
}
// GetFileSignerAndDataListHandler lists the signers (paginated via
// start/limit query params) of the file identified by :fileUuid.
func GetFileSignerAndDataListHandler(c *gin.Context) {
	err, list := service.GetFileSignerAndDataList(
		c.Param("fileUuid"), c.Query("start"), c.Query("limit"))
	DoJsonResponse(c, err, list)
}
// GetFileAddLogListHandler lists file-add log entries filtered by
// idCartNo/orgUuid and a time window, paginated via start/limit.
func GetFileAddLogListHandler(c *gin.Context) {
	err, list := service.GetFileAddLogList(
		c.Query("idCartNo"),
		c.Query("orgUuid"),
		c.Query("fromTime"),
		c.Query("toTime"),
		c.Query("start"),
		c.Query("limit"),
	)
	DoJsonResponse(c, err, list)
}
// GetFileAddLogDetailHandler returns the add-log detail for :fileUuid.
func GetFileAddLogDetailHandler(c *gin.Context) {
	err, detail := service.GetFileAddLogDetail(c.Param("fileUuid"))
	DoJsonResponse(c, err, detail)
}
// CheckTransactionResultHandler reports whether the transaction
// identified by the :hash path parameter succeeded.
func CheckTransactionResultHandler(c *gin.Context) {
	ok := service.CheckTransactionResult(c.Param("hash"))
	// The field name (including the historical "TransationResult"
	// spelling) is part of the JSON response contract — do not rename.
	DoJsonResponse(c, nil, struct {
		TransationResult bool
	}{TransationResult: ok})
}
|
package main
import (
"flag"
"github.com/pkg/profile"
"github.com/sergeyfrolov/gotapdance/tapdance"
"github.com/sergeyfrolov/gotapdance/tdproxy"
"os"
)
// main starts a TapDance client proxy with CPU profiling enabled.
func main() {
	// Keep an explicit handle on the profiler: os.Exit skips deferred
	// calls, so the original `defer profile.Start().Stop()` silently
	// discarded the profile on the error path below.
	prof := profile.Start()
	defer prof.Stop()

	var port = flag.Int("port", 10500, "TapDance will listen for connections on this port.")
	var assets_location = flag.String("assetsdir", "./assets/", "Folder to read assets from.")
	flag.Parse()
	tapdance.AssetsFromDir(*assets_location)
	tapdanceProxy := tdproxy.NewTapDanceProxy(*port)
	if err := tapdanceProxy.ListenAndServe(); err != nil {
		tdproxy.Logger.Errorf("Failed to ListenAndServe(): %v\n", err)
		prof.Stop() // flush the profile before exiting
		os.Exit(1)
	}
}
|
/*
Custom storage-cient (for benchmarking purposes).
Edit IPs with the IPs of the server you want to connect to
Program can be runned like this: go run main.go numberOfRequests mode(r/read/Read/READ or w/Write/write/WRITE)
@author: Andrea Esposito
*/
package main
import (
"context"
"encoding/csv"
"errors"
"fmt"
"log"
"os"
"strconv"
"strings"
"sync"
"time"
pb "github.com/AndreaEsposit/bachelors-thesis/storage_server/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/grpc"
)
// wg tracks outstanding per-server requests for one benchmark round.
var wg sync.WaitGroup

// either benchmarking type 1 or 2 (1: wait for all replies; 2: wait
// until the last reply — see singleWrite/singleRead).
var benchmarkingType = 0

// IPs is used to specify the IPs that we will connect to
var IPs []string

// con10b is a 10-byte test payload.
const con10b = "Testing!!!"

// con1kb is a ~1 KB test payload.
const con1kb = "Bruce Wayne was born to wealthy physician Dr. Thomas Wayne and his wife Martha, who were themselves members of the prestigious Wayne and Kane families of Gotham City, respectively. When he was three, Bruce's mother Martha was expecting a second child to be named Thomas Wayne, Jr. However, because of her intent to found a school for the underprivileged in Gotham, she was targeted by the manipulative Court of Owls, who arranged for her to have a car accident. She and Bruce survived, but the accident forced Martha into premature labor, and the baby was lost. While on vacation to forget about these events, the Wayne Family butler, Jarvis Pennyworth was killed by one of the Court of Owls' Talons. A letter he'd written to his son Alfred, warning him away from the beleaguered Wayne family, was never delivered. As such, Alfred - who had been an actor at the Globe Theatre at the time and a military medic before that, traveled to Gotham City to take up his father's place, serving the Waynes....."

// con1Mb is a ~100 KB payload (con1kb repeated 100 times).
// NOTE(review): the name says 1Mb but 100 * ~1KB is ~100KB — confirm.
var con1Mb = strings.Repeat(con1kb, 100)

// nRequests is the remaining request count; decremented per round.
var nRequests = 0
// main connects to every server in IPs, runs nRequests read or write
// rounds against all of them, then dumps per-round latencies and
// completion times to result-client<id>.csv.
//
// NOTE(review): despite the header comment describing two arguments,
// the code requires four: os.Args[1]=request count, [2]=mode (r/w),
// [3]=client id for the CSV name, [4]=benchmarking type — running with
// fewer panics on the index access.
func main() {
	IPs = []string{"152.94.162.17:50051"}
	//IPs = []string{"152.94.162.17:50051", "152.94.162.18:50051", "152.94.162.19:50051"}
	//IPs = []string{"localhost:50051", "localhost:50052", "localhost:50053"}
	clients := map[int]pb.StorageClient{}
	// creates connections to each server
	for i, ip := range IPs {
		conn, err := grpc.Dial(ip, grpc.WithInsecure())
		check(err)
		fmt.Printf("Connected to: %v\n", ip)
		clients[i] = pb.NewStorageClient(conn)
	}
	var err error
	// define benchmarking type
	benchmarkingType, err = strconv.Atoi(os.Args[4])
	check(err)
	// get number of requests
	nRequests, err = strconv.Atoi(os.Args[1])
	check(err)
	// define benchmarking mode (read/write)
	mode := os.Args[2]
	var latencies []time.Duration
	var doneTimes []int64 //Unix format
	// Each mWrite/mRead call decrements nRequests by one.
	for nRequests != 0 {
		if mode == "w" || strings.ToLower(mode) == "write" {
			timep, err := ptypes.TimestampProto(time.Now())
			check(err)
			message := pb.WriteRequest{FileName: "test", Value: con10b, Timestamp: timep}
			// run requests to all servers specified by IPs
			mWrite(clients, &message, &latencies, &doneTimes)
		} else if mode == "r" || strings.ToLower(mode) == "read" {
			message := pb.ReadRequest{FileName: "test"}
			// run requests to all servers specified by IPs
			mRead(clients, &message, &latencies, &doneTimes)
		}
	}
	// wait before you write to file
	time.Sleep(10 * time.Second)
	file, err := os.Create("result-client" + os.Args[3] + ".csv")
	check(err)
	defer file.Close()
	writer := csv.NewWriter(file)
	defer writer.Flush()
	err = writer.Write([]string{"Latency(MicroSeconds)", "Time(UnixFormat)"})
	checkError("cannot write to file", err)
	for i, value := range latencies {
		s := []string{strconv.Itoa(int(value.Microseconds())), strconv.Itoa(int(doneTimes[i]))}
		err := writer.Write(s)
		checkError("cannot write to file", err)
	}
}
// panic if error
func check(err error) {
if err != nil {
panic(err)
}
}
// checkError aborts the process via log.Fatal with the given message
// when err is non-nil; nil errors are a no-op.
func checkError(message string, err error) {
	if err == nil {
		return
	}
	log.Fatal(message, err)
}
// mesures latency of a request
func measureTime(latencies *[]time.Duration) func() {
start := time.Now()
return func() {
*latencies = append(*latencies, time.Since(start))
}
}
// mWrite fires one Write request at every server concurrently, blocks
// until the round completes (per benchmarkingType accounting in
// singleWrite), then records the round latency and completion time.
func mWrite(clients map[int]pb.StorageClient, message *pb.WriteRequest, latencies *[]time.Duration, times *[]int64) {
	// The deferred func records elapsed time when mWrite returns.
	defer measureTime(latencies)()
	activeRequests := len(clients)
	var lock sync.Mutex
	for _, client := range clients {
		wg.Add(1)
		go singleWrite(client, message, &activeRequests, &lock)
	}
	wg.Wait()
	// -1 total requests
	nRequests--
	*times = append(*times, time.Now().Unix())
}
// singleWrite issues one Write RPC and signals completion on wg.
// Type 1: every response counts (mWrite waits for all servers).
// Type 2: all but the last response release wg immediately, and the
// final outstanding one releases a second token so mWrite unblocks as
// soon as the last reply arrives.
func singleWrite(client pb.StorageClient, message *pb.WriteRequest, activeRequests *int, mu *sync.Mutex) {
	_, err := client.Write(context.Background(), message)
	check(err)
	if benchmarkingType == 1 {
		wg.Done()
	} else if benchmarkingType == 2 {
		mu.Lock() // take lock
		if *activeRequests > 1 {
			wg.Done()
			*activeRequests-- // -1 active requests
			fmt.Println(*activeRequests)
			if *activeRequests == 1 {
				wg.Done() // remove last one from waiting list
			}
		}
		mu.Unlock() // relese lock
	}
}
// mRead fires one Read request at every server concurrently, blocks
// until the round completes (per benchmarkingType accounting in
// singleRead), then records the round latency and completion time.
func mRead(clients map[int]pb.StorageClient, message *pb.ReadRequest, latencies *[]time.Duration, times *[]int64) {
	// The deferred func records elapsed time when mRead returns.
	defer measureTime(latencies)()
	activeRequests := len(clients)
	var lock sync.Mutex
	for _, client := range clients {
		wg.Add(1)
		go singleRead(client, message, &activeRequests, &lock)
	}
	wg.Wait()
	// -1 total requests
	nRequests--
	*times = append(*times, time.Now().Unix())
}
// singleRead issues one Read RPC and signals completion on wg, using
// the same type-1/type-2 accounting as singleWrite.
func singleRead(client pb.StorageClient, message *pb.ReadRequest, activeRequests *int, mu *sync.Mutex) {
	res, err := client.Read(context.Background(), message)
	// BUG FIX: the RPC error was previously discarded (res, _ := ...),
	// which masked transport failures; fail fast like the write path.
	check(err)
	if res.GetOk() == 0 {
		panic(errors.New("file is not present in one of the servers"))
	}
	if benchmarkingType == 1 {
		wg.Done()
	} else if benchmarkingType == 2 {
		mu.Lock() // take lock
		if *activeRequests > 1 {
			wg.Done()
			*activeRequests-- // -1 active requests
			fmt.Println(*activeRequests)
			if *activeRequests == 1 {
				wg.Done() // remove last one from waiting list
			}
		}
		mu.Unlock() // release lock
	}
}
|
package binomialheap
import (
"fmt"
)
// BiNode is one node of a binomial tree: key is the priority, degree
// the number of children, and child/sibling/parent the usual binomial
// heap links (siblings form a singly linked list).
type BiNode struct {
	key     int
	degree  int
	child   *BiNode
	sibling *BiNode
	parent  *BiNode
}
// Blist is a binomial heap: head points at the root list (linked via
// sibling) and size counts stored keys.
type Blist struct {
	head *BiNode
	size int
}
// CreateNewHeap returns an empty binomial heap.
func CreateNewHeap() *Blist {
	return &Blist{}
}
// CreateNewNode returns a degree-0 node holding keyval with no links.
func CreateNewNode(keyval int) *BiNode {
	return &BiNode{key: keyval}
}
// Insert adds newnode to the heap, merging equal-degree trees as needed.
func (bh *Blist) Insert(newnode *BiNode) {
	bh.size++
	bh.addinto(newnode)
}
// Give inserts a new entry with priority prior.
// NOTE(review): val is currently discarded — BiNode stores no payload,
// so only the priority survives; confirm this is intended.
func (bh *Blist) Give(val string, prior int) {
	newn := CreateNewNode(prior)
	bh.Insert(newn)
}
// Take removes and returns the minimum key (alias for Pop).
func (bh *Blist) Take() int {
	return bh.Pop()
}
// Pop removes and returns the minimum key in the heap. On an empty heap
// it returns 0; callers should check size beforehand.
//
// BUG FIX: the empty case previously executed `return minival.key` with
// minival == nil (nil dereference panic) and decremented size before
// the check, corrupting the count.
func (bh *Blist) Pop() int {
	minival := bh.head
	if minival == nil {
		return 0
	}
	bh.size--
	// Scan the root list for the minimum root.
	for track := bh.head; track != nil; track = track.sibling {
		if track.key < minival.key {
			minival = track
		}
	}
	// Detach the minimum root, then merge each of its children back
	// into the root forest.
	removeFromHead(&bh.head, minival)
	for _, child := range iterator(minival.child) {
		removeFromHead(&minival.child, child)
		bh.addinto(child)
	}
	return minival.key
}
// addinto merges newnode into the root forest, repeatedly joining trees
// of equal degree (the binomial-heap carry step).
//
// FIX: removed leftover debug output that called fmt.Println with
// Printf-style format strings (it printed literal "%v" / "/n" text).
func (bf *Blist) addinto(newnode *BiNode) {
	samedegrenode := checkForSameDegree(bf.head, newnode.degree)
	if samedegrenode == nil {
		insertToForest(&bf.head, newnode)
		return
	}
	// An equal-degree root exists: unlink it, join the two trees, and
	// retry with the combined (degree+1) tree.
	removeFromHead(&bf.head, samedegrenode)
	bf.addinto(joinnode(samedegrenode, newnode))
}
// joinnode merges two binomial trees: the node kept as root adopts the
// other and its degree increases by one.
func joinnode(n1 *BiNode, n2 *BiNode) *BiNode {
	root, attached := n2, n1
	if n1.degree < n2.degree {
		root, attached = n1, n2
	}
	root.degree++
	root.adopt(attached)
	return root
}
// adopt makes n2 a child of n1.
func (n1 *BiNode) adopt(n2 *BiNode) {
	// BUG FIX: the child must be linked into n1's child list, not its
	// sibling list. Inserting into n1.sibling spliced the new child into
	// the root forest and left n1.child empty, so Pop never re-inserted
	// any children of the removed minimum.
	insertToForest(&n1.child, n2)
	n2.parent = n1
}
// removeFromHead unlinks samedegreenode from the singly linked list
// anchored at *bf and clears its sibling pointer.
//
// FIX: removed a leftover "INSIDE sibling" debug print.
func removeFromHead(bf **BiNode, samedegreenode *BiNode) {
	left := getleftnode(*bf, samedegreenode)
	if left == nil {
		// Node is the list head: advance the head pointer.
		*bf = samedegreenode.sibling
	} else {
		left.sibling = samedegreenode.sibling
	}
	samedegreenode.sibling = nil
}
// getleftnode returns the node immediately preceding node in the list
// starting at head, or nil when node is the head itself.
// NOTE(review): if node is not in the list the walk dereferences nil —
// callers must pass a node known to be present.
func getleftnode(head *BiNode, node *BiNode) *BiNode {
	if head == node {
		return nil
	}
	prev := head
	for prev.sibling != node {
		prev = prev.sibling
	}
	return prev
}
// checkForSameDegree returns the first root in the list starting at bf
// whose degree equals newdegree, or nil when none exists.
//
// FIX: removed leftover debug output that called fmt.Println with
// Printf-style format strings (it printed literal "%v" text).
func checkForSameDegree(bf *BiNode, newdegree int) *BiNode {
	for cnode := bf; cnode != nil; cnode = cnode.sibling {
		if cnode.degree == newdegree {
			return cnode
		}
	}
	return nil
}
// insertToForest links newnode into the list at *bf, keeping nodes in
// descending-degree order.
//
// FIX: removed a leftover debug print; collapsed the four-way branch —
// the only distinction that matters is whether an insertion point
// before `next` was found (prev == nil means head insertion). As a
// robustness improvement newnode.sibling is always set explicitly, so a
// stale sibling pointer can no longer leak into the list.
func insertToForest(bf **BiNode, newnode *BiNode) {
	var prev *BiNode
	next := *bf
	for next != nil && newnode.degree < next.degree {
		prev = next
		next = next.sibling
	}
	if prev == nil {
		// Insert at the (possibly empty) head.
		newnode.sibling = *bf
		*bf = newnode
		return
	}
	prev.sibling = newnode
	newnode.sibling = next
}
// iterator collects parent and all of its siblings into a slice, in
// list order.
//
// FIX: removed leftover debug output that passed a Printf-style format
// string ("/narray is %v") to fmt.Println.
func iterator(parent *BiNode) []*BiNode {
	var nodes []*BiNode
	for track := parent; track != nil; track = track.sibling {
		nodes = append(nodes, track)
	}
	return nodes
}
// PrintNode prints the node's key, degree, and sibling pointer.
func (n1 *BiNode) PrintNode() {
	// FIX: the newline escape was mistyped as "/n", printing a literal
	// slash-n instead of a line break.
	fmt.Printf("\n KEY: %d degree: %d sibling is %v", n1.key, n1.degree, n1.sibling)
}
// Print_Level prints the receiver pointer and then its node details
// via PrintNode. (Debug helper.)
func (bf *BiNode) Print_Level() {
	fmt.Printf("bf is %v", bf)
	bf.PrintNode()
}
// PrintValue dumps the whole root list of the heap: the head pointer
// followed by every root node in sibling order. When the heap is empty
// it additionally prints a notice (iterator(nil) yields no nodes, so
// the loop body is skipped).
// Fix: fmt.Println was called with a printf-style format string
// (go vet defect — it printed the literal "%v"); switched to Printf.
func (bh *Blist) PrintValue() {
	if bh.head == nil {
		fmt.Print("heap is empty.")
	}
	fmt.Printf("\n head is %v\n", bh.head)
	for _, node := range iterator(bh.head) {
		fmt.Printf("value of node is %v", node)
		node.Print_Level()
	}
}
|
package main
import (
"github.com/qiniu/db/mgoutil.v3"
"gopkg.in/mgo.v2/bson"
)
// M is a shorthand for a generic string-keyed document.
type M map[string]interface{}

// User is the mongo document for an end user (collection "users").
type User struct {
	UserId bson.ObjectId `json:"userId" bson:"_id"`
	Phone string `json:"phone" bson:"phone"` // user ID (NOTE(review): comment sits on Phone — possibly misplaced)
	Name string `json:"name" bson:"name"` // user name
	Avatar string `json:"avatar" bson:"avatar"`
	Dealed bool `json:"dealed" bson:"dealed"`
	CreatedAt string `json:"createdAt" bson:"createdAt"`
}

// ZuoPin is the mongo document for a submitted work ("zuopins").
type ZuoPin struct {
	Id string `json:"id" bson:"_id"`
	UserId string `json:"userId" bson:"userId"` // owning user's ID
	Name string `json:"name" bson:"name"`
	Num string `json:"num" bson:"num"`
	ZPName string `json:"zpName" bson:"zpName"`
	Stars int `json:"stars" bson:"stars"`
	Images []string `json:"images" bson:"images"`
	CreateAt string `json:"createAt" bson:"createAt"`
}

// Lottery is the mongo document for a lottery entry ("lotterys").
type Lottery struct {
	Id string `json:"id" bson:"_id"`
	Name string `json:"name" bson:"name"`
	Phone string `json:"phone" bson:"phone"`
	CreateAt string `json:"createAt" bson:"createAt"`
}

// CurrNum holds a named counter ("currNum").
type CurrNum struct {
	Type string `json:"type" bson:"type"`
	Num int `json:"num" bson:"num"`
}

// Collections groups every mongo collection used by this service;
// mgoutil binds each field to the collection named in its `coll` tag.
type Collections struct {
	CurrNumColl mgoutil.Collection `coll:"currNum"`
	UserColl mgoutil.Collection `coll:"users"`
	ZuoPinColl mgoutil.Collection `coll:"zuopins"`
	LotterysColl mgoutil.Collection `coll:"lotterys"`
}

// EnsureIndex is the hook for creating mongo indexes; currently a
// no-op.
func (db *Collections) EnsureIndex() {
}
|
package oauth2
import (
"errors"
)
// ReferenceToken describes a stored OAuth2 reference token: an opaque
// identifier that maps to the actual access token issued to a client.
type ReferenceToken interface {
	TokenID() string
	ClientID() string
	Expiry() int64
	AccessToken() string
}

// referenceToken stores all relevant information for reference tokens.
type referenceToken struct {
	tokenID     string
	clientID    string
	expiry      int64
	accessToken string
}

// TokenID returns the opaque identifier of the reference token.
func (r referenceToken) TokenID() string {
	return r.tokenID
}

// ClientID returns the client the token was issued to.
func (r referenceToken) ClientID() string {
	return r.clientID
}

// Expiry returns the token's expiry timestamp (unit defined by the
// caller that created the token).
func (r referenceToken) Expiry() int64 {
	return r.expiry
}

// AccessToken returns the underlying access token.
func (r referenceToken) AccessToken() string {
	return r.accessToken
}

// NewReferenceToken creates a reference token. tokenID, clientID and
// accessToken must be non-empty; expiry is accepted as-is.
// Change: replaced the unidiomatic len(s) < 1 emptiness checks with
// direct comparison against "".
func NewReferenceToken(tokenID string, clientID string, expiry int64, accessToken string) (ReferenceToken, error) {
	if tokenID == "" {
		return nil, errors.New("tokenID is required")
	}
	if clientID == "" {
		return nil, errors.New("clientID is required")
	}
	if accessToken == "" {
		return nil, errors.New("accessToken is required")
	}
	return referenceToken{
		tokenID:     tokenID,
		clientID:    clientID,
		expiry:      expiry,
		accessToken: accessToken,
	}, nil
}
|
package common
import "time"
// Report is a user-submitted report: a timestamped message with an
// optional photo reference and a type tag.
type Report struct {
	Timestamp time.Time
	Message string
	PhotoId string
	PhotoCaption string
	Type string
}

// ReportInfo mirrors Report with location data added: coordinates and
// a preformatted distance string.
type ReportInfo struct {
	Timestamp time.Time
	Message string
	PhotoId string
	PhotoCaption string
	Type string
	Latitude float64
	Longitude float64
	Dist string
}
|
package main
import (
"bufio"
"fmt"
"os"
)
// readLines loads the whole file at path into memory, returning one
// []rune per line (line terminators stripped by the scanner).
func readLines(path string) ([][]rune, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	var lines [][]rune
	for scanner.Scan() {
		lines = append(lines, []rune(scanner.Text()))
	}
	// Report any scan error alongside whatever was read.
	return lines, scanner.Err()
}
// hits counts '#' cells met while tobogganing through data from the
// top-left with the given (right, down) slope; columns wrap modulo the
// row width. The per-slope tally is also printed.
func hits(data [][]rune, right, down int) int {
	trees := 0
	for row := 0; row < len(data); row += down {
		line := data[row]
		col := (row / down * right) % len(line)
		if line[col] == '#' {
			trees++
		}
	}
	fmt.Printf("Right %d, down %d -> Trees = %d\n", right, down, trees)
	return trees
}
// main solves AoC 2020 day 3: multiply the tree counts of the five
// required slopes over the grid in input.txt.
func main() {
	grid, err := readLines("input.txt")
	if err != nil {
		panic(err)
	}
	// (right, down) pairs mandated by the puzzle, in the same order as
	// before so the per-slope output is unchanged.
	slopes := [][2]int{{1, 1}, {3, 1}, {5, 1}, {7, 1}, {1, 2}}
	product := 1
	for _, s := range slopes {
		product *= hits(grid, s[0], s[1])
	}
	fmt.Printf("result = %d\n", product)
}
|
package main
import (
"bytes"
"container/list"
"crypto/rand"
"encoding/hex"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"sync"
"time"
"github.com/op/go-logging"
"github.com/ugorji/go/codec"
"gopkg.in/redis.v3"
)
var logger = logging.MustGetLogger("cachier")
var format = logging.MustStringFormatter(
`%{color}%{time:15:04:05.000} ▶ %{level:.4s} %{color:reset} %{message}`,
)
// ProxyOptions configures a caching Proxy.
type ProxyOptions struct {
	ValuePrefix string // redis key prefix for cached payloads (default "value-")
	LockPrefix string // redis key prefix for render locks (default "lock-")
	TopicName string // pub/sub topic for render events (default "cachier")
	CookieName string // requests carrying this cookie bypass the cache; empty disables
	BackendURL string // origin server URL; required
	RenderTimeout time.Duration // backend request / lock / wait timeout (default 5s)
	CacheDuration time.Duration // TTL of cached responses (default 5s)
	StatsInterval time.Duration // stats dump period; 0 disables the ticker
	LogLevel int // NOTE(review): not read anywhere visible in this file — confirm use
}
// Proxy is a caching reverse proxy: it serves rendered pages from
// redis, lets exactly one request render on a miss (via a redis lock)
// and coordinates the waiters through redis pub/sub.
type Proxy struct {
	PubSub *redis.PubSub // subscription carrying renderEvent messages
	RedisClient *redis.Client
	Mutex *sync.Mutex // guards Channels
	Channels map[string]*list.List // URL -> list of chan *renderEvent waiters
	Options *ProxyOptions
	StatsTicker *time.Ticker // nil when StatsInterval is 0
	reverse *httputil.ReverseProxy // used for passthrough requests
	httpClient *http.Client // used for cacheable backend renders
	backendURL *url.URL
	encoderHandle codec.Handle // msgpack codec for cached payloads/events
}
// proxyListener is one request waiting for a URL being rendered
// elsewhere; Element points at its entry in Proxy.Channels[URL].
type proxyListener struct {
	URL string
	Done chan *renderEvent
	Element *list.Element
}

// renderResult is a rendered (or failed) backend response.
type renderResult struct {
	Result []byte
	Status int
}

// renderEvent is the pub/sub message announcing a finished render.
type renderEvent struct {
	URL string `codec:"u"`
	Location string `codec:"l"`
	Status int `codec:"s"`
}
// NewProxy builds a Proxy in front of options.BackendURL: it fills in
// defaults for unset options, subscribes to the render-event pub/sub
// topic, optionally starts a stats-dump ticker, and launches the
// background pub/sub reader. It panics when BackendURL is empty or
// unparsable.
func NewProxy(redisClient *redis.Client, options *ProxyOptions) *Proxy {
	if len(options.BackendURL) == 0 {
		logger.Panic("BackendURL is empty")
	}
	// Defaults for anything the caller left zero-valued.
	if len(options.LockPrefix) == 0 {
		options.LockPrefix = "lock-"
	}
	if len(options.ValuePrefix) == 0 {
		options.ValuePrefix = "value-"
	}
	if len(options.TopicName) == 0 {
		options.TopicName = "cachier"
	}
	if options.RenderTimeout == 0 {
		options.RenderTimeout = 5 * time.Second
	}
	if options.CacheDuration == 0 {
		options.CacheDuration = 5 * time.Second
	}
	// The ticker stays nil when stats are disabled.
	var ticker *time.Ticker
	if options.StatsInterval != 0 {
		ticker = time.NewTicker(options.StatsInterval)
	}
	logger.Infof("init %v", options)
	pubsub := redisClient.PubSub()
	pubsub.Subscribe(options.TopicName)
	backendURL, err := url.Parse(options.BackendURL)
	if err != nil {
		log.Panic(err)
	}
	proxy := &Proxy{
		Channels: make(map[string]*list.List),
		RedisClient: redisClient,
		PubSub: pubsub,
		Mutex: &sync.Mutex{},
		Options: options,
		StatsTicker: ticker,
		backendURL: backendURL,
		reverse: httputil.NewSingleHostReverseProxy(backendURL),
		httpClient: &http.Client{
			Timeout: options.RenderTimeout,
		},
		encoderHandle: &codec.MsgpackHandle{},
	}
	// Periodically log listener counts when stats are enabled.
	if ticker != nil {
		go func() {
			for range ticker.C {
				stats := proxy.GetStats()
				logger.Info("stats:", stats)
			}
		}()
	}
	// Pump render events from redis to local waiters for the process
	// lifetime.
	go proxy.readRenderEvents()
	return proxy
}
// Close releases background resources: the stats ticker (when one was
// started) and the redis pub/sub subscription. An unsubscribe failure
// panics.
func (proxy Proxy) Close() {
	if t := proxy.StatsTicker; t != nil {
		t.Stop()
	}
	if err := proxy.PubSub.Unsubscribe(proxy.Options.TopicName); err != nil {
		logger.Panic(err)
	}
}
// lock acquires the proxy-wide mutex guarding Channels.
func (proxy Proxy) lock() {
	proxy.Mutex.Lock()
}

// unlock releases the proxy-wide mutex.
func (proxy Proxy) unlock() {
	proxy.Mutex.Unlock()
}
// makeLockVal returns a random 16-hex-char value used to tag this
// instance's redis render lock.
// Fixes: the local variable shadowed the imported "bytes" package, and
// the crypto/rand.Read error was silently discarded.
func (proxy Proxy) makeLockVal() string {
	buf := make([]byte, 8)
	if _, err := rand.Read(buf); err != nil {
		// crypto/rand failing means no safe randomness is available.
		panic(err)
	}
	return hex.EncodeToString(buf)
}
// closeListener removes listener from the waiter list of its URL,
// deletes the map entry when it was the last waiter, and closes its
// Done channel. Must not be called twice for the same listener.
func (proxy Proxy) closeListener(listener *proxyListener) {
	proxy.lock()
	defer proxy.unlock()
	old := proxy.Channels[listener.URL]
	old.Remove(listener.Element)
	if old.Len() == 0 {
		delete(proxy.Channels, listener.URL)
	}
	close(listener.Done)
}
// makeListener registers a new waiter channel for url (creating the
// per-URL list on first use) and returns the handle needed to wait on
// or deregister it.
// Fix: the original shadowed the builtin `new` with a local variable
// name; renamed and folded the two branches together.
func (proxy Proxy) makeListener(url string) *proxyListener {
	proxy.lock()
	defer proxy.unlock()
	channel := make(chan *renderEvent)
	listeners, ok := proxy.Channels[url]
	if !ok {
		listeners = list.New()
		proxy.Channels[url] = listeners
	}
	return &proxyListener{
		URL:     url,
		Done:    channel,
		Element: listeners.PushBack(channel),
	}
}
// writeResult writes the rendered result to the client. A 503 result
// gets a fixed "503" body; anything else is forwarded verbatim. A zero
// status is logged as critical (it means the result was never filled).
func (proxy Proxy) writeResult(writer http.ResponseWriter, result *renderResult) {
	if result.Status == 0 {
		logger.Critical("result status is 0")
	}
	status, body := result.Status, result.Result
	if status == 503 {
		body = []byte("503")
	}
	writer.WriteHeader(status)
	writer.Write(body)
}
// makeBackendRequest renders request.URL's path+query against the
// configured backend and fills result with the status and body; any
// transport or read failure is mapped to status 503.
// Fix: the response body was never closed, leaking the connection and
// preventing the http.Client from reusing it.
func (proxy Proxy) makeBackendRequest(request *http.Request, result *renderResult) {
	backendURL := url.URL{
		Scheme: proxy.backendURL.Scheme,
		Host: proxy.backendURL.Host,
		Path: request.URL.Path,
		RawQuery: request.URL.RawQuery,
	}
	response, err := proxy.httpClient.Get(backendURL.String())
	if err != nil {
		result.Status = 503
		return
	}
	defer response.Body.Close()
	result.Status = response.StatusCode
	result.Result, err = ioutil.ReadAll(response.Body)
	if err != nil {
		result.Status = 503
	}
}
// ServeHTTP implements the caching proxy:
//  1. Non-GET/HEAD/OPTIONS requests, or requests carrying the
//     configured cookie, are reverse-proxied straight to the backend.
//  2. Otherwise a cached value (keyed by the request URI) is served.
//  3. On a miss, the first request to win the redis SETNX lock renders
//     via the backend, caches 200 responses, and publishes a render
//     event; concurrent requests for the same URL wait for that event
//     or time out with a 503.
func (proxy Proxy) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
	url := request.URL.RequestURI()
	{
		passthrough := request.Method != http.MethodGet && request.Method != http.MethodHead && request.Method != http.MethodOptions
		if !passthrough {
			if len(proxy.Options.CookieName) != 0 {
				// Presence of the session cookie disables caching.
				if _, err := request.Cookie(proxy.Options.CookieName); err == nil {
					passthrough = true
				}
			}
		}
		if passthrough {
			logger.Debugf("passthrough %s", url)
			proxy.reverse.ServeHTTP(writer, request)
			return
		}
	}
	lockKey := proxy.Options.LockPrefix + url
	valueKey := proxy.Options.ValuePrefix + url
	lockVal := proxy.makeLockVal()
	var result renderResult
	// Register as a waiter before the cache read so a render event
	// published in between cannot be missed.
	listener := proxy.makeListener(url)
	cachedBytes, err := proxy.RedisClient.Get(valueKey).Bytes()
	if err == nil {
		// Cache hit: no waiting needed.
		logger.Debugf("cached %s", url)
		proxy.closeListener(listener)
		codec.NewDecoderBytes(cachedBytes, proxy.encoderHandle).MustDecode(&result)
	} else {
		// Cache miss: try to become the renderer. The lock expires
		// after RenderTimeout so a crashed renderer cannot wedge the URL.
		aquired, err := proxy.RedisClient.SetNX(lockKey, lockVal, proxy.Options.RenderTimeout).Result()
		if err != nil {
			logger.Panic(err)
		}
		if aquired {
			proxy.closeListener(listener)
			logger.Debugf("render %s", url)
			proxy.makeBackendRequest(request, &result)
			buffer := bytes.NewBuffer([]byte{})
			encoder := codec.NewEncoder(buffer, proxy.encoderHandle)
			encoder.MustEncode(result)
			// Only successful renders are cached.
			if result.Status == 200 {
				if err := proxy.RedisClient.Set(valueKey, buffer.Bytes(), proxy.Options.CacheDuration).Err(); err != nil {
					logger.Panic(err)
				}
			}
			if err := proxy.RedisClient.Del(lockKey).Err(); err != nil {
				logger.Panic(err)
			}
			// Announce the outcome to waiters on every instance.
			eventBuffer := bytes.NewBuffer([]byte{})
			event := renderEvent{
				URL: url,
				Status: result.Status,
			}
			codec.NewEncoder(eventBuffer, proxy.encoderHandle).MustEncode(&event)
			logger.Debugf("render %s done", url)
			if err := proxy.RedisClient.Publish(proxy.Options.TopicName, eventBuffer.String()).Err(); err != nil {
				logger.Panic(err)
			}
		} else {
			// Someone else is rendering: wait for its event or give up.
			logger.Debugf("wait for %s", url)
			select {
			case event := <-listener.Done:
				logger.Debugf("resolved %s", url)
				if event.Status == 200 {
					// Re-read the value the renderer just cached.
					cachedBytes, err := proxy.RedisClient.Get(valueKey).Bytes()
					if err != nil {
						logger.Panic(err)
					}
					codec.NewDecoderBytes(cachedBytes, proxy.encoderHandle).MustDecode(&result)
				} else {
					result = renderResult{Status: 503}
				}
			case <-time.After(proxy.Options.RenderTimeout):
				proxy.closeListener(listener)
				logger.Debugf("timeout %s", url)
				result = renderResult{Status: 503}
			}
		}
	}
	proxy.writeResult(writer, &result)
}
// handleRenderMessage delivers a render event to every local waiter
// registered for its URL, closes their channels and drops the entry.
// NOTE(review): the sends happen on unbuffered channels while holding
// the proxy mutex — safe only if every registered waiter is guaranteed
// to be receiving (or already timed out and deregistered); confirm.
func (proxy Proxy) handleRenderMessage(event *renderEvent) {
	proxy.lock()
	defer proxy.unlock()
	channels, ok := proxy.Channels[event.URL]
	if ok {
		logger.Debugf("%s ready", event.URL)
		for e := channels.Front(); e != nil; e = e.Next() {
			channel := e.Value.(chan *renderEvent)
			channel <- event
			close(channel)
		}
		delete(proxy.Channels, event.URL)
	}
}
// GetStats reports, per URL currently being rendered, how many
// requests are waiting on it.
func (proxy Proxy) GetStats() map[string]int {
	proxy.lock()
	defer proxy.unlock()
	stats := make(map[string]int, len(proxy.Channels))
	for url, listeners := range proxy.Channels {
		stats[url] = listeners.Len()
	}
	return stats
}
// readRenderEvents is the pub/sub pump: it blocks on the redis
// subscription, decodes each payload into a renderEvent and dispatches
// it to local waiters. It runs for the process lifetime and panics on
// a receive error.
func (proxy Proxy) readRenderEvents() {
	for {
		msg, err := proxy.PubSub.ReceiveMessage()
		if err != nil {
			logger.Panic(err)
		}
		var event renderEvent
		codec.NewDecoderBytes([]byte(msg.Payload), proxy.encoderHandle).MustDecode(&event)
		proxy.handleRenderMessage(&event)
	}
}
// main parses flags, configures logging, connects to redis (failing
// fast when unreachable) and serves the caching proxy on the bind
// address.
// Fix: the error returned by http.ListenAndServe was silently
// discarded, so e.g. a busy port made the process exit 0 with no
// message.
func main() {
	var redisHost string
	var redisPort int
	var redisDB int64
	var backendURL string
	var bind string
	var statsInterval int64
	var logLevel string
	var cacheDuration int64
	var renderTimeout int64
	flag.StringVar(&backendURL, "backendURL", "http://127.0.0.1:3000", "backend url")
	flag.StringVar(&redisHost, "redisHost", "127.0.0.1", "redis host")
	flag.IntVar(&redisPort, "redisPort", 6379, "redis port")
	flag.Int64Var(&redisDB, "redisDB", 0, "redis database to use (default 0)")
	flag.StringVar(&bind, "bind", ":8080", "host:port or :port to listen")
	flag.Int64Var(&cacheDuration, "cacheDuration", 5000, "cache duration, ms")
	flag.Int64Var(&renderTimeout, "renderTimeout", 5000, "render timeout, ms")
	flag.Int64Var(&statsInterval, "statsInterval", 0, "dump stats interval, ms (default disabled)")
	flag.StringVar(&logLevel, "logLevel", "WARNING", "log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)")
	flag.Parse()
	logBackend := logging.NewBackendFormatter(logging.NewLogBackend(os.Stderr, "", 0), format)
	leveled := logging.AddModuleLevel(logBackend)
	levels := map[string]logging.Level{
		"DEBUG": logging.DEBUG,
		"INFO": logging.INFO,
		"WARNING": logging.WARNING,
		"ERROR": logging.ERROR,
		"CRITICAL": logging.CRITICAL,
	}
	// NOTE(review): an unrecognised -logLevel maps to the Level zero
	// value rather than the WARNING default — confirm intended.
	leveled.SetLevel(levels[logLevel], "cachier")
	client := redis.NewClient(&redis.Options{
		Addr: fmt.Sprintf("%v:%v", redisHost, redisPort),
		DB: redisDB,
	})
	// Fail fast when redis is unreachable.
	_, err := client.Ping().Result()
	if err != nil {
		logger.Panic(err)
	}
	proxy := NewProxy(client, &ProxyOptions{
		BackendURL: backendURL,
		CacheDuration: time.Duration(cacheDuration) * time.Millisecond,
		RenderTimeout: time.Duration(renderTimeout) * time.Millisecond,
		StatsInterval: time.Duration(statsInterval) * time.Millisecond,
	})
	logger.Infof("listen %v", bind)
	if err := http.ListenAndServe(bind, proxy); err != nil {
		logger.Panic(err)
	}
}
|
package redisClient
import (
"fmt"
"github.com/garyburd/redigo/redis"
)
// StringWrite stores a plain key/value string in redis under the
// pool's key prefix.
//   key: logical key (stored as "<prefix>:<key>")
//   val: value to cache
//   EX:  TTL in seconds; 0 means the key never expires
// Errors are logged to stdout, not returned.
// Fix: the SET error was only inspected after the TTL had already been
// applied; a failed SET now skips the expiry step.
func (this *RedisPool) StringWrite(key string, val string, EX int) {
	key = this.prefix + ":" + key
	// Borrow a connection from the pool; Close returns it for reuse.
	c := this.getCon()
	defer c.Close()
	_, err := c.Do("SET", key, val)
	if err != nil {
		fmt.Println("StringWrite->" + err.Error())
		return
	}
	if EX > 0 {
		// NOTE(review): key is already prefixed here — confirm
		// KeyExpire does not prefix it a second time.
		this.KeyExpire(key, EX, 0)
	}
}
// NXStringWrite stores key/val only when the key does not exist yet
// (redis SETNX), optionally attaching a TTL.
//   EX: TTL in seconds; 0 keeps the key forever.
// Returns the SETNX reply: 1 when the key was written, 0 otherwise.
// Errors are logged to stdout, not returned.
func (this *RedisPool) NXStringWrite(key string, val string, EX int) int {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	result, err := redis.Int(c.Do("SETNX", key, val))
	// Only a fresh key (reply 1) gets the expiry attached.
	if EX > 0 && result > 0 {
		this.KeyExpire(key, EX, 0)
	}
	if err != nil {
		fmt.Println("NXStringWrite->" + err.Error())
	}
	return result
}
// StringRead fetches the string stored at key (prefix applied).
// Best effort: a missing key or failed GET yields "".
func (this *RedisPool) StringRead(key string) string {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	res, _ := redis.String(c.Do("GET", key))
	return res
}
// IntWrite stores an int value in redis under the pool's key prefix.
//   EX: TTL in seconds; 0 keeps the key forever.
// Returns the reply: 1 when the key was written, 0 otherwise.
// NOTE(review): despite the name this uses SETNX, so an existing key
// is never overwritten — confirm that is intended.
func (this *RedisPool) IntWrite(key string, val int, EX int) int {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	result, err := redis.Int(c.Do("SETNX", key, val))
	if EX > 0 && result > 0 {
		this.KeyExpire(key, EX, 0)
	}
	if err != nil {
		fmt.Println("IntWrite->" + err.Error())
	}
	return result
}
// IntRead fetches the int stored at key (prefix applied).
// Best effort: a missing key or failed GET yields 0.
func (this *RedisPool) IntRead(key string) int {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	res, _ := redis.Int(c.Do("GET", key))
	return res
}
// BoolWrite stores a bool value in redis under the pool's key prefix.
//   EX: TTL in seconds; 0 keeps the key forever.
// Returns the reply: 1 when the key was written, 0 otherwise.
// NOTE(review): uses SETNX, so an existing key is never overwritten —
// confirm that is intended.
func (this *RedisPool) BoolWrite(key string, val bool, EX int) int {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	result, err := redis.Int(c.Do("SETNX", key, val))
	if EX > 0 && result > 0 {
		this.KeyExpire(key, EX, 0)
	}
	if err != nil {
		fmt.Println("BoolWrite->" + err.Error())
	}
	return result
}
// BoolRead fetches the bool stored at key (prefix applied).
// Best effort: a missing key or failed GET yields false.
func (this *RedisPool) BoolRead(key string) bool {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	res, _ := redis.Bool(c.Do("GET", key))
	return res
}
// BytesWrite stores a byte slice in redis under the pool's key prefix.
//   EX: TTL in seconds; 0 keeps the key forever.
// Returns the reply: 1 when the key was written, 0 otherwise.
// NOTE(review): uses SETNX, so an existing key is never overwritten —
// confirm that is intended.
func (this *RedisPool) BytesWrite(key string, val []byte, EX int) int {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	result, err := redis.Int(c.Do("SETNX", key, val))
	if EX > 0 && result > 0 {
		this.KeyExpire(key, EX, 0)
	}
	if err != nil {
		fmt.Println("BytesWrite->" + err.Error())
	}
	return result
}
// BytesRead fetches the byte slice stored at key (prefix applied),
// logging the command when the package debug flag is set.
// Best effort: a missing key or failed GET yields nil.
func (this *RedisPool) BytesRead(key string) []byte {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	if debug {
		fmt.Println("GET->" + key)
	}
	res, _ := redis.Bytes(c.Do("GET", key))
	return res
}
// Float64Write stores a float64 value in redis under the pool's key
// prefix.
//   EX: TTL in seconds; 0 keeps the key forever.
// Returns the reply: 1 when the key was written, 0 otherwise.
// NOTE(review): uses SETNX, so an existing key is never overwritten —
// confirm that is intended.
func (this *RedisPool) Float64Write(key string, val float64, EX int) int {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	result, err := redis.Int(c.Do("SETNX", key, val))
	if EX > 0 && result > 0 {
		this.KeyExpire(key, EX, 0)
	}
	if err != nil {
		fmt.Println("Float64Write->" + err.Error())
	}
	return result
}
// Float64Read fetches the float64 stored at key (prefix applied).
// Best effort: a missing key or failed GET yields 0.
func (this *RedisPool) Float64Read(key string) float64 {
	key = this.prefix + ":" + key
	// Borrow a pooled connection; Close hands it back for reuse.
	c := this.getCon()
	defer c.Close()
	res, _ := redis.Float64(c.Do("GET", key))
	return res
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
)
// main fetches the current BTC-USD buy price from the Coinbase API and
// prints the raw JSON response.
// Fixes: the response body was never closed (connection leak) and the
// ioutil.ReadAll error was silently discarded.
func main() {
	fmt.Println("Starting our app")
	response, err := http.Get("https://api.coinbase.com/v2/prices/BTC-USD/buy")
	if err != nil {
		log.Fatal("The Http request failed with an error", err)
	}
	defer response.Body.Close()
	data, err := ioutil.ReadAll(response.Body)
	if err != nil {
		log.Fatal("Reading the response body failed with an error", err)
	}
	fmt.Println(string(data))
}
|
package main
import (
"fmt"
"strconv"
)
// main parses the string "50" as an integer and prints it.
// Fix: the original passed strconv.Atoi's two return values straight
// to Println, printing "50 <nil>"; the error is now handled explicitly.
func main() {
	result := "50"
	n, err := strconv.Atoi(result)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(n)
}
|
package main
import (
"image"
"image/color"
"os"
"image/png"
)
var img = image.NewRGBA(image.Rect(0,0,500,500)) // 500x500 canvas the drawing helpers write into
var col color.Color // current drawing colour used by HLine/VLine/circle
// main draws a circle of radius 300 onto the shared canvas and writes
// it to draw.png.
// Fixes: the colour comments were wrong ({0,0,255} was labelled "Red",
// {255,0,0} "Green"), and the png.Encode error was silently discarded.
func main() {
	col = color.RGBA{0, 0, 255, 255} // blue
	//VLine(10, 20, 80)
	//HLine(10, 20, 80)
	col = color.RGBA{255, 0, 0, 255} // red
	//Rect(0,0,100,100)
	col = color.RGBA{255, 0, 0, 255} // red: the colour the circle is drawn in
	circle(0,0,300)
	f, err := os.Create("draw.png")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// Write the image to the file, surfacing encode failures.
	if err := png.Encode(f, img); err != nil {
		panic(err)
	}
}
// HLine draws a horizontal line from (x1, y) to (x2, y) in the current
// colour col.
func HLine(x1, y, x2 int) {
	for x := x1; x <= x2; x++ {
		img.Set(x, y, col)
	}
}
// VLine draws a vertical line from (x, y1) to (x, y2) in the current
// colour col.
func VLine(x, y1, y2 int) {
	for y := y1; y <= y2; y++ {
		img.Set(x, y, col)
	}
}
// Rect draws the outline of the axis-aligned rectangle with corners
// (x1,y1) and (x2,y2), utilizing HLine() and VLine() for the four
// edges.
func Rect(x1, y1, x2, y2 int) {
	HLine(x1, y1, x2)
	HLine(x1, y2, x2)
	VLine(x1, y1, y2)
	VLine(x2, y1, y2)
}
// circle rasterizes an arc of radius r into the global img using col,
// with a midpoint-style error test split over two loops.
// NOTE(review): the x and y parameters are immediately overwritten
// (x = x+r, y = 0), so the radius is effectively offset by the passed
// x and only the region with non-negative coordinates is drawn —
// confirm this matches the intended centre/extent.
func circle(x,y,r int){
	// First arc: start at (x+r, 0), stepping y upward and pulling x
	// inward whenever the midpoint falls outside the circle.
	x=x+r
	y=0
	for x>=0{
		px := (x+(x-1))/2
		py := y+1
		if (px*px + py*py)<(r*r){
			img.Set(x,y+1, col)
		}else {
			img.Set(x-1,y+1, col)
			x--
		}
		y++
		// Stop at the diagonal; the second loop takes over from there.
		if x-y<1{
			break
		}
	}
	// Second arc: continue from the diagonal, stepping x rightward and
	// pulling y downward symmetrically.
	y=x
	x=0
	for y>=0{
		px := x+1
		py := (y+(y+1))/2
		if (px*px + py*py)<(r*r){
			img.Set(x+1,y, col)
		}else {
			img.Set(x+1,y+1, col)
			y--
		}
		x++
		if y==x{
			break
		}
	}
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/10/9 8:41 上午
# @File : lt_125_验证回文字符串.go
# @Description :
# @Attention :
*/
package offer
import "strings"
// Key idea: keep only alphanumerics, lowercase them, then walk two
// pointers inward from both ends (LeetCode 125, "Valid Palindrome").
// Fix: the filtered string was built with += in a loop (quadratic);
// switched to strings.Builder. Iterating byte-by-byte is equivalent to
// the original range loop here, because only ASCII bytes ever pass the
// isalnum filter.
func isPalindrome(s string) bool {
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		if isalnum(s[i]) {
			b.WriteByte(s[i])
		}
	}
	s = strings.ToLower(b.String())
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		if s[i] != s[j] {
			return false
		}
	}
	return true
}

// isalnum reports whether ch is an ASCII letter or digit.
func isalnum(ch byte) bool {
	return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || (ch >= '0' && ch <= '9')
}
|
package logger
import (
"fmt"
"os"
"runtime"
"strings"
"time"
"github.com/labstack/echo/v4"
"github.com/sirupsen/logrus"
"gopkg.in/natefinch/lumberjack.v2"
)
// hasLoad records whether loadConfig has already run.
var hasLoad = false

// lg is the package-wide logrus logger configured by loadConfig.
var lg = logrus.New()
// loadConfig - Load initial config: enables caller reporting and
// routes output to a size-rotated, compressed log file under the
// $dir_log directory (file name from $log_file, default "log.log");
// backups and age are unlimited.
// NOTE(review): hasLoad is read and written without synchronisation —
// concurrent first calls to MakeLogEntry could race; confirm init is
// single-threaded.
func loadConfig() {
	lg.SetReportCaller(true)
	f := "/log.log"
	if os.Getenv("log_file") != "" {
		f = "/" + os.Getenv("log_file")
	}
	lg.SetOutput(&lumberjack.Logger{
		Filename: os.Getenv("dir_log") + f,
		MaxSize: 15, // in megabytes
		MaxBackups: 0,
		MaxAge: 0, // in days
		Compress: true, // disabled by default
	})
	hasLoad = true
}
// trace - Backtrace log: returns "file:line funcName\n" for the frame
// `stack` levels above this call, with the $dir_root prefix stripped
// from the file path.
// NOTE(review): if the stack is shallower than `stack`, pc[0] stays
// zero and runtime.FuncForPC may return nil, which would panic on
// f.FileLine — confirm callers always pass a valid depth.
func trace(stack int) string {
	pc := make([]uintptr, 10) // at least 1 entry needed
	runtime.Callers(stack, pc)
	f := runtime.FuncForPC(pc[0])
	file, line := f.FileLine(pc[0])
	dir := os.Getenv("dir_root")
	file = strings.Replace(file, dir, "", 1)
	return fmt.Sprintf("%s:%d %s\n", file, line, f.Name())
}
// MakeLogEntry - Write log to file and/or print out: builds a logrus
// entry carrying a UTC timestamp, optional backtrace fields, and —
// when an echo context is supplied — the request method, URI and
// client/proxy addresses. The file sink is lazily configured on first
// use.
func MakeLogEntry(c echo.Context, doTrace bool) *logrus.Entry {
	if !hasLoad {
		loadConfig()
	}
	f := map[string]interface{}{
		"at": time.Now().UTC().Format("2006-01-02 15:04:05"),
	}
	if doTrace {
		// Two adjacent stack depths, so the caller and its caller are
		// both recorded.
		f["trace3"] = trace(3)
		f["trace4"] = trace(4)
	}
	if c != nil {
		f["method"] = c.Request().Method
		f["uri"] = c.Request().URL.String()
		f["ip"] = c.Request().RemoteAddr
		f["real_ip"] = c.Request().Header.Get("X-Real-Ip")
		f["proxy_ip"] = c.Request().Header.Get("X-Proxy-Ip")
	}
	return lg.WithFields(f)
}
// PrintLogEntry - Print log entry to stdout and record it via the file
// logger; t selects the level ("info" logs at Info, anything else at
// Error).
func PrintLogEntry(t string, s string, doTrace bool) {
	fmt.Printf("%s | %s \n", time.Now().UTC().Format("2006-01-02 15:04:05 -0700 MST"), s)
	entry := MakeLogEntry(nil, doTrace)
	if t == "info" {
		entry.Info(s)
	} else {
		entry.Error(s)
	}
}
// EchoLogger - echo middleware that logs every incoming request with
// its method/URI/IP fields before delegating to the next handler.
func EchoLogger(next echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		MakeLogEntry(c, false).Info("incoming request")
		return next(c)
	}
}
|
package dingtalk
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
"prometheus-alertmanager-dingtalk/config"
"prometheus-alertmanager-dingtalk/zaplog"
)
// NewDingTalk builds a DingTalk webhook client from application
// config: the webhook URI, an HTTP client with conservative pooling
// and timeout settings, and the configured security mode plus secret.
func NewDingTalk() *DingTalk {
	d := &DingTalk{
		Uri: config.GetDingTalkUri(),
		HC: &http.Client{
			Transport: &http.Transport{
				DialContext: (&net.Dialer{
					Timeout: 5 * time.Second,
					KeepAlive: 30 * time.Second,
				}).DialContext,
				MaxIdleConns: 5,
				IdleConnTimeout: 30 * time.Second,
				TLSHandshakeTimeout: 10 * time.Second,
				ExpectContinueTimeout: 5 * time.Second,
				ResponseHeaderTimeout: 5 * time.Second,
			},
			// Overall request deadline, on top of the per-phase limits.
			Timeout: 5 * time.Second,
		},
	}
	d.SecuritySettingsType = config.GetSecuritySettingsType()
	d.SecretKey = config.GetSecretKey()
	return d
}
// DingTalk is a client for a DingTalk group-robot webhook.
type DingTalk struct {
	HC *http.Client
	Uri string
	// DingTalk Security Settings Type
	// 1. CustomKeywords: up to 10 keywords may be configured; a message
	//    must contain at least one of them to be delivered
	// 2. Endorsement: signed requests; requires SecretKey
	// 3. IPAddressSegment: only requests originating from the allowed
	//    IP ranges are processed
	SecuritySettingsType string
	SecretKey string
}
// Notification is the JSON body sent to DingTalk; only the markdown
// message type is used here.
type Notification struct {
	MessageType string `json:"msgtype"`
	Markdown *NotificationMarkdown `json:"markdown,omitempty"`
	At *NotificationAt `json:"at,omitempty"`
}

// NotificationAt selects who gets @-mentioned by the message.
type NotificationAt struct {
	AtMobiles []string `json:"atMobiles,omitempty"`
	IsAtAll bool `json:"isAtAll,omitempty"`
}

// NotificationMarkdown is the markdown payload (title + body text).
type NotificationMarkdown struct {
	Title string `json:"title"`
	Text string `json:"text"`
}
// MakeTimestamp returns the current time as milliseconds since the
// Unix epoch, formatted as a decimal string (the form DingTalk's
// signed-request query expects).
func (d *DingTalk) MakeTimestamp() string {
	ms := time.Now().UnixNano() / int64(time.Millisecond)
	return strconv.FormatInt(ms, 10)
}
// HmacSha256Base64Encode computes the DingTalk request signature:
// HMAC-SHA256 over "timestamp\nsecret" keyed with the secret, encoded
// as standard base64.
func (d *DingTalk) HmacSha256Base64Encode(timestamp string) string {
	stringToSign := timestamp + "\n" + d.SecretKey
	mac := hmac.New(sha256.New, []byte(d.SecretKey))
	mac.Write([]byte(stringToSign))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}
// BuildAlertManagerMessagePayload reads the Alertmanager webhook body
// from r, converts it into a DingTalk markdown notification (with
// @-all enabled) and returns it as a reader ready to POST. Firing
// alerts are filtered before the title/text are generated.
func (d *DingTalk) BuildAlertManagerMessagePayload(r *http.Request) (*bytes.Reader, error) {
	payload, err := ioutil.ReadAll(r.Body)
	// Always close the request body, regardless of the read outcome.
	defer func() {
		if err := r.Body.Close(); err != nil {
			return
		}
	}()
	if err != nil {
		return nil, err
	}
	alertManagerMessage := NewAlertManagerMessage()
	if err := json.Unmarshal(payload, &alertManagerMessage); err != nil {
		return nil, err
	}
	if alertManagerMessage.Status == "firing" {
		alertManagerMessage.FilterFiringInformation()
	}
	title, text, err := alertManagerMessage.GenerateDingTalkTitleAndText()
	if err != nil {
		return nil, err
	}
	// Assemble the DingTalk markdown notification, mentioning everyone.
	notification, notificationAt, notificationMarkdown := new(Notification), new(NotificationAt), new(NotificationMarkdown)
	notificationAt.IsAtAll = true
	notificationMarkdown.Title, notificationMarkdown.Text = title, text
	notification.MessageType = "markdown"
	notification.At = notificationAt
	notification.Markdown = notificationMarkdown
	requestBody, err := json.Marshal(notification)
	if err != nil {
		return nil, err
	}
	zaplog.Logger.Debug("Build AlertManagerMessage Payload From AlertManager Message Completed !",
		zap.String("Status", alertManagerMessage.Status),
		zap.String("AlertName", alertManagerMessage.GroupLabels.AlertName),
	)
	return bytes.NewReader(requestBody), nil
}
// SendAlertManagerMessage forwards an Alertmanager webhook request to
// the DingTalk webhook: it builds the markdown payload, signs the URL
// when the "Endorsement" security mode is configured, POSTs it, and
// verifies both the HTTP status and DingTalk's errcode in the reply.
func (d *DingTalk) SendAlertManagerMessage(r *http.Request) error {
	body, err := d.BuildAlertManagerMessagePayload(r)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", d.Uri, body)
	if err != nil {
		return err
	}
	if d.SecuritySettingsType == "Endorsement" {
		// Signed mode: attach timestamp + HMAC signature as query params.
		timestamp := d.MakeTimestamp()
		base64EncodeSign := d.HmacSha256Base64Encode(timestamp)
		q := req.URL.Query() // Get a copy of the query values.
		q.Add("timestamp", timestamp) // Add query timestamp
		q.Add("sign", base64EncodeSign) // Add query sign
		req.URL.RawQuery = q.Encode() // Encode and assign back to the original query.
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := d.HC.Do(req)
	// Close the body whenever a response object exists, even on error.
	if resp != nil {
		defer func() {
			if err := resp.Body.Close(); err != nil {
				return
			}
		}()
	}
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		return errors.Errorf("Send Message Response StatusCode is not 200, StatusCode: %d", resp.StatusCode)
	}
	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// DingTalk reports delivery failures in-band via errcode/errmsg.
	var response struct {
		ErrorCode int `json:"errcode"`
		ErrorMessage string `json:"errmsg"`
	}
	if err := json.Unmarshal(payload, &response); err != nil {
		return err
	}
	if response.ErrorCode != 0 {
		return errors.Errorf("Send Message Response ErrorCode is not zero, ErrorCode: %d, ErrorMessage: %s",
			response.ErrorCode, response.ErrorMessage)
	}
	zaplog.Logger.Debug("Send AlertManagerMessage Payload To DingTalk Completed !")
	return nil
}
|
package first
import (
"go/token"
"github.com/bunniesandbeatings/go-flavor-parser/architecture"
)
// Context carries per-file parse state: the file being processed, the
// architecture package being populated, and the token FileSet used to
// resolve source positions.
type Context struct {
	Filename string
	Package *architecture.Package
	Fset *token.FileSet
}
|
package main
import (
"fmt"
"time"
)
// write pushes the integers 0..99 into ch, logging each value once the
// channel has accepted it. It does not close the channel.
func write(ch chan int) {
	const count = 100
	for i := 0; i < count; i++ {
		ch <- i
		fmt.Println("generate ", i)
	}
}
// read consumes values from ch forever, printing each one and pausing
// 10ms between reads to simulate a slow consumer. It never returns:
// once ch is drained it blocks on the next receive (and would spin on
// zero values if ch were closed).
func read(ch chan int) {
	for {
		b := <-ch
		fmt.Println(b)
		time.Sleep(10 * time.Millisecond)
	}
}
// main demonstrates two producer/consumer patterns: first a writer and
// a slow reader sharing a buffered channel for a fixed 5 seconds
// (goroutines are abandoned, not joined), then the close-based
// handshake in test().
func main() {
	chann := make(chan int, 10)
	go write(chann)
	go read(chann)
	time.Sleep(5 * time.Second)
	fmt.Println("----------------------")
	test()
}
// test wires a sender and receiver together over a buffered channel
// and blocks until both have signalled completion on exitCh.
func test() {
	ch := make(chan int, 10)
	exitCh := make(chan struct{}, 2)
	go send(ch, exitCh)
	go recv(ch, exitCh)
	// Counting completions is a sturdy way to join a known number of
	// goroutines.
	for done := 0; done < 2; done++ {
		<-exitCh
	}
}
// send feeds 0..9 into ch, closes the channel so the receiver can
// detect end-of-stream, then signals completion on exitCh.
func send(ch chan int, exitCh chan struct{}) {
	for i := 0; i < 10; i++ {
		ch <- i
		fmt.Println("generate ", i)
	}
	// Closing is essential: the receiver's ok-check relies on it.
	close(ch)
	exitCh <- struct{}{}
}
// recv drains ch until it is closed, printing every value, then
// signals completion on exitCh.
func recv(ch chan int, exitCh chan struct{}) {
	// Ranging over a channel terminates exactly when the sender closes
	// it — the same end condition as an explicit ok-check.
	for v := range ch {
		fmt.Println(v)
	}
	exitCh <- struct{}{}
}
|
package main
// sortArray sorts nums in place with quicksort and returns the same
// slice for caller convenience.
func sortArray(nums []int) []int {
	// quickSort2 is a no-op for ranges of length <= 1, so the guard is
	// purely an early-out.
	if len(nums) > 1 {
		quickSort2(0, len(nums)-1, nums)
	}
	return nums
}
// partition4 partitions nums[i..j] around nums[i] (Hoare-style with
// the pivot parked at the left edge). On return the pivot occupies the
// returned index, with elements <= pivot to its left and greater ones
// to its right.
func partition4(i, j int, nums []int) int {
	pivotPos := i
	pivot := nums[i]
	for i < j {
		// Shrink from the right past elements greater than the pivot.
		for i < j && nums[j] > pivot {
			j--
		}
		// Then advance from the left past elements <= pivot. This order
		// must not be reversed, or the pivot's final slot is wrong.
		for i < j && nums[i] <= pivot {
			i++
		}
		if i < j {
			nums[i], nums[j] = nums[j], nums[i]
		}
	}
	// Drop the pivot into its final position.
	nums[i], nums[pivotPos] = nums[pivotPos], nums[i]
	return i
}
// quickSort2 recursively sorts nums[i..j] in place; empty and
// single-element ranges terminate the recursion.
func quickSort2(i, j int, nums []int) {
	if i >= j {
		return
	}
	p := partition4(i, j, nums)
	quickSort2(i, p-1, nums)
	quickSort2(p+1, j, nums)
}
|
package main
import (
"errors"
"fmt"
"log"
"net"
"os"
"os/signal"
"runtime"
"strconv"
"syscall"
"time"
"github.com/pkg/profile"
"github.com/scottshotgg/proximity/pkg/buffs"
grpc_node "github.com/scottshotgg/proximity/pkg/node/grpc"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
// PrintMemUsage dumps the current runtime memory statistics: the full
// raw MemStats struct, then a one-line human-readable summary.
// For info on each field, see: https://golang.org/pkg/runtime/#MemStats
func PrintMemUsage() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	fmt.Printf("\n%+v\n", m)
	fmt.Printf("Alloc = %v MiB", bToMb(m.Alloc))
	fmt.Printf("\tTotalAlloc = %v MiB", bToMb(m.TotalAlloc))
	fmt.Printf("\tSys = %v MiB", bToMb(m.Sys))
	fmt.Printf("\tNumGC = %v\n", m.NumGC)
	fmt.Println()
}
// bToMb converts a byte count to whole mebibytes, truncating.
func bToMb(b uint64) uint64 {
	// Dividing by 1024 twice is exactly a 20-bit shift for uint64.
	return b >> 20
}
// main starts goroutine profiling (written to profile.p on exit) and
// runs the gRPC node server; start's error is currently discarded by
// the commented-out handling below.
func main() {
	defer profile.Start(profile.GoroutineProfile, profile.ProfilePath("profile.p")).Stop()
	// go func() {
	// 	for {
	// 		PrintMemUsage()
	// 		runtime.GC()
	// 		time.Sleep(2 * time.Second)
	// 	}
	// }()
	start()
	// var err = start()
	// if err != nil {
	// 	// TODO: need to wrap errors
	// 	log.Fatalln("err start:", err)
	// }
}
// getOutboundIP returns the local IP the OS would use to reach
// host:port (defaulting to 8.8.8.8:80 when host is empty). UDP
// "dialing" sends no packets; it only selects a source address.
func getOutboundIP(host, port string) (net.IP, error) {
	if host == "" {
		host, port = "8.8.8.8", "80"
	}
	conn, err := net.Dial("udp", net.JoinHostPort(host, port))
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	addr, ok := conn.LocalAddr().(*net.UDPAddr)
	if !ok {
		return nil, errors.New("localAddr was not a *net.UDPAddr")
	}
	return addr.IP, nil
}
// start binds a TCP listener on port 5001, logs the resolved outbound
// address and the message-size limits, registers the node service plus
// gRPC reflection, installs a Ctrl-C handler, and serves until the
// server stops.
// NOTE(review): a goroutine hard-stops the server after 12 seconds —
// looks like profiling/test scaffolding; confirm before shipping.
func start() error {
	const (
		host = ""
		port = "5001"
		mb = 1024 * 1024
		everySecond = 1 * time.Second
		maxMsgMB = 100
		maxMsgSize = maxMsgMB * mb
	)
	var (
		ip, err = getOutboundIP(host, port)
	)
	if err != nil {
		return err
	}
	l, err := net.Listen("tcp", net.JoinHostPort(host, port))
	if err != nil {
		return err
	}
	// Align the startup banner columns to the widest label.
	const maxMsgSizeHeader = "Max message size"
	var mmshLen = strconv.Itoa(len(maxMsgSizeHeader) + 1)
	log.Printf("%-"+mmshLen+"s: %s\n", "Address", ip.String())
	log.Printf("%-"+mmshLen+"s: %s\n", "Port", port)
	log.Printf("%-"+mmshLen+"s: %d MB, %d bytes\n", maxMsgSizeHeader, maxMsgMB, maxMsgSize)
	fmt.Println()
	var grpcServer = grpc.NewServer(
		grpc.MaxSendMsgSize(maxMsgSize),
		grpc.MaxRecvMsgSize(maxMsgSize),
	)
	go ctrlc(grpcServer)
	buffs.RegisterNodeServer(grpcServer, grpc_node.New())
	log.Printf("Registered node")
	reflection.Register(grpcServer)
	log.Printf("Registered reflection")
	log.Println("Serving gRPC ...")
	fmt.Println()
	go func() {
		time.Sleep(12 * time.Second)
		grpcServer.Stop()
	}()
	return grpcServer.Serve(l)
}
// ctrlc blocks until SIGINT or SIGTERM arrives, then stops the gRPC
// server.
// Fix: the signal channel was unbuffered; signal.Notify does not block
// when delivering, so an unbuffered channel can drop the signal
// (go vet flags this). A one-slot buffer is the documented pattern.
func ctrlc(s *grpc.Server) {
	var c = make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	<-c
	log.Println("Stopping gRPC server ...")
	// TODO: flesh this out a bit more
	s.Stop()
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cloudferro
import (
"github.com/CS-SI/SafeScale/providers"
"github.com/CS-SI/SafeScale/providers/api"
"github.com/CS-SI/SafeScale/providers/metadata"
"github.com/CS-SI/SafeScale/providers/model/enums/VolumeSpeed"
"github.com/CS-SI/SafeScale/providers/openstack"
gc "github.com/gophercloud/gophercloud"
ops "github.com/gophercloud/gophercloud/openstack"
)
// AuthOptions fields are the union of those recognized by each identity implementation and provider.
type AuthOptions struct {
	Username string
	Password string
	TenantName string
	Region string
	DomainName string
	ProjectName string
	ProjectID string // also used to derive the metadata bucket name
}
// func parseOpenRC(openrc string) (*openstack.AuthOptions, error) {
// tokens := strings.Split(openrc, "export")
// }
// AuthenticatedClient returns an authenticated client for the
// CloudFerro OpenStack endpoint, configuring floating-IP networking,
// volume speed mapping, the metadata bucket and DNS defaults, then
// attaching a Block Storage V2 client and verifying endpoints.
// Fix: the Block Storage client was assigned to os.Volume before its
// creation error was checked; the error is now checked first.
func AuthenticatedClient(opts AuthOptions, cfg openstack.CfgOptions) (*Client, error) {
	const identityEndpoint = "https://cf2.cloudferro.com:5000/v3"
	os, err := openstack.AuthenticatedClient(
		openstack.AuthOptions{
			IdentityEndpoint: identityEndpoint,
			Username: opts.Username,
			Password: opts.Password,
			DomainName: opts.DomainName,
			TenantName: opts.ProjectName,
			Region: opts.Region,
			FloatingIPPool: "external",
			AllowReauth: true,
		},
		openstack.CfgOptions{
			ProviderNetwork: "external",
			UseFloatingIP: true,
			UseLayer3Networking: true,
			AutoHostNetworkInterfaces: true,
			VolumeSpeeds: map[string]VolumeSpeed.Enum{
				"HDD": VolumeSpeed.HDD,
				"SSD": VolumeSpeed.SSD,
			},
			MetadataBucket: metadata.BuildMetadataBucketName(opts.ProjectID),
			DNSList: []string{"1.1.1.1", "8.8.8.8"},
			DefaultImage: cfg.DefaultImage,
		},
	)
	if err != nil {
		return nil, err
	}
	// Storage API V2
	blocstorage, err := ops.NewBlockStorageV2(os.Provider, gc.EndpointOpts{
		Region: opts.Region,
	})
	if err != nil {
		return nil, err
	}
	os.Volume = blocstorage
	_, err = openstack.VerifyEndpoints(os)
	if err != nil {
		return nil, err
	}
	client := &Client{
		Client: os,
		opts: opts,
	}
	return client, nil
}
// Client is the CloudFerro implementation of api.ClientAPI. It embeds the
// generic OpenStack client to maximize code reuse.
// (Previous comment referred to "ovh"; this file is the cloudferro driver.)
type Client struct {
	*openstack.Client
	opts AuthOptions // the authentication options this client was built with
}
// Build creates a new Client from the given configuration parameters.
// Expected layout: params["identity"] carries Username/Password/UserDomainName,
// params["compute"] carries Region/ProjectName/ProjectID/DefaultImage.
// Missing or mistyped entries silently default to the empty string.
func (c *Client) Build(params map[string]interface{}) (api.ClientAPI, error) {
	identity, _ := params["identity"].(map[string]interface{})
	compute, _ := params["compute"].(map[string]interface{})
	// str safely extracts a string entry; returns "" when absent or not a string.
	str := func(section map[string]interface{}, key string) string {
		value, _ := section[key].(string)
		return value
	}
	auth := AuthOptions{
		Username:    str(identity, "Username"),
		Password:    str(identity, "Password"),
		Region:      str(compute, "Region"),
		DomainName:  str(identity, "UserDomainName"),
		ProjectName: str(compute, "ProjectName"),
		ProjectID:   str(compute, "ProjectID"),
	}
	return AuthenticatedClient(auth, openstack.CfgOptions{
		DefaultImage: str(compute, "DefaultImage"),
	})
}
// // GetCfgOpts return configuration parameters
// func (c *Client) GetCfgOpts() (model.Config, error) {
// cfg := model.ConfigMap{}
// cfg.Set("DNSList", c.Cfg.DNSList)
// // cfg.Set("ObjectStorageType", c.Cfg.ObjectStorageType)
// cfg.Set("AutoHostNetworkInterfaces", c.Cfg.AutoHostNetworkInterfaces)
// cfg.Set("UseLayer3Networking", c.Cfg.UseLayer3Networking)
// cfg.Set("MetadataBucket", c.Cfg.MetadataBucketName)
// return cfg, nil
// }
// init registers this driver in the providers registry under "cloudferro".
func init() {
	providers.Register("cloudferro", &Client{})
}
|
/* Copyright (c) 2016 Jason Ish
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package eve
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/jasonish/evebox/log"
"github.com/jasonish/evebox/util"
"strings"
"time"
)
// A EveEvent is an Eve event decoded into map[string]interface{} which
// contains all the data in its raw format. Keys prefixed with "__"
// (e.g. "__parsed_timestamp") are internal caches and are stripped when
// the event is re-marshalled to JSON.
type EveEvent map[string]interface{}
// NewEveEventFromBytes decodes a raw JSON Eve record. Numbers are kept as
// json.Number, a "tags" array is guaranteed to exist, and the timestamp is
// parsed eagerly (decoding fails if it cannot be parsed) and cached under
// "__parsed_timestamp".
func NewEveEventFromBytes(b []byte) (event EveEvent, err error) {
	dec := json.NewDecoder(bytes.NewReader(b))
	dec.UseNumber()
	if err = dec.Decode(&event); err != nil {
		return nil, err
	}
	// Guarantee a tags array so consumers need no nil check.
	if event["tags"] == nil {
		event["tags"] = []interface{}{}
	}
	// Reject events whose timestamp cannot be parsed.
	ts, tsErr := event.parseTimestamp()
	if tsErr != nil {
		return nil, tsErr
	}
	// Cache the parsed timestamp under an internal key.
	event["__parsed_timestamp"] = ts
	return event, nil
}
// NewEveEventFromString decodes a raw JSON Eve record given as a string.
// See NewEveEventFromBytes for the decoding rules.
func NewEveEventFromString(s string) (event EveEvent, err error) {
	return NewEveEventFromBytes([]byte(s))
}
// MarshalJSON serializes the event, omitting internal keys (those with a
// "__" prefix, such as the cached parsed timestamp).
func (e EveEvent) MarshalJSON() ([]byte, error) {
	public := make(map[string]interface{}, len(e))
	for k, v := range e {
		if !strings.HasPrefix(k, "__") {
			public[k] = v
		}
	}
	return json.Marshal(public)
}
// parseTimestamp parses the event's "timestamp" field; it fails when the
// field is missing or not a string.
func (e EveEvent) parseTimestamp() (time.Time, error) {
	if raw, ok := e["timestamp"].(string); ok {
		return ParseTimestamp(raw)
	}
	return time.Time{}, fmt.Errorf("not a string")
}
// Timestamp returns the cached parsed timestamp stored by
// NewEveEventFromBytes or SetTimestamp. Panics if the event was not
// created/updated through those paths.
func (e EveEvent) Timestamp() time.Time {
	return e["__parsed_timestamp"].(time.Time)
}

// SetTimestamp updates both the wire-format "timestamp" field and the
// cached parsed value, keeping the two representations in sync.
func (e EveEvent) SetTimestamp(ts time.Time) {
	e["timestamp"] = FormatTimestamp(ts)
	e["__parsed_timestamp"] = ts
}

// EventType returns the "event_type" field, or "" when absent or not a string.
func (e EveEvent) EventType() string {
	if eventType, ok := e["event_type"].(string); ok {
		return eventType
	}
	return ""
}
// Packet returns the decoded raw packet bytes, or nil when the "packet"
// field is absent, not a string, or not valid base64.
func (e EveEvent) Packet() []byte {
	encoded, ok := e["packet"].(string)
	if !ok {
		return nil
	}
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return nil
	}
	return decoded
}
// Proto returns the "proto" field, or "" when absent.
func (e EveEvent) Proto() string {
	return e.GetString("proto")
}

// SrcIp returns the source IP address as a string, or "" when absent.
func (e EveEvent) SrcIp() string {
	return e.GetString("src_ip")
}

// DestIp returns the destination IP address as a string, or "" when absent.
func (e EveEvent) DestIp() string {
	return e.GetString("dest_ip")
}

// SrcPort returns the source port, or 0 when absent or not a json.Number.
func (e EveEvent) SrcPort() uint16 {
	return asUint16(e["src_port"])
}
// DestPort returns the destination port, or 0 when absent or not a
// json.Number. (Removed an unreachable second return statement whose
// unchecked type assertion to uint16 could never succeed for decoded JSON.)
func (e EveEvent) DestPort() uint16 {
	return asUint16(e["dest_port"])
}
// IcmpType returns the "icmp_type" field truncated to 8 bits, or 0 when
// absent or not a json.Number.
func (e EveEvent) IcmpType() uint8 {
	return uint8(asUint16(e["icmp_type"]))
}

// IcmpCode returns the "icmp_code" field truncated to 8 bits, or 0 when
// absent or not a json.Number.
func (e EveEvent) IcmpCode() uint8 {
	return uint8(asUint16(e["icmp_code"]))
}
// Payload returns the decoded payload bytes, or nil when the "payload"
// field is absent, not a string, or not valid base64.
func (e EveEvent) Payload() []byte {
	encoded, ok := e["payload"].(string)
	if !ok {
		return nil
	}
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return nil
	}
	return decoded
}
// GetMap returns the sub-object stored under key, delegating to
// util.JsonMap.GetMap (see that package for miss semantics).
func (e EveEvent) GetMap(key string) util.JsonMap {
	return util.JsonMap(e).GetMap(key)
}

// GetString returns the string stored under key, delegating to
// util.JsonMap.GetString.
func (e EveEvent) GetString(key string) string {
	return util.JsonMap(e).GetString(key)
}

// GetAlert returns the event's "alert" sub-object.
func (e EveEvent) GetAlert() util.JsonMap {
	return util.JsonMap(e).GetMap("alert")
}
// GetAlertSignatureId returns the alert's signature_id and true, or (0,
// false) when the field is missing, not a json.Number, or not an integer.
func (e EveEvent) GetAlertSignatureId() (uint64, bool) {
	num, ok := e.GetMap("alert").Get("signature_id").(json.Number)
	if !ok {
		return 0, false
	}
	val, err := num.Int64()
	if err != nil {
		return 0, false
	}
	return uint64(val), true
}
// AddTag appends tag to the event's "tags" array if it is not already
// present, creating the array when missing.
func (e EveEvent) AddTag(tag string) {
	if e["tags"] == nil {
		log.Println("Tags is null...")
		e["tags"] = []interface{}{}
	}
	// Fix: use only the checked type assertion. The previous code first did
	// an unchecked `tags := e["tags"].([]interface{})`, which would panic
	// before the guarded assertion below could ever run.
	tags, ok := e["tags"].([]interface{})
	if !ok {
		log.Warning("Failed to convert tags to []interface{}: %v", e["tags"])
		return
	}
	for _, existing := range tags {
		if existing == tag {
			return
		}
	}
	e["tags"] = append(tags, tag)
}
func asUint16(in interface{}) uint16 {
if number, ok := in.(json.Number); ok {
asInt64, err := number.Int64()
if err == nil {
return uint16(asInt64)
}
}
return 0
}
|
package gce
import (
"github.com/caos/orbos/internal/helpers"
"github.com/caos/orbos/internal/operator/orbiter/kinds/clusters/core/infra"
"github.com/caos/orbos/mntr"
uuid "github.com/satori/go.uuid"
)
// destroy tears down all GCE infrastructure managed by svc. It runs two
// branches concurrently: (1) destroying the load balancer, and (2) removing
// all machines in all pools, then — only after the machines are gone —
// concurrently deleting persistent disks (taken from the delegates that
// carry []infra.Volume) and the firewalls/network.
// The delegates map is keyed by a "kind" string; only values assertable to
// []infra.Volume contribute disks.
func destroy(svc *machinesService, delegates map[string]interface{}) error {
	return helpers.Fanout([]func() error{
		// Branch 1: destroy the load balancer.
		func() error {
			destroyLB, err := queryLB(svc.context, nil)
			if err != nil {
				return err
			}
			return destroyLB()
		},
		// Branch 2: machines first, then disks + network.
		func() error {
			pools, err := svc.ListPools()
			if err != nil {
				return err
			}
			var delFuncs []func() error
			for _, pool := range pools {
				machines, err := svc.List(pool)
				if err != nil {
					return err
				}
				for _, machine := range machines {
					delFuncs = append(delFuncs, machine.Remove)
				}
			}
			// All machine removals must complete before disks/network go away.
			if err := helpers.Fanout(delFuncs)(); err != nil {
				return err
			}
			return helpers.Fanout([]func() error{
				// Delete persistent disks advertised by the delegates.
				func() error {
					var deleteDisks []func() error
					deleteMonitor := svc.context.monitor.WithField("type", "persistent disk")
					for kind, delegate := range delegates {
						volumes, ok := delegate.([]infra.Volume)
						if ok {
							for idx := range volumes {
								diskName := volumes[idx].Name
								deleteDisks = append(deleteDisks, deleteDiskFunc(svc.context, deleteMonitor.WithField("id", diskName), kind, diskName))
							}
						}
					}
					return helpers.Fanout(deleteDisks)()
				},
				// Delete firewalls, then the network itself.
				func() error {
					_, deleteFirewalls, err := queryFirewall(svc.context, nil)
					if err != nil {
						return err
					}
					return destroyNetwork(svc.context, deleteFirewalls)
				},
			})()
		},
	})()
}
// deleteDiskFunc returns a deferred deletion closure for the persistent disk
// with the given id in the configured zone. The returned func issues the
// Disks.Delete call (tagged with a fresh request ID for idempotent retries)
// via operateFunc, logging before and after the operation.
// NOTE(review): the kind parameter is currently unused in the body — confirm
// whether it is intentionally only for call-site symmetry.
func deleteDiskFunc(context *context, monitor mntr.Monitor, kind, id string) func() error {
	return func() error {
		return operateFunc(
			func() { monitor.Debug("Removing resource") },
			computeOpCall(context.client.Disks.Delete(context.projectID, context.desired.Zone, id).RequestId(uuid.NewV1().String()).Do),
			func() error { monitor.Info("Resource removed"); return nil },
		)()
	}
}
|
package handler
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/tada3/triton/logging"
"github.com/tada3/triton/weather"
"github.com/tada3/triton/weather/model"
"github.com/tada3/triton/weather/util"
"github.com/tada3/triton/game"
"github.com/tada3/triton/protocol"
"github.com/tada3/triton/tritondb"
)
var (
	log *logging.Entry // package-level logger, initialized in init
	masterRepo *game.GameMasterRepo // per-user game-master registry, initialized in init
)

// init wires up the package logger and the game master repository.
func init() {
	log = logging.NewEntry("handler")
	masterRepo = game.NewGameMasterRepo()
}
// Dispatch is the top-level CEK webhook handler: it parses the request,
// routes it by request type and intent name, and writes the JSON response.
// Unknown request types fall through with a zero-value response.
func Dispatch(w http.ResponseWriter, r *http.Request) {
	req, err := parseRequest(r)
	if err != nil {
		log.Error("JSON decoding failed!", err)
		// Fix: corrected typo "reqeuest" in the user-visible message.
		respondError(w, "Invalid request!")
		return
	}
	reqType := req.Request.Type
	log.Info("request type: %s", reqType)
	userId := getUserId(req)
	var response protocol.CEKResponse
	switch reqType {
	case "LaunchRequest":
		response = handleLaunchRequest()
	case "SessionEndedRequest":
		response = protocol.MakeCEKResponse(handleEndRequest())
	case "IntentRequest":
		// Route by intent name; a switch replaces the previous if/else chain.
		switch intentName := getIntentName(req); intentName {
		case "CurrentWeather":
			response = handleCurrentWeather(req, userId)
		case "TomorrowWeather":
			response = handleTomorrowWeather(req, userId)
		case "Tomete":
			response = handleTomete(req, userId)
		case "Arigato":
			response = handleArigato(req, userId)
		case "Sugoine":
			response = handleSugoine(req, userId)
		case "Doita":
			response = handleDoita(req, userId)
		case "Question":
			response = handleQuestion(req, userId)
		case "Samui":
			response = handleSamui(req, userId)
		case "Clova.YesIntent":
			response = handleYesIntent(req, userId)
		case "Clova.NoIntent":
			// Fix: was "ClovaNoIntent" (missing dot) — the Clova builtin
			// intent is named "Clova.NoIntent", matching "Clova.YesIntent"
			// above, so the No intent always fell into the unknown branch.
			response = handleNoIntent(req, userId)
		default:
			response = handleUnknownRequest(req)
		}
	}
	w.Header().Set("Content-Type", "application/json")
	b, _ := json.Marshal(&response)
	log.Info("<<< %s", string(b))
	w.Write(b)
}
// parseRequest reads the entire request body, logs it, and decodes it into
// a CEKRequest. The body is always closed.
func parseRequest(r *http.Request) (protocol.CEKRequest, error) {
	defer r.Body.Close()
	var parsed protocol.CEKRequest
	raw, readErr := ioutil.ReadAll(r.Body)
	if readErr != nil {
		return parsed, readErr
	}
	log.Info(">>> %s", string(raw))
	decodeErr := json.Unmarshal(raw, &parsed)
	return parsed, decodeErr
}
// getUserId extracts the Clova user ID from the request's System context.
// It returns "" when any intermediate structure is missing or has an
// unexpected shape.
func getUserId(req protocol.CEKRequest) string {
	system, ok := req.Contexts["System"].(map[string]interface{})
	if !ok {
		return ""
	}
	// Fix: JSON decoded into interface{} always yields map[string]interface{},
	// never map[string]string, so the previous assertion could not succeed
	// and this function always returned "".
	user, ok := system["user"].(map[string]interface{})
	if !ok {
		return ""
	}
	userID, _ := user["userId"].(string)
	return userID
}
// handleTomorrowWeather answers the "TomorrowWeather" intent: resolve the
// city from the slots, fetch tomorrow's forecast, and build the spoken
// response (optionally prefixed with the country name for foreign cities).
func handleTomorrowWeather(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	var msg string
	var p protocol.CEKResponsePayload
	// 0. Get city
	city := genCityInfoFromSlots(req)
	if city == nil || city.CityName == "" {
		log.Info("Cannot get city from slots: %+v", req.Request.Intent)
		msg = game.GetMessage2(game.NoCity)
		p = protocol.MakeCEKResponsePayload(msg, false)
		return protocol.MakeCEKResponse(p)
	}
	// Special-cased cities get a canned answer.
	var found bool
	msg, found = game.GetMessageForSpecialCity(city.CityName)
	if found {
		p = protocol.MakeCEKResponsePayload(msg, false)
		return protocol.MakeCEKResponse(p)
	}
	log.Info("city: %v", city)
	// 1. Check cache
	// 2. Get tomorrow weather
	tw, err := weather.GetTomorrowWeather(city)
	if err != nil {
		log.Error("Error!", err)
		msg = "ごめんなさい、システムの調子が良くないみたいです。しばらくしてからもう一度お試しください。"
		return getErrorResponse(msg)
	}
	if tw == nil {
		log.Info("Weather for %v is not found.", city)
		msg = game.GetMessage2(game.WeatherNotFound, city.CityName)
		// Fix: return the "not found" response here. The previous code fell
		// through and dereferenced the nil tw below, panicking whenever the
		// forecast was missing. Mirrors handleCurrentWeather's nil handling.
		p = protocol.MakeCEKResponsePayload(msg, false)
		return protocol.MakeCEKResponse(p)
	}
	// 3. Generate message
	countryName := ""
	if city.CountryCode != "" && city.CountryCode != "HK" && city.CountryCode != "JP" {
		cn, found := tritondb.CountryCode2CountryName(city.CountryCode)
		if found {
			countryName = cn
		} else {
			log.Info("CountryName is not found: %s\n", city.CountryCode)
		}
	}
	cityName := convertCityName(city.CityName)
	if countryName != "" && countryName != cityName {
		msg = game.GetMessage(game.TomorrowWeather2, util.GetDayStr(tw.Day),
			countryName, cityName, tw.Weather,
			util.GetTempRangeStr(tw.TempMin, tw.TempMax))
	} else {
		msg = game.GetMessage(game.TomorrowWeather, util.GetDayStr(tw.Day),
			cityName, tw.Weather,
			util.GetTempRangeStr(tw.TempMin, tw.TempMax))
	}
	// 5. Make response
	p = protocol.MakeCEKResponsePayload(msg, false)
	return protocol.MakeCEKResponse(p)
}
// handleCurrentWeather answers the "CurrentWeather" intent: resolve the city
// from the slots, look the weather up in the cache (falling back to a live
// fetch), and build the spoken response. For non-JP/non-HK cities the
// country name is prepended when it differs from the city name.
func handleCurrentWeather(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	var msg string
	var p protocol.CEKResponsePayload
	// 0. Get City
	city := genCityInfoFromSlots(req)
	if city == nil || city.CityName == "" {
		log.Info("Cannot get city from slots: %+v", req.Request.Intent)
		msg = game.GetMessage2(game.NoCity)
		p = protocol.MakeCEKResponsePayload(msg, false)
		return protocol.MakeCEKResponse(p)
	}
	// Special-cased cities get a canned answer.
	var found bool
	msg, found = game.GetMessageForSpecialCity(city.CityName)
	if found {
		p = protocol.MakeCEKResponsePayload(msg, false)
		return protocol.MakeCEKResponse(p)
	}
	log.Info("city: %v", city)
	// 1. Check cache (keyed by the slot-derived city, see note below)
	cityInput := city.Clone()
	cw, found := weather.GetCurrentWeatherFromCache(cityInput)
	if !found {
		log.Info("Cache miss: %v", cityInput)
		// 2. Get weather
		var err error
		cw, err = weather.GetCurrentWeather(city)
		if err != nil {
			log.Error("Error!", err)
			msg = "ごめんなさい、システムの調子が良くないみたいです。しばらくしてからもう一度お試しください。"
			return getErrorResponse(msg)
		}
		// 3. Set cache
		// Although city info should have been added more details in GetCurrentWeather,
		// we use the original cityInput as the cache key. If you use the elaborated
		// city info as the cache key, cache would not hit.
		weather.SetCurrentWeatherToCache(cityInput, cw)
	}
	// 4. Generate message (cw may legitimately be nil: weather not found)
	if cw != nil {
		countryName := ""
		if cw.CountryCode != "" && cw.CountryCode != "HK" && cw.CountryCode != "JP" {
			cn, found := tritondb.CountryCode2CountryName(cw.CountryCode)
			if found {
				countryName = cn
			} else {
				log.Info("CountryName is not found: %s", city.CountryCode)
			}
		}
		cityName := convertCityName(city.CityName)
		if countryName != "" && countryName != cityName {
			msg = game.GetMessage(game.CurrentWeather2, countryName, cityName, cw.Weather, cw.TempStr)
		} else {
			msg = game.GetMessage(game.CurrentWeather, cityName, cw.Weather, cw.TempStr)
		}
	} else {
		log.Info("Weather for %v is not found.", city)
		msg = game.GetMessage2(game.WeatherNotFound, city.CityName)
	}
	// 5. Make response
	p = protocol.MakeCEKResponsePayload(msg, false)
	return protocol.MakeCEKResponse(p)
}
// convertCityName maps internal city identifiers to their spoken form;
// currently only "HK" is rewritten (to 香港). All other names pass through.
func convertCityName(name string) string {
	switch name {
	case "HK":
		return "香港"
	default:
		return name
	}
}
// handleTomete answers the "Tomete" (stop) intent and ends the session.
func handleTomete(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	msg := game.GetMessage2(game.Tomete)
	p := protocol.MakeCEKResponsePayload(msg, true) // true: end the session
	return protocol.MakeCEKResponse(p)
}

// handleArigato answers the "Arigato" (thank you) intent; session stays open.
func handleArigato(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	msg := game.GetMessage2(game.Arigato)
	p := protocol.MakeCEKResponsePayload(msg, false)
	return protocol.MakeCEKResponse(p)
}

// handleSugoine answers the "Sugoine" intent; session stays open.
func handleSugoine(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	msg := game.GetMessage2(game.Sugoine)
	p := protocol.MakeCEKResponsePayload(msg, false)
	return protocol.MakeCEKResponse(p)
}
// handleQuestion answers the "Question" intent based on the "qitem" slot:
// known weather terms (煙霧, もや) get explanations, an empty slot gets the
// no-city prompt, and anything else gets the unknown-item message.
func handleQuestion(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	qitem := protocol.GetStringSlot(req.Request.Intent.Slots, "qitem")
	var msg string
	switch qitem {
	case "煙霧":
		msg = game.GetMessage(game.Enmu)
	case "もや":
		msg = game.GetMessage(game.Moya)
	case "":
		msg = game.GetMessage2(game.NoCity)
	default:
		msg = game.GetMessage2(game.UnknownQItem, qitem)
	}
	payload := protocol.MakeCEKResponsePayload(msg, false)
	return protocol.MakeCEKResponse(payload)
}
// handleSamui answers the "Samui" (cold) intent; session stays open.
func handleSamui(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	msg := game.GetMessage2(game.Samui)
	p := protocol.MakeCEKResponsePayload(msg, false)
	return protocol.MakeCEKResponse(p)
}

// handleYesIntent answers the builtin Clova.YesIntent; session stays open.
func handleYesIntent(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	msg := game.GetMessage2(game.Yes)
	p := protocol.MakeCEKResponsePayload(msg, false)
	return protocol.MakeCEKResponse(p)
}

// handleNoIntent answers the builtin Clova.NoIntent; session stays open.
func handleNoIntent(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	msg := game.GetMessage2(game.No)
	p := protocol.MakeCEKResponsePayload(msg, false)
	return protocol.MakeCEKResponse(p)
}

// handleDoita answers the "Doita" (you're welcome) intent. It draws a random
// Doita message (presumably returned with probability 0.85 — confirm against
// GetMessage2Random); when that yields "", a recommendation message is used.
func handleDoita(req protocol.CEKRequest, userID string) protocol.CEKResponse {
	msg := game.GetMessage2Random(game.Doita, 0.85)
	if msg == "" {
		msg = game.GetOsusumeMessage()
	}
	p := protocol.MakeCEKResponsePayload(msg, false)
	return protocol.MakeCEKResponse(p)
}
// getCityFromCountrySlot3 checks the country-type slots and creates a
// CityInfo from the first one that resolves, trying "country_snt", then
// "ken_jp", then "country". Returns nil when no country slot resolves.
// (The previous comment mentioned a second return value; this function
// returns only the *CityInfo.)
func getCityFromCountrySlot3(slots map[string]protocol.CEKSlot) *model.CityInfo {
	country := protocol.GetStringSlot(slots, "country_snt")
	if country != "" {
		city, found := tritondb.CountryName2City2(country)
		if found {
			return city
		}
		log.Warn("country not found: %s", country)
	}
	country = protocol.GetStringSlot(slots, "ken_jp")
	if country != "" {
		city, found := tritondb.CountryName2City2(country)
		if found {
			return city
		}
		log.Warn("country not found: %s", country)
	}
	country = protocol.GetStringSlot(slots, "country")
	if country != "" {
		city, found := tritondb.Country2City(country)
		if found {
			return city
		}
		log.Warn("country not found: %s", country)
	}
	return nil
}
// getCityFromPoiSlots checks the poi-type slot and populates the passed
// CityInfo. The second return value reports whether a poi slot exists
// (even when the POI lookup itself fails or finds nothing).
func getCityFromPoiSlots(slots map[string]protocol.CEKSlot, cityInfo *model.CityInfo) (*model.CityInfo, bool) {
	poi := protocol.GetStringSlot(slots, "poi_snt")
	if poi == "" {
		return cityInfo, false
	}
	log.Debug("poi: %s", poi)
	cityInfo, found, err := tritondb.Poi2City(poi, cityInfo)
	if err != nil {
		fmt.Println("ERROR!", err.Error())
	}
	if !found {
		fmt.Printf("WARN: POI not found: %s\n", poi)
	}
	return cityInfo, true
}
// getCityFromCitySlot3 fills cityInfo.CityName from the first non-empty
// city slot ("city", then "city_snt", then "city_jp"). For Japanese names a
// single trailing "市" is stripped and the country code defaults to "JP".
// Always returns a non-nil CityInfo (allocating one when needed).
func getCityFromCitySlot3(slots map[string]protocol.CEKSlot, cityInfo *model.CityInfo) *model.CityInfo {
	if cityInfo == nil {
		cityInfo = &model.CityInfo{}
	}
	var cityName string
	cityName = protocol.GetStringSlot(slots, "city")
	if cityName != "" {
		cityInfo.CityName = cityName
		return cityInfo
	}
	cityName = protocol.GetStringSlot(slots, "city_snt")
	if cityName != "" {
		cityInfo.CityName = cityName
		return cityInfo
	}
	cityName = protocol.GetStringSlot(slots, "city_jp")
	if cityName != "" {
		// Fix: TrimSuffix removes exactly one trailing "市". The previous
		// HasSuffix+TrimRight treated "市" as a cutset and stripped every
		// trailing occurrence (e.g. "廿日市市" became "廿日" instead of
		// "廿日市").
		cityName = strings.TrimSuffix(cityName, "市")
		cityInfo.CityName = cityName
		if cityInfo.CountryCode == "" {
			cityInfo.CountryCode = "JP"
		}
		return cityInfo
	}
	return cityInfo
}
// genCityInfoFromSlots resolves the target city from the intent slots:
// country slots first, then POI slots, then plain city slots. A present POI
// slot short-circuits the city-slot lookup.
func genCityInfoFromSlots(req protocol.CEKRequest) *model.CityInfo {
	intent := req.Request.Intent
	slots := intent.Slots
	cityInfo := getCityFromCountrySlot3(slots)
	cityInfo, poiExists := getCityFromPoiSlots(slots, cityInfo)
	if poiExists {
		return cityInfo
	}
	return getCityFromCitySlot3(slots, cityInfo)
}
// handleStartNew starts a brand-new game for userId: any existing game
// master is stopped (its maze size remembered), a fresh one is registered,
// and the response announces start/goal — with a shorter message when the
// maze size is unchanged from the previous game.
func handleStartNew(req protocol.CEKRequest, userId string) protocol.CEKResponse {
	prevSize := 0
	gm := masterRepo.GetGameMaster(userId)
	if gm != nil {
		gm.Stop()
		prevSize, _ = gm.GetSize()
	}
	gm = game.NewGameMaster()
	masterRepo.AddGameMaster(userId, gm)
	err := gm.StartNew()
	if err != nil {
		fmt.Println("ERROR!", err)
		return handleInvalidRequest(req)
	}
	// After calling of gm.StartNew(). So maze should exist.
	size, _ := gm.GetSize()
	start, err := gm.GetStart()
	if err != nil {
		fmt.Println("ERROR!", err)
		return handleInvalidRequest(req)
	}
	goal, err := gm.GetGoal()
	if err != nil {
		fmt.Println("ERROR!", err)
		return handleInvalidRequest(req)
	}
	var msg1 string
	if size != prevSize {
		msg1 = game.GetMessage(game.START_MSG_NEW, size, size, start, goal)
	} else {
		msg1 = game.GetMessage(game.START_MSG_NEW_SIMPLE, start, goal)
	}
	msg2 := game.GetMessage(game.RepromptMsg2)
	p := protocol.MakeCEKResponsePayload2(msg1, msg2, false)
	return protocol.MakeCEKResponse(p)
}
// handleStartOver restarts the current game for userId, keeping the same
// maze (StartOver). When no game master exists yet it falls back to
// starting a new game.
func handleStartOver(req protocol.CEKRequest, userId string) protocol.CEKResponse {
	gm := masterRepo.GetGameMaster(userId)
	if gm == nil {
		return handleStartNew(req, userId)
	}
	gm.Stop()
	err := gm.StartOver()
	if err != nil {
		fmt.Println("ERROR!", err)
		return handleInvalidRequest(req)
	}
	start, err := gm.GetStart()
	if err != nil {
		fmt.Println("ERROR!", err)
		return handleInvalidRequest(req)
	}
	goal, err := gm.GetGoal()
	if err != nil {
		fmt.Println("ERROR!", err)
		return handleInvalidRequest(req)
	}
	msg1 := game.GetMessage(game.START_MSG_REPEAT, start, goal)
	msg2 := game.GetMessage(game.RepromptMsg2)
	p := protocol.MakeCEKResponsePayload2(msg1, msg2, false)
	return protocol.MakeCEKResponse(p)
}
// location2String222 formats a maze location as "<X>の<Y>" for speech output.
// NOTE(review): the "222" suffix looks like a leftover experiment name —
// consider renaming once call sites are audited.
func location2String222(loc game.Location) string {
	return fmt.Sprintf("%dの%d", loc.X, loc.Y)
}
// handleLaunchRequest builds the greeting: an opening sound URL followed by
// the welcome text; the session stays open.
func handleLaunchRequest() protocol.CEKResponse {
	osVal1 := protocol.MakeOutputSpeechUrlValue(game.GetSoundURL(game.OpeningSound))
	osVal2 := protocol.MakeOutputSpeechTextValue(game.GetMessage(game.WelcomeMsg))
	os := protocol.MakeOutputSpeechList(osVal1, osVal2)
	p := protocol.CEKResponsePayload{
		OutputSpeech: os,
		ShouldEndSession: false,
	}
	return protocol.MakeCEKResponse(p)
}

// handleEndRequest builds the goodbye payload and ends the session.
func handleEndRequest() protocol.CEKResponsePayload {
	msg := game.GetMessage(game.GoodbyMsg)
	return protocol.CEKResponsePayload{
		OutputSpeech: protocol.MakeSimpleOutputSpeech(msg),
		ShouldEndSession: true,
	}
}
// handleUnknownRequest replies with a "what did you mean?" style prompt for
// intents this skill does not recognize; the session stays open.
func handleUnknownRequest(req protocol.CEKRequest) protocol.CEKResponse {
	msg := game.GetMessage(game.InquirelyMsg)
	p := protocol.CEKResponsePayload{
		OutputSpeech: protocol.MakeSimpleOutputSpeech(msg),
		ShouldEndSession: false,
	}
	return protocol.MakeCEKResponse(p)
}

// handleInvalidRequest replies with the invalid-action message (used when an
// internal game operation fails); the session stays open.
func handleInvalidRequest(req protocol.CEKRequest) protocol.CEKResponse {
	msg := game.GetMessage2(game.InvalidActionMsg)
	p := protocol.CEKResponsePayload{
		OutputSpeech: protocol.MakeSimpleOutputSpeech(msg),
		ShouldEndSession: false,
	}
	return protocol.MakeCEKResponse(p)
}

// getErrorResponse wraps msg in a session-ending error response.
func getErrorResponse(msg string) protocol.CEKResponse {
	p := protocol.CEKResponsePayload{
		OutputSpeech: protocol.MakeSimpleOutputSpeech(msg),
		ShouldEndSession: true,
	}
	return protocol.MakeCEKResponse(p)
}
// respondError writes msg to the client as a JSON CEK response.
func respondError(w http.ResponseWriter, msg string) {
	payload := protocol.CEKResponsePayload{
		OutputSpeech: protocol.MakeSimpleOutputSpeech(msg),
	}
	resp := protocol.MakeCEKResponse(payload)
	w.Header().Set("Content-Type", "application/json")
	body, _ := json.Marshal(&resp)
	w.Write(body)
}
// getIntentName extracts the intent name from a CEK request.
func getIntentName(req protocol.CEKRequest) string {
	return req.Request.Intent.Name
}
// HealthCheck responds with the literal body "OK"; intended for load
// balancers and monitoring to verify liveness.
func HealthCheck(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("OK"))
}
|
package timingwheel
import (
"math/rand"
"sync"
"time"
)
var (
	count = 5 // number of default wheels; After() load-balances across them with rand.Intn(count)
	tws = []*TimingWheel{} // the active wheel set
	once sync.Once // guards Stop() so the wheels are stopped only once
)

// init creates and starts the default wheel set: `count` wheels, each with a
// 1-second tick and 600 slots (a 10-minute horizon per wheel).
func init() {
	for i := 0; i < count; i++ {
		tw := New(1*time.Second, 600) // 10 minutes: 600 slots x 1s tick (was misspelled "minite")
		tw.Start()
		tws = append(tws, tw)
	}
}
// SetDefaultTimeingWheels stops the current default wheel set and replaces
// it with obj, starting every wheel in the new set.
// NOTE(review): not safe for concurrent use with After/Sleep — tws and
// count are unsynchronized package state.
func SetDefaultTimeingWheels(obj []*TimingWheel) {
	for _, tw := range tws {
		tw.Stop()
	}
	tws = obj
	// Fix: keep count in sync with the new set. After() indexes tws with
	// rand.Intn(count); a stale count panics with index-out-of-range when
	// the new set is smaller, and starves the extra wheels when larger.
	count = len(tws)
	for _, tw := range tws {
		tw.Start()
	}
}
// Sleep blocks until the timing-wheel timer for timeout fires. Resolution
// is bounded by the wheel tick (1s for the default wheels), so the actual
// wait is coarser than time.Sleep.
func Sleep(timeout time.Duration) {
	<-After(timeout)
}

// After returns a channel that fires once timeout elapses, picking one of
// the default wheels at random to spread load.
func After(timeout time.Duration) chan struct{} {
	n := rand.Intn(count) // safe array bound only while count == len(tws)
	return tws[n].After(timeout)
}

// Stop stops every wheel in the default set. Safe to call multiple times;
// only the first call has any effect (guarded by once).
func Stop() {
	once.Do(func() {
		for _, tw := range tws {
			tw.Stop()
		}
	})
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-25 10:50
* Description:
*****************************************************************/
package pdl
// PDLQuery provides lookup operations over parsed PDL definitions. Each
// element can be resolved either by its fully-qualified name or by a
// (namespace, local name) pair; lookups return the owning namespace
// alongside the found element. Miss semantics (presumably nil results) are
// defined by the implementation, which is not visible here.
type PDLQuery interface {
	// Service lookups.
	GetServiceByFullName(fullName string) *FileService
	QryService(fullName string) (*TPDLNamespace, *FileService)
	QryServiceByNS(namespace string, svcName string) (*TPDLNamespace, *FileService)
	// Typedef lookups.
	QryTypedef(fullName string) (*TPDLNamespace, *FileTypeDef)
	QryTypeDefByNS(namespace string, defName string) (*TPDLNamespace, *FileTypeDef)
	// Struct type lookups.
	QryType(fullName string) (*TPDLNamespace, *FileStruct)
	QryTypeByNS(namespace string, typName string) (*TPDLNamespace, *FileStruct)
	// Method lookups (also return the owning service).
	QryMethod(svcFullName string, methodName string) (*TPDLNamespace, *FileService, *FileServiceMethod)
	QryMethodByNS(namespace string, svcName string, methodName string) (*TPDLNamespace, *FileService, *FileServiceMethod)
	// Namespace-level enumeration.
	QryServices(namespace string) (*TPDLNamespace, map[string]*FileService)
	AllNamespaces() []string
	QryNamespace(namespace string) *TPDLNamespace
}
|
package manage
import (
"github.com/gin-gonic/gin"
"server-monitor-admin/global"
"server-monitor-admin/global/response"
)
// Test1 is a scratch endpoint: it prints the global viper handle and the
// "test.name" config value to stdout, then echoes that value back as an OK
// response. NOTE(review): println(global.VIPER) prints the handle itself
// (not the config contents) — likely leftover debugging.
func Test1(c *gin.Context) {
	println(global.VIPER)
	println(global.VIPER.GetString("test.name"))
	response.OkMsg(global.VIPER.GetString("test.name"), c)
}
// Test2 is a stubbed-out scratch endpoint: the DB query below is commented
// out, so it currently does nothing observable (the c.Get result is
// discarded and no response is written).
func Test2(c *gin.Context) {
	c.Get("")
	//var acuser model.AcUser
	//find := global.DB.Find(&acuser)
	//marshal, _ := json.Marshal(&find)
	//println(marshal)
	//response.OkData(find, c)
}
|
package main
import (
"fmt"
)
// main demonstrates fixed-size arrays: the zero value prints as four empty
// strings, then the filled array, its length, and its capacity (equal for
// arrays) are printed.
func main() {
	var months [4]string
	fmt.Println(months)
	for i, name := range []string{"May", "June", "July", "August"} {
		months[i] = name
	}
	fmt.Println(months)
	fmt.Println(len(months))
	fmt.Println(cap(months))
}
|
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cuegen
import (
"fmt"
gotypes "go/types"
"strconv"
cueast "cuelang.org/go/cue/ast"
cuetoken "cuelang.org/go/cue/token"
)
// Ident returns a new CUE identifier with the given name, prefixed with "#"
// when it denotes a definition.
func Ident(name string, isDef bool) *cueast.Ident {
	prefix := ""
	if isDef {
		prefix = "#"
	}
	return cueast.NewIdent(prefix + name)
}
// basicType maps a Go basic type to the corresponding CUE identifier.
// uintptr widens to uint64; byte is an alias for uint8 in go/types, so it
// already arrives as "uint8" — the explicit case is kept for clarity.
func basicType(x *gotypes.Basic) cueast.Expr {
	name := x.String()
	switch name {
	case "uintptr":
		name = "uint64"
	case "byte":
		name = "uint8"
	}
	return Ident(name, false)
}
// basicLabel converts the literal v of basic type t into the matching CUE
// expression (int/float/bool/string literal), validating v first. It fails
// when v does not parse as t, or when t is not one of those kinds.
func basicLabel(t *gotypes.Basic, v string) (cueast.Expr, error) {
	info := t.Info()
	if info&gotypes.IsInteger != 0 {
		if _, err := strconv.ParseInt(v, 10, 64); err != nil {
			return nil, err
		}
		return &cueast.BasicLit{Kind: cuetoken.INT, Value: v}, nil
	}
	if info&gotypes.IsFloat != 0 {
		if _, err := strconv.ParseFloat(v, 64); err != nil {
			return nil, err
		}
		return &cueast.BasicLit{Kind: cuetoken.FLOAT, Value: v}, nil
	}
	if info&gotypes.IsBoolean != 0 {
		parsed, err := strconv.ParseBool(v)
		if err != nil {
			return nil, err
		}
		return cueast.NewBool(parsed), nil
	}
	if info&gotypes.IsString != 0 {
		return cueast.NewString(v), nil
	}
	return nil, fmt.Errorf("unsupported basic type %s", t)
}
|
package sequentialdigits
import (
"sort"
"strconv"
"strings"
)
// isSequential reports whether every element of nums is exactly one greater
// than its predecessor. Empty and single-element slices count as sequential.
func isSequential(nums []int) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i] != nums[i-1]+1 {
			return false
		}
	}
	return true
}
// backTrack enumerates length-targetLength digit sequences over candidates
// by depth-first search and appends those that are strictly consecutive
// ascending (per isSequential) to *solutions.
// Invariants: freqMap limits how often each candidate may be used; solution
// is mutated in place and restored on unwind (a copy is stored on success).
// NOTE(review): startIndex is passed through unchanged on recursion, so the
// same prefix re-explores all candidates — combined with the final
// isSequential filter this is exhaustive-but-wasteful rather than pruned.
func backTrack(
	solutions *[][]int,
	candidates []int,
	solution []int,
	startIndex int,
	targetLength int,
	freqMap map[int]int,
) {
	// Too long: no extension can succeed.
	if len(solution) > targetLength {
		return
	}
	// Exact length reached: keep it only if the digits are consecutive.
	if len(solution) == targetLength && isSequential(solution) {
		solutionCopy := make([]int, len(solution))
		copy(solutionCopy, solution)
		*solutions = append(*solutions, solutionCopy)
		return
	}
	for ind := startIndex; ind < len(candidates); ind++ {
		if freqMap[candidates[ind]] > 0 {
			freqMap[candidates[ind]]--
			solution = append(solution, candidates[ind])
			backTrack(solutions, candidates, solution, startIndex, targetLength, freqMap)
			freqMap[candidates[ind]]++
			solution = solution[:len(solution)-1]
		}
	}
}
// convertToStringArray renders every int in intArray as its decimal string.
func convertToStringArray(intArray []int) []string {
	stringArr := make([]string, len(intArray))
	for i, num := range intArray {
		stringArr[i] = strconv.Itoa(num)
	}
	return stringArr
}
// SequentialDigits ...
func SequentialDigits(low int, high int) []int {
lowLength := len(strconv.Itoa(low))
highLength := len(strconv.Itoa(high))
candidates := []int{1, 2, 3, 4, 5, 6, 7, 8, 9}
solutions := [][]int{}
seqLength := lowLength
for seqLength >= lowLength && seqLength <= highLength {
freqMap := map[int]int{
1: 1,
2: 1,
3: 1,
4: 1,
5: 1,
6: 1,
7: 1,
8: 1,
9: 1,
}
backTrack(&solutions, candidates, []int{}, 0, seqLength, freqMap)
seqLength++
}
finalSolutionsUnique := make(map[int]int, 0)
for _, solution := range solutions {
solutionAsStringArr := convertToStringArray(solution)
solutionAsNum, _ := strconv.Atoi(strings.Join(solutionAsStringArr, ""))
finalSolutionsUnique[solutionAsNum] = 1
}
finalSolutions := make([]int, 0)
for solution := range finalSolutionsUnique {
if solution >= low && solution <= high {
finalSolutions = append(finalSolutions, solution)
}
}
sort.Ints(finalSolutions)
return finalSolutions
}
|
package chargen
import "math/rand"
// randomItem returns a uniformly random element of items using the global
// math/rand source. Panics if items is empty (rand.Intn(0) panics).
func randomItem(items []string) string {
	return items[rand.Intn(len(items))]
}
// itemInCollection reports whether item occurs in collection.
func itemInCollection(item string, collection []string) bool {
	for i := range collection {
		if collection[i] == item {
			return true
		}
	}
	return false
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/runtimeconfig/beta/runtimeconfig_beta_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/runtimeconfig/beta"
)
// ConfigServer implements the gRPC interface for the RuntimeConfig (beta)
// Config resource; it is stateless and builds a DCL client per request.
type ConfigServer struct{}
// ProtoToConfig converts a Config resource from its proto representation to
// the DCL type. StringOrNil presumably maps unset/empty proto strings to nil
// pointers (i.e. "field not set") — confirm against the dcl package.
func ProtoToConfig(p *betapb.RuntimeconfigBetaConfig) *beta.Config {
	obj := &beta.Config{
		Name: dcl.StringOrNil(p.Name),
		Description: dcl.StringOrNil(p.Description),
		Project: dcl.StringOrNil(p.Project),
	}
	return obj
}
// ConfigToProto converts a DCL Config resource to its proto representation.
// ValueOrEmptyString presumably maps nil pointers back to "" — confirm
// against the dcl package.
func ConfigToProto(resource *beta.Config) *betapb.RuntimeconfigBetaConfig {
	p := &betapb.RuntimeconfigBetaConfig{
		Name: dcl.ValueOrEmptyString(resource.Name),
		Description: dcl.ValueOrEmptyString(resource.Description),
		Project: dcl.ValueOrEmptyString(resource.Project),
	}
	return p
}
// applyConfig passes the request to the underlying Config Apply() method
// using the supplied (already-authenticated) client, converting between the
// proto and DCL representations on the way in and out.
// (Comment corrected: this is the unexported helper, not ApplyConfig.)
func (s *ConfigServer) applyConfig(ctx context.Context, c *beta.Client, request *betapb.ApplyRuntimeconfigBetaConfigRequest) (*betapb.RuntimeconfigBetaConfig, error) {
	p := ProtoToConfig(request.GetResource())
	res, err := c.ApplyConfig(ctx, p)
	if err != nil {
		return nil, err
	}
	r := ConfigToProto(res)
	return r, nil
}
// ApplyRuntimeconfigBetaConfig handles the gRPC request by creating a DCL
// client from the request's service-account file and delegating to the
// internal applyConfig helper.
func (s *ConfigServer) ApplyRuntimeconfigBetaConfig(ctx context.Context, request *betapb.ApplyRuntimeconfigBetaConfigRequest) (*betapb.RuntimeconfigBetaConfig, error) {
	cl, err := createConfigConfig(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyConfig(ctx, cl, request)
}
// DeleteRuntimeconfigBetaConfig handles the gRPC request by passing it to
// the underlying Config Delete() method. It always returns an empty proto;
// the delete error (if any) is the second return value.
func (s *ConfigServer) DeleteRuntimeconfigBetaConfig(ctx context.Context, request *betapb.DeleteRuntimeconfigBetaConfigRequest) (*emptypb.Empty, error) {
	cl, err := createConfigConfig(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteConfig(ctx, ProtoToConfig(request.GetResource()))
}
// ListRuntimeconfigBetaConfig handles the gRPC request by passing it to the
// underlying ConfigList() method and converting each result to its proto
// representation.
func (s *ConfigServer) ListRuntimeconfigBetaConfig(ctx context.Context, request *betapb.ListRuntimeconfigBetaConfigRequest) (*betapb.ListRuntimeconfigBetaConfigResponse, error) {
	cl, err := createConfigConfig(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	listing, err := cl.ListConfig(ctx, request.Project, request.Name)
	if err != nil {
		return nil, err
	}
	var protos []*betapb.RuntimeconfigBetaConfig
	for _, item := range listing.Items {
		protos = append(protos, ConfigToProto(item))
	}
	return &betapb.ListRuntimeconfigBetaConfigResponse{Items: protos}, nil
}
// createConfigConfig builds a beta runtimeconfig client authenticated with
// the given service-account file. The "dcl-test" user agent is hard-coded.
func createConfigConfig(ctx context.Context, service_account_file string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return beta.NewClient(conf), nil
}
|
package redis
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/go-session/redis"
"github.com/go-session/session"
"github.com/linqiurong2021/gin-arcgis/config"
)
// RedisStore is a package-level session store handle.
// NOTE(review): nothing in this file assigns it — presumably callers set it,
// or it is vestigial; verify before relying on it.
var RedisStore session.Store

// InitRedisSession installs a Redis-backed store (at cfg.Host:cfg.Port,
// database cfg.Database) into the global session manager.
func InitRedisSession(cfg *config.RedisConfig) {
	addr := fmt.Sprintf("%s:%s", cfg.Host, cfg.Port)
	// Initialize the global session manager with the Redis store.
	session.InitManager(
		session.SetStore(redis.NewRedisStore(&redis.Options{
			Addr: addr,
			DB:   cfg.Database,
		})),
	)
}
// GetStore starts (or resumes) the session bound to the current HTTP
// request and returns its store.
func GetStore(c *gin.Context) (store session.Store, err error) {
	return session.Start(c, c.Writer, c.Request)
}
// Set stores key/value in the current request's session and persists the
// session. It returns the store on success.
func Set(c *gin.Context, key string, value interface{}) (store session.Store, err error) {
	if store, err = GetStore(c); err != nil {
		return nil, err
	}
	store.Set(key, value)
	if err = store.Save(); err != nil {
		return nil, err
	}
	return store, nil
}
// Get looks key up in the current request's session. ok reports whether the
// key was present.
func Get(c *gin.Context, key string) (value interface{}, ok bool, err error) {
	s, err := GetStore(c)
	if err != nil {
		return nil, false, err
	}
	v, found := s.Get(key)
	return v, found, nil
}
|
/*
xkcd is everyone's favorite webcomic, and you will be writing a program that will bring a little bit more humor to us all.
Your objective in this challenge is to write a program which will take a number as input and display that xkcd and its title-text (mousover text).
Input
Your program will take a positive integer as input (not necessarily one for which there exists a valid comic) and display that xkcd: for example, an input of 1500 should display the comic "Upside-Down Map" at xkcd.com/1500,
and then either print its title-text to the console or display it with the image.
Test case 1, for n=1500, the title-text is:
Due to their proximity across the channel, there's long been tension between North Korea and the United Kingdom of Great Britain and Southern Ireland.
Test case 2, for n=859, the title-text is:
Brains aside, I wonder how many poorly-written xkcd.com-parsing scripts will break on this title (or ;;"''{<<[' this mouseover text."
Your program should also be able to function without any input, and perform the same task for the most recent xkcd found at xkcd.com, and it should always display the most recent one even when a new one goes up.
You do not have to get the image directly from xkcd.com, you can use another database as long as it is up-to-date and already existed before this challenge went up.
URL shortners, that is, urls with no purpose other than redirecting to somewhere else, are not allowed.
You may display the image in any way you chose, including in a browser. You may not, however, directly display part of another page in an iframe or similar.
CLARIFICATION: you cannot open a preexisting webpage, if you wish to use the browser you have to create a new page. You must also actually display an image - outputting an image file is not allowed.
You can handle the case that there isn't an image for a particular comic (e.g. it is interactive or the program was passed a number greater than the amount of comics that have been released) in any reasonable way you wish, including throwing an exception,
or printing out an at least single-character string, as long as it somehow signifies to the user that there isn't an image for that input.
You can only display an image and output its title-text, or output an error message for an invalid comic. Other output is not allowed.
This is a code-golf challenge, so the fewest bytes wins.
*/
package main
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"golang.org/x/net/html"
)
// main fetches the xkcd page for the comic number given as the first CLI
// argument and prints the title-text of its image.
func main() {
	log.SetFlags(0)
	log.SetPrefix("get-xkcd-title: ")
	parseflags()
	url := fmt.Sprintf("https://xkcd.com/%v", flag.Arg(0))
	data, err := get(url)
	ck(err)
	text, err := extract(data)
	ck(err)
	fmt.Println(text)
}
// ck terminates the program via log.Fatal when err is non-nil; it is a
// no-op otherwise.
func ck(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
// parseflags parses the command line and exits with usage help (status 2,
// via usage) when no comic-number argument is supplied.
func parseflags() {
	flag.Usage = usage
	flag.Parse()
	if flag.NArg() < 1 {
		usage()
	}
}
// usage prints the usage line and flag defaults to stderr, then exits the
// process with status 2.
func usage() {
	fmt.Fprintln(os.Stderr, "usage: [options] comic")
	flag.PrintDefaults()
	os.Exit(2)
}
// get fetches url over HTTP and returns the entire response body.
// The response body is always closed before returning so the underlying
// connection can be reused (the original leaked it).
func get(url string) (b []byte, err error) {
	r, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer r.Body.Close()
	return io.ReadAll(r.Body)
}
// extract parses b as HTML and returns the title attribute of the first
// <img> element found (depth-first), or "" when none exists.
func extract(b []byte) (s string, err error) {
	doc, parseErr := html.Parse(bytes.NewReader(b))
	if parseErr != nil {
		return "", parseErr
	}
	return walk(doc), nil
}
// walk searches the node tree depth-first for an <img> element carrying a
// "title" attribute and returns its value; "" when no such node exists.
func walk(n *html.Node) (s string) {
	if n.Type == html.ElementNode && n.Data == "img" {
		for _, attr := range n.Attr {
			if attr.Key == "title" {
				return attr.Val
			}
		}
	}
	for child := n.FirstChild; child != nil; child = child.NextSibling {
		if found := walk(child); found != "" {
			return found
		}
	}
	return ""
}
|
package pg_astro
import (
"bufio"
"bytes"
"log"
"net"
"sync"
"time"
)
// printMx serializes console output.
// NOTE(review): not referenced anywhere in this chunk — confirm other files
// in the package use it.
var printMx sync.Mutex

// Server is a line-oriented TCP server that tracks its open connections so
// Shutdown can wait for them to drain.
type Server struct {
	Addr                  string        // listen address; ":8080" when empty
	IdleTimeout           time.Duration // per-connection read deadline
	IdleControllerTimeout time.Duration // max silence before dropping a controller
	MaxReadBytes          int64
	listener              net.Listener
	conns                 map[*conn]struct{} // live connections, guarded by mu
	mu                    sync.Mutex
	inShutdown            bool // NOTE(review): read/written without mu in places
	done                  chan struct{}
}
// ListenAndServe listens on srv.Addr (":8080" by default) and serves each
// accepted connection in its own goroutine until Shutdown is called.
// NOTE(review): inShutdown is read without holding mu, as the inline
// comment admits — a data race under `go test -race`.
func (srv *Server) ListenAndServe() error {
	srv.done = make(chan struct{})
	addr := srv.Addr
	if addr == "" {
		addr = ":8080"
	}
	log.Printf("starting server on %v\n", addr)
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	defer listener.Close()
	srv.listener = listener
	for {
		// should be guarded by mu
		if srv.inShutdown {
			break
		}
		// Accept also fails once Shutdown closes the listener; the next
		// iteration then observes inShutdown and exits the loop.
		newConn, err := listener.Accept()
		if err != nil {
			log.Printf("error accepting connection %v", err)
			continue
		}
		log.Printf("accepted connection from %v", newConn.RemoteAddr())
		conn := &conn{
			Conn:          newConn,
			IdleTimeout:   srv.IdleTimeout,
			MaxReadBuffer: srv.MaxReadBytes,
		}
		srv.trackConn(conn)
		conn.SetDeadline(time.Now().Add(conn.IdleTimeout))
		go srv.handle(conn)
	}
	return nil
}
// trackConn registers c in the server's connection set, lazily allocating
// the map on first use. Guarded by srv.mu.
// Fix: acquire the lock before deferring the unlock — the original deferred
// Unlock on the line before Lock, which works but reads backwards.
func (srv *Server) trackConn(c *conn) {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	if srv.conns == nil {
		srv.conns = make(map[*conn]struct{})
	}
	srv.conns[c] = struct{}{}
}
// handle serves one connection: it writes an initial subscription message,
// then a reader goroutine keeps consuming input until a fatal read error,
// a controller idle timeout, or shutdown. The connection is closed and
// untracked on return.
func (srv *Server) handle(conn *conn) error {
	defer func() {
		log.Printf("closing connection from %v", conn.RemoteAddr())
		conn.Close()
		srv.deleteConn(conn)
	}()
	w := bufio.NewWriter(conn)
	var wg sync.WaitGroup
	wg.Add(1)
	var lastTime = time.Now()
	go func() {
		var b [4096]byte
		//buffer := make([]byte, 0)
		for {
			// NOTE(review): `len` shadows the builtin inside this loop;
			// rename if this code is ever edited.
			len, err := conn.Read(b[:])
			if err != nil {
				e, ok := err.(net.Error)
				if ok && e.Timeout() {
					// Read deadlines fire periodically; decide whether the
					// controller has been silent for too long.
					if srv.inShutdown {
						break
					}
					if time.Since(lastTime) > srv.IdleControllerTimeout {
						log.Println("Controller connection timeout is expired")
						break
					} else {
						lastTime = time.Now()
						continue
					}
				}
				log.Printf("Package read failed: %s\n", err.Error())
				break
			}
			//extractPackage(b[:len], buffer)
			log.Println("==> " + string(b[:len]))
		}
		wg.Done()
	}()
	// Initial handshake/subscription message sent to the peer.
	w.WriteString(`{"node":"0xFFFFFFFF","list":0}`)
	w.Flush()
	wg.Wait()
	return nil
}
// extractPackage accumulates raw bytes into buffer and recursively slices
// out brace-delimited packages ("{...}") as they complete.
// NOTE(review): `buffer = append(...)` below mutates only the local slice
// header, so accumulated state is NOT visible to the caller between calls —
// the function is currently unused (its call sites in handle are commented
// out); fix the signature (e.g. return the buffer) before enabling it.
func extractPackage(message []byte, buffer []byte) {
	s := bytes.Trim(message, "\n \t")
	endOfPackage := bytes.Index(s, []byte("}"))
	if endOfPackage == -1 {
		// No '}' yet — stash the fragment for later.
		buffer = append(buffer, s...)
	} else {
		// A '}' is present, so a package boundary ends inside s.
		startOfPackage := bytes.LastIndex(buffer, []byte("{"))
		if startOfPackage != -1 {
			// The package started in a previous fragment held in buffer.
			pkg := buffer[startOfPackage:]
			buffer = buffer[:startOfPackage]
			pkg = append(pkg, s[:endOfPackage+1]...)
			//parsePackage(pkg)
			//log.Println(string(pkg))
			extractPackage(s[endOfPackage+1:], buffer)
		} else {
			// The package must start within s itself.
			startOfPackage = bytes.Index(s, []byte("{"))
			if startOfPackage == -1 {
				log.Println("Wrong package received")
			} else {
				//parsePackage(s[startOfPackage, endOfPackage + 1])
				//log.Println(string(s[startOfPackage : endOfPackage+1]))
				extractPackage(s[endOfPackage+1:], buffer)
			}
		}
	}
}
// deleteConn removes conn from the server's connection set. Guarded by
// srv.mu. Fix: acquire the lock before deferring the unlock (the original
// deferred Unlock on the line before Lock — legal but confusing).
func (srv *Server) deleteConn(conn *conn) {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	delete(srv.conns, conn)
}
// Shutdown flags the server as closing, closes the listener (which unblocks
// Accept), then polls every 500ms until all tracked connections are gone.
// NOTE(review): inShutdown is set without mu (see inline comment) and
// len(srv.conns) is read unlocked below — racy under -race.
func (srv *Server) Shutdown() {
	// should be guarded by mu
	srv.inShutdown = true
	log.Println("shutting down...")
	srv.listener.Close()
	close(srv.done)
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		// Single-case select: simply blocks until the next tick.
		select {
		case <-ticker.C:
			log.Printf("waiting on %v connections", len(srv.conns))
		}
		if len(srv.conns) == 0 {
			return
		}
	}
}
|
package main
import (
"fmt"
"strconv"
)
// Person models a person with basic identity fields.
type Person struct {
	firstName, lastName, city, gender string
	age                               int
}

// greet returns a greeting sentence. Value receiver: it only reads state.
func (p Person) greet() string {
	fullName := p.firstName + " " + p.lastName
	return "Hello, my name is " + fullName + " and I am " + strconv.Itoa(p.age)
}

// hasBirthday bumps the age by one. Pointer receiver so the change sticks.
func (p *Person) hasBirthday() {
	p.age++
}

// getMarried assigns the spouse's last name; a "Male" person keeps their
// own name (mirrors the original demo's rule).
func (p *Person) getMarried(spouseLastName string) {
	if p.gender == "Male" {
		return
	}
	p.lastName = spouseLastName
}
// main demonstrates value vs pointer receivers on Person.
func main() {
	person1 := Person{
		firstName: "Truong",
		lastName:  "Vo",
		city:      "HCM",
		gender:    "Male",
		age:       22,
	}
	fmt.Println(person1)
	// Field-name initialization (rather than positional Person{"Mike", ...})
	// lets callers omit fields.
	person2 := Person{
		firstName: "Mike",
		lastName:  "Triump",
		city:      "HCM",
		gender:    "Male",
		age:       52,
	}
	person2.age++
	fmt.Println(person2)
	person1.hasBirthday()
	person1.getMarried("Williams")
	fmt.Println(person1.greet())
}
|
package sheet_logic
import (
"fmt"
"hub/sheet_logic/sheet_logic_types"
)
// EmptyExpressionName is the display name reported by every placeholder
// ("empty") expression.
const EmptyExpressionName string = "<none>"

// GrammarContext resolves variable names to typed values during evaluation.
type GrammarContext interface {
	GetIntValue(string) (int64, error)
	GetFloatValue(string) (float64, error)
	GetStringValue(string) (string, error)
	GetBoolValue(string) (bool, error)
}

// GrammarElement is the common identity of every node in the grammar tree:
// a type tag plus a mutable display name.
type GrammarElement interface {
	GetType() sheet_logic_types.T
	GetName() string
	SetName(string)
}
// UnaryOperationInt holds the single int-typed argument of a unary op.
type UnaryOperationInt interface {
	GetArg() IntExpresion
	SetArg(IntExpresion)
}

// UnaryOperationFloat holds the single float-typed argument of a unary op.
type UnaryOperationFloat interface {
	GetArg() FloatExpresion
	SetArg(FloatExpresion)
}

// UnaryOperationString holds the single string-typed argument of a unary op.
type UnaryOperationString interface {
	GetArg() StringExpresion
	SetArg(StringExpresion)
}

// UnaryOperationBool holds the single bool-typed argument of a unary op.
type UnaryOperationBool interface {
	GetArg() BoolExpresion
	SetArg(BoolExpresion)
}

// BinaryOperationInt holds the two int-typed arguments of a binary op.
type BinaryOperationInt interface {
	GetLeftArg() IntExpresion
	SetLeftArg(IntExpresion)
	GetRightArg() IntExpresion
	SetRightArg(IntExpresion)
}

// BinaryOperationFloat holds the two float-typed arguments of a binary op.
type BinaryOperationFloat interface {
	GetLeftArg() FloatExpresion
	SetLeftArg(FloatExpresion)
	GetRightArg() FloatExpresion
	SetRightArg(FloatExpresion)
}

// BinaryOperationString holds the two string-typed arguments of a binary op.
type BinaryOperationString interface {
	GetLeftArg() StringExpresion
	SetLeftArg(StringExpresion)
	GetRightArg() StringExpresion
	SetRightArg(StringExpresion)
}

// BinaryOperationBool holds the two bool-typed arguments of a binary op.
type BinaryOperationBool interface {
	GetLeftArg() BoolExpresion
	SetLeftArg(BoolExpresion)
	GetRightArg() BoolExpresion
	SetRightArg(BoolExpresion)
}
// Because there will be also volatile sources (formulas, textfields etc.)
// each calculation takes a GrammarContext to resolve values at evaluation
// time.

// IntExpresion is anything that can evaluate to an int64.
type IntExpresion interface {
	CalculateInt(GrammarContext) (int64, error)
}

// EmptyIntExpression is the int placeholder; evaluating it is an error.
type EmptyIntExpression struct {
	GrammarElement
}

func (e *EmptyIntExpression) CalculateInt(_ GrammarContext) (int64, error) {
	return 0, fmt.Errorf("%T.CalculateInt", e)
}

// NewEmptyIntExpression builds the placeholder with its matching type tag.
func NewEmptyIntExpression() *EmptyIntExpression {
	return &EmptyIntExpression{
		&emptyGrammarElementImpl{sheet_logic_types.EmptyIntExpression}}
}

// FloatExpresion is anything that can evaluate to a float64.
type FloatExpresion interface {
	CalculateFloat(GrammarContext) (float64, error)
}

// EmptyFloatExpression is the float placeholder; evaluating it is an error.
type EmptyFloatExpression struct {
	GrammarElement
}

func (e *EmptyFloatExpression) CalculateFloat(_ GrammarContext) (float64, error) {
	return 0, fmt.Errorf("%T.CalculateFloat", e)
}

// NewEmptyFloatExpression builds the placeholder with its matching type tag.
func NewEmptyFloatExpression() *EmptyFloatExpression {
	return &EmptyFloatExpression{
		&emptyGrammarElementImpl{sheet_logic_types.EmptyFloatExpression}}
}

// StringExpresion is anything that can evaluate to a string.
type StringExpresion interface {
	CalculateString(GrammarContext) (string, error)
}

// EmptyStringExpression is the string placeholder; evaluating it is an error.
type EmptyStringExpression struct {
	GrammarElement
}

func (e *EmptyStringExpression) CalculateString(_ GrammarContext) (string, error) {
	return "", fmt.Errorf("%T.CalculateString", e)
}

// NewEmptyStringExpression builds the placeholder with its matching type tag.
func NewEmptyStringExpression() *EmptyStringExpression {
	return &EmptyStringExpression{
		&emptyGrammarElementImpl{sheet_logic_types.EmptyStringExpression}}
}

// BoolExpresion is anything that can evaluate to a bool.
type BoolExpresion interface {
	CalculateBool(GrammarContext) (bool, error)
}

// EmptyBoolExpression is the bool placeholder; evaluating it is an error.
type EmptyBoolExpression struct {
	GrammarElement
}

func (e *EmptyBoolExpression) CalculateBool(_ GrammarContext) (bool, error) {
	return false, fmt.Errorf("%T.CalculateBool", e)
}

// NewEmptyBoolExpression builds the placeholder with its matching type tag.
func NewEmptyBoolExpression() *EmptyBoolExpression {
	return &EmptyBoolExpression{
		&emptyGrammarElementImpl{sheet_logic_types.EmptyBoolExpression}}
}
// grammarElementImpl is the standard mutable GrammarElement implementation:
// a renameable element with a fixed grammar type tag.
type grammarElementImpl struct {
	name         string
	grammar_type sheet_logic_types.T
}

// GetName returns the element's display name.
func (g *grammarElementImpl) GetName() string {
	return g.name
}

// SetName replaces the element's display name.
func (g *grammarElementImpl) SetName(newName string) {
	g.name = newName
}

// GetType returns the element's grammar type tag.
func (g *grammarElementImpl) GetType() sheet_logic_types.T {
	return g.grammar_type
}
// UnaryOperationIntImpl is the reusable argument holder for unary int ops.
type UnaryOperationIntImpl struct {
	arg IntExpresion
}

func (u *UnaryOperationIntImpl) GetArg() IntExpresion {
	return u.arg
}

func (u *UnaryOperationIntImpl) SetArg(newExpr IntExpresion) {
	u.arg = newExpr
}

// DefaultUnaryOperationIntImpl returns a holder pre-filled with the empty
// (always-erroring) int expression.
func DefaultUnaryOperationIntImpl() UnaryOperationInt {
	return &UnaryOperationIntImpl{
		NewEmptyIntExpression()}
}

// UnaryOperationFloatImpl is the reusable argument holder for unary float ops.
type UnaryOperationFloatImpl struct {
	arg FloatExpresion
}

func (u *UnaryOperationFloatImpl) GetArg() FloatExpresion {
	return u.arg
}

func (u *UnaryOperationFloatImpl) SetArg(newExpr FloatExpresion) {
	u.arg = newExpr
}

// DefaultUnaryOperationFloatImpl returns a holder pre-filled with the empty
// float expression.
func DefaultUnaryOperationFloatImpl() UnaryOperationFloat {
	return &UnaryOperationFloatImpl{
		NewEmptyFloatExpression()}
}

// UnaryOperationStringImpl is the reusable argument holder for unary string ops.
type UnaryOperationStringImpl struct {
	arg StringExpresion
}

func (u *UnaryOperationStringImpl) GetArg() StringExpresion {
	return u.arg
}

func (u *UnaryOperationStringImpl) SetArg(newExpr StringExpresion) {
	u.arg = newExpr
}

// DefaultUnaryOperationStringImpl returns a holder pre-filled with the empty
// string expression.
func DefaultUnaryOperationStringImpl() UnaryOperationString {
	return &UnaryOperationStringImpl{
		NewEmptyStringExpression()}
}

// UnaryOperationBoolImpl is the reusable argument holder for unary bool ops.
type UnaryOperationBoolImpl struct {
	arg BoolExpresion
}

func (u *UnaryOperationBoolImpl) GetArg() BoolExpresion {
	return u.arg
}

func (u *UnaryOperationBoolImpl) SetArg(newExpr BoolExpresion) {
	u.arg = newExpr
}

// DefaultUnaryOperationBoolImpl returns a holder pre-filled with the empty
// bool expression.
func DefaultUnaryOperationBoolImpl() UnaryOperationBool {
	return &UnaryOperationBoolImpl{
		NewEmptyBoolExpression()}
}
// BinaryOperationIntImpl is the reusable left/right argument holder for
// binary int ops.
type BinaryOperationIntImpl struct {
	argLeft  IntExpresion
	argRight IntExpresion
}

func (b *BinaryOperationIntImpl) GetLeftArg() IntExpresion {
	return b.argLeft
}

func (b *BinaryOperationIntImpl) GetRightArg() IntExpresion {
	return b.argRight
}

func (b *BinaryOperationIntImpl) SetLeftArg(newExpr IntExpresion) {
	b.argLeft = newExpr
}

func (b *BinaryOperationIntImpl) SetRightArg(newExpr IntExpresion) {
	b.argRight = newExpr
}

// DefaultBinaryOperationIntImpl returns a holder with both arguments set to
// the empty int expression.
func DefaultBinaryOperationIntImpl() BinaryOperationInt {
	return &BinaryOperationIntImpl{
		NewEmptyIntExpression(),
		NewEmptyIntExpression()}
}

// BinaryOperationFloatImpl is the reusable left/right argument holder for
// binary float ops.
type BinaryOperationFloatImpl struct {
	argLeft  FloatExpresion
	argRight FloatExpresion
}

func (b *BinaryOperationFloatImpl) GetLeftArg() FloatExpresion {
	return b.argLeft
}

func (b *BinaryOperationFloatImpl) GetRightArg() FloatExpresion {
	return b.argRight
}

func (b *BinaryOperationFloatImpl) SetLeftArg(newExpr FloatExpresion) {
	b.argLeft = newExpr
}

func (b *BinaryOperationFloatImpl) SetRightArg(newExpr FloatExpresion) {
	b.argRight = newExpr
}

// DefaultBinaryOperationFloatImpl returns a holder with both arguments set
// to the empty float expression.
func DefaultBinaryOperationFloatImpl() BinaryOperationFloat {
	return &BinaryOperationFloatImpl{
		NewEmptyFloatExpression(),
		NewEmptyFloatExpression()}
}

// BinaryOperationStringImpl is the reusable left/right argument holder for
// binary string ops.
type BinaryOperationStringImpl struct {
	argLeft  StringExpresion
	argRight StringExpresion
}

func (b *BinaryOperationStringImpl) GetLeftArg() StringExpresion {
	return b.argLeft
}

func (b *BinaryOperationStringImpl) GetRightArg() StringExpresion {
	return b.argRight
}

func (b *BinaryOperationStringImpl) SetLeftArg(newExpr StringExpresion) {
	b.argLeft = newExpr
}

func (b *BinaryOperationStringImpl) SetRightArg(newExpr StringExpresion) {
	b.argRight = newExpr
}

// DefaultBinaryOperationStringImpl returns a holder with both arguments set
// to the empty string expression.
func DefaultBinaryOperationStringImpl() BinaryOperationString {
	return &BinaryOperationStringImpl{
		NewEmptyStringExpression(),
		NewEmptyStringExpression()}
}

// BinaryOperationBoolImpl is the reusable left/right argument holder for
// binary bool ops.
type BinaryOperationBoolImpl struct {
	argLeft  BoolExpresion
	argRight BoolExpresion
}

func (b *BinaryOperationBoolImpl) GetLeftArg() BoolExpresion {
	return b.argLeft
}

func (b *BinaryOperationBoolImpl) GetRightArg() BoolExpresion {
	return b.argRight
}

func (b *BinaryOperationBoolImpl) SetLeftArg(newExpr BoolExpresion) {
	b.argLeft = newExpr
}

func (b *BinaryOperationBoolImpl) SetRightArg(newExpr BoolExpresion) {
	b.argRight = newExpr
}

// DefaultBinaryOperationBoolImpl returns a holder with both arguments set
// to the empty bool expression.
func DefaultBinaryOperationBoolImpl() BinaryOperationBool {
	return &BinaryOperationBoolImpl{
		NewEmptyBoolExpression(),
		NewEmptyBoolExpression()}
}
// getFirstError returns the first non-nil error among errors, or nil when
// every entry is nil (or none are given).
func getFirstError(errors ...error) error {
	for i := range errors {
		if err := errors[i]; err != nil {
			return err
		}
	}
	return nil
}
// emptyGrammarElementImpl backs the Empty* placeholder expressions: fixed
// display name, immutable; only the type tag varies.
type emptyGrammarElementImpl struct {
	grammar_type sheet_logic_types.T
}

// GetName always reports the shared placeholder name.
func (g *emptyGrammarElementImpl) GetName() string {
	return EmptyExpressionName
}

// SetName intentionally ignores the rename.
func (g *emptyGrammarElementImpl) SetName(string) {
	// Not doing anything consciously; no need to treat this call as error.
}

// GetType returns the placeholder's grammar type tag.
func (g *emptyGrammarElementImpl) GetType() sheet_logic_types.T {
	return g.grammar_type
}
|
package render
import (
"net/http"
renderer "github.com/unrolled/render"
)
var (
	// Renderer is the package-wide render engine shared by the helper
	// functions below.
	Renderer *renderer.Render
)

// init builds Renderer with default options so the helpers work without
// further setup.
func init() {
	Renderer = renderer.New()
}
// Render writes data to w using the given render engine.
func Render(w http.ResponseWriter, e renderer.Engine, data interface{}) error {
	return Renderer.Render(w, e, data)
}

// Data writes raw bytes v with the given HTTP status.
func Data(w http.ResponseWriter, status int, v []byte) error {
	return Renderer.Data(w, status, v)
}

// HTML renders the named template with binding and optional HTML options.
func HTML(w http.ResponseWriter, status int, name string, binding interface{}, htmlOpt ...renderer.HTMLOptions) error {
	return Renderer.HTML(w, status, name, binding, htmlOpt...)
}

// JSON writes v as a JSON response with the given status.
func JSON(w http.ResponseWriter, status int, v interface{}) error {
	return Renderer.JSON(w, status, v)
}

// JSONP writes v as a JSONP response wrapped in callback.
func JSONP(w http.ResponseWriter, status int, callback string, v interface{}) error {
	return Renderer.JSONP(w, status, callback, v)
}

// Text writes v as a plain-text response with the given status.
func Text(w http.ResponseWriter, status int, v string) error {
	return Renderer.Text(w, status, v)
}

// XML writes v as an XML response with the given status.
func XML(w http.ResponseWriter, status int, v interface{}) error {
	return Renderer.XML(w, status, v)
}
|
package keeper_test
import (
"github.com/irisnet/irismod/modules/nft/keeper"
"github.com/irisnet/irismod/modules/nft/types"
)
// TestSetCollection stores a two-NFT collection and verifies it round-trips
// through the keeper with the supply invariant intact.
func (suite *KeeperSuite) TestSetCollection() {
	nft := types.NewBaseNFT(tokenID, tokenNm, address, tokenURI, tokenData)
	// create a new NFT and add it to the collection created with the NFT mint
	nft2 := types.NewBaseNFT(tokenID2, tokenNm, address, tokenURI, tokenData)
	denomE := types.Denom{
		Id:      denomID,
		Name:    denomNm,
		Schema:  schema,
		Creator: address,
	}
	collection2 := types.Collection{
		Denom: denomE,
		NFTs:  []types.BaseNFT{nft2, nft},
	}
	err := suite.keeper.SetCollection(suite.ctx, collection2)
	suite.Nil(err)
	// Read the collection back and check both NFTs survived.
	collection2, err = suite.keeper.GetCollection(suite.ctx, denomID)
	suite.NoError(err)
	suite.Len(collection2.NFTs, 2)
	msg, fail := keeper.SupplyInvariant(suite.keeper)(suite.ctx)
	suite.False(fail, msg)
}

// TestGetCollection mints one NFT and verifies the implicit collection is
// retrievable and invariant-clean.
func (suite *KeeperSuite) TestGetCollection() {
	// MintNFT shouldn't fail when collection does not exist
	err := suite.keeper.MintNFT(suite.ctx, denomID, tokenID, tokenNm, tokenURI, tokenData, address)
	suite.NoError(err)
	// collection should exist
	collection, err := suite.keeper.GetCollection(suite.ctx, denomID)
	suite.NoError(err)
	suite.NotEmpty(collection)
	msg, fail := keeper.SupplyInvariant(suite.keeper)(suite.ctx)
	suite.False(fail, msg)
}

// TestGetCollections mints one NFT and checks the supply invariant holds.
func (suite *KeeperSuite) TestGetCollections() {
	// MintNFT shouldn't fail when collection does not exist
	err := suite.keeper.MintNFT(suite.ctx, denomID, tokenID, tokenNm, tokenURI, tokenData, address)
	suite.NoError(err)
	msg, fail := keeper.SupplyInvariant(suite.keeper)(suite.ctx)
	suite.False(fail, msg)
}

// TestGetSupply mints NFTs across two denoms/owners, then burns, checking
// total and per-owner supply counters at each step.
func (suite *KeeperSuite) TestGetSupply() {
	// MintNFT shouldn't fail when collection does not exist
	err := suite.keeper.MintNFT(suite.ctx, denomID, tokenID, tokenNm, tokenURI, tokenData, address)
	suite.NoError(err)
	// MintNFT shouldn't fail when collection does not exist
	err = suite.keeper.MintNFT(suite.ctx, denomID, tokenID2, tokenNm2, tokenURI, tokenData, address2)
	suite.NoError(err)
	// MintNFT shouldn't fail when collection does not exist
	err = suite.keeper.MintNFT(suite.ctx, denomID2, tokenID, tokenNm2, tokenURI, tokenData, address2)
	suite.NoError(err)
	// Two NFTs in denomID, one in denomID2.
	supply := suite.keeper.GetTotalSupply(suite.ctx, denomID)
	suite.Equal(uint64(2), supply)
	supply = suite.keeper.GetTotalSupply(suite.ctx, denomID2)
	suite.Equal(uint64(1), supply)
	// Per-owner supply within denomID: one NFT each.
	supply = suite.keeper.GetTotalSupplyOfOwner(suite.ctx, denomID, address)
	suite.Equal(uint64(1), supply)
	supply = suite.keeper.GetTotalSupplyOfOwner(suite.ctx, denomID, address2)
	suite.Equal(uint64(1), supply)
	supply = suite.keeper.GetTotalSupply(suite.ctx, denomID)
	suite.Equal(uint64(2), supply)
	supply = suite.keeper.GetTotalSupply(suite.ctx, denomID2)
	suite.Equal(uint64(1), supply)
	//burn nft
	err = suite.keeper.BurnNFT(suite.ctx, denomID, tokenID, address)
	suite.NoError(err)
	supply = suite.keeper.GetTotalSupply(suite.ctx, denomID)
	suite.Equal(uint64(1), supply)
	supply = suite.keeper.GetTotalSupply(suite.ctx, denomID)
	suite.Equal(uint64(1), supply)
	//burn nft
	err = suite.keeper.BurnNFT(suite.ctx, denomID, tokenID2, address2)
	suite.NoError(err)
	supply = suite.keeper.GetTotalSupply(suite.ctx, denomID)
	suite.Equal(uint64(0), supply)
	supply = suite.keeper.GetTotalSupply(suite.ctx, denomID)
	suite.Equal(uint64(0), supply)
}
|
package main
import "fmt"
// Demonstrates ranging over a channel: the receive loop terminates once the
// sender closes the channel.
func main() {
	c := make(chan int)
	fmt.Println("For science!")
	// Sender: emits 20 down to 0, then closes so the range below ends.
	go func() {
		for i := 0; i <= 20; i++ {
			c <- 20 - i
		}
		close(c) // without close, the range never ends -> deadlock
	}()
	// Receiver: drains until the channel is closed.
	for val := range c {
		fmt.Println(val)
	}
	fmt.Println("LAUNCH")
}
|
package service
import (
"fmt"
"github.com/gorilla/mux"
"jabrok.com/global"
"net/http"
"strconv"
)
// Start wires up the HTTP routes, serves static files from the configured
// static path under /static/, and blocks in ListenAndServe on the
// configured port. A listen error is printed and Start returns.
func Start() {
	r := mux.NewRouter()
	r.HandleFunc("/", HandleRoot).Methods("GET")
	r.HandleFunc("/boom", HandleBoom).Methods("GET")
	r.HandleFunc("/status", HandleStatus).Methods("GET")
	r.HandleFunc("/sd", HandleShutdown).Methods("GET")
	http.Handle("/", r)
	// /static/* is stripped and served from the configured StaticPath.
	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(global.GetConfig().StaticPath))))
	err := http.ListenAndServe(":"+strconv.Itoa(global.GetConfig().Port), nil)
	if err != nil {
		fmt.Println(err)
	}
}
// HandleRoot handles the top level request (root) with a greeting.
func HandleRoot(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("Hello World Jabrok.com"))
}

// HandleBoom serves the hard-coded demo HTML page from getIndex.
func HandleBoom(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte(getIndex()))
}

// HandleStatus is a liveness probe: always responds 200 "OK".
func HandleStatus(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("OK"))
}

// HandleShutdown releases the global wait group, letting the process exit.
// NOTE(review): writes no response body, so the client sees an empty 200.
func HandleShutdown(w http.ResponseWriter, r *http.Request) {
	global.GlobalWait.Done()
}
// getIndex returns the hard-coded HTML for the /boom page.
func getIndex() string {
	return `
<html>
<head>
<title>Boom Dog!</title>
</head>
<body>
It's 7:30pm and I am ready to go home, and program some more
</body>
</html>
`
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// CoerceToDomain mirrors the PostgreSQL parse-tree node of the same name:
// an expression whose result is coerced to a domain type.
type CoerceToDomain struct {
	Xpr            ast.Node
	Arg            ast.Node
	Resulttype     Oid
	Resulttypmod   int32
	Resultcollid   Oid
	Coercionformat CoercionForm
	Location       int
}

// Pos returns the node's location in the source query text.
func (n *CoerceToDomain) Pos() int {
	return n.Location
}
|
// Copyright 2015,2016,2017,2018,2019 SeukWon Kang (kasworld@gmail.com)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bufferpool
import (
"fmt"
"sync"
)
// Buffer is a reusable byte slice handed out by Pool.
type Buffer []byte

// const PacketBufferSize = 0x10000

// Pool is a fixed-capacity free list of equally-sized byte buffers,
// guarded by a mutex.
type Pool struct {
	mutex    sync.Mutex `webformhide:"" stringformhide:""`
	name     string
	buffPool []Buffer
	length   int
	count    int
}

// New returns a Pool named name that hands out buffers of length bytes and
// retains at most count of them for reuse. An empty name defaults to "Pool".
func New(name string, length, count int) *Pool {
	if len(name) == 0 {
		name = "Pool"
	}
	p := &Pool{
		name:     name,
		length:   length,
		count:    count,
		buffPool: make([]Buffer, 0, count),
	}
	return p
}

// String renders the pool as name[length free/capacity].
func (p *Pool) String() string {
	return fmt.Sprintf("%s[%v %v/%v]", p.name, p.length, len(p.buffPool), p.count)
}

// Get pops a pooled buffer when one is available, otherwise allocates a new
// buffer of the configured length.
func (p *Pool) Get() Buffer {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	last := len(p.buffPool) - 1
	if last < 0 {
		return make(Buffer, p.length)
	}
	b := p.buffPool[last]
	p.buffPool = p.buffPool[:last]
	return b
}

// Put returns pb to the pool unless the pool is already at capacity, in
// which case the buffer is simply dropped for the GC.
func (p *Pool) Put(pb Buffer) {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	if len(p.buffPool) >= p.count {
		return
	}
	p.buffPool = append(p.buffPool, pb)
}
|
package tcp
import (
"MP1/errorchecker"
"MP1/messages"
"fmt"
"net"
"time"
)
// UnicastReceive listens for TCP connections on node.Port and serves each
// accepted client in its own goroutine. The accept loop runs until the
// process exits; the function never returns normally.
// Fix: removed the unreachable bare `return` that followed the infinite
// loop (go vet flags unreachable code).
func (node Node) UnicastReceive() {
	// Listen to an unused TCP port on localhost.
	port := ":" + node.Port
	listener, err := net.Listen("tcp", port)
	errorchecker.CheckError(err)
	defer listener.Close()
	fmt.Println("Listening to tcp port " + port + " was successful!")
	fmt.Println("To send a message, type: send <destination_id_number> <message>")
	// Listen for TCP connections until the process is closed.
	for {
		// Wait for a connection from a client and set up a TCP channel.
		conn, err := listener.Accept()
		errorchecker.CheckError(err)
		fmt.Println("Connection to client was successful!")
		// Handle each client in a goroutine so several can be served at once.
		go handleClient(conn)
	}
}
// handleClient decodes one Message from conn and prints it along with the
// sender id, send time, and local receive time. The connection is closed on
// return.
// Fix: renamed the local variable from `time` to receivedAt — it shadowed
// the imported time package.
func handleClient(conn net.Conn) {
	defer conn.Close()
	// Read and print message sent by client through the TCP channel.
	message := new(messages.Message)
	messages.Decode(conn, message)
	receivedAt := time.Now()
	fmt.Println("Message received!")
	fmt.Println("---------------")
	fmt.Printf("Received '%s' from process %d\nmessage sent at %s\nmessage received at %s\n", message.Message, message.SenderId, message.TimeSent.Format("01-02-2006 15:04:05"), receivedAt.Format("01-02-2006 15:04:05"))
	fmt.Println("---------------")
}
|
// Service to handle connections from puppet server
package impl
import (
"bufio"
"io/ioutil"
"log"
"net"
"sync"
"time"
)
// Service accepts report connections from a puppet server and hands the
// payloads to an EnvironmentCollection for processing.
type Service struct {
	waitGroup *sync.WaitGroup  // tracks in-flight connections for Stop
	listener  *net.TCPListener // set by HandleListener
	env       *EnvironmentCollection
}

// SetEnvCollection stores a pointer to the environment collection so
// HandleConnection can dispatch reports to it.
func (s *Service) SetEnvCollection(envs *EnvironmentCollection) {
	s.env = envs
}
// Stop waits until all in-flight connections are handled, then closes the
// listener.
// NOTE(review): HandleListener keeps accepting while Stop waits, so new
// connections can still arrive until the listener is closed — confirm the
// intended shutdown ordering.
func (s *Service) Stop() {
	s.waitGroup.Wait()
	s.listener.Close()
}
// NewService creates a new service instance with an initialized wait group.
// NOTE(review): the value receiver is unused — this behaves like a plain
// constructor (presumably called as Service{}.NewService()); a free
// function would be cleaner, but changing it would break callers.
func (s Service) NewService() *Service {
	var srv = &Service{
		waitGroup: &sync.WaitGroup{},
	}
	return srv
}
// HandleListener accepts TCP connections in a loop and hands each one to
// HandleConnection in its own goroutine. A ~1s accept deadline lets the
// loop poll instead of blocking forever.
func (s *Service) HandleListener(listener *net.TCPListener) {
	s.listener = listener
	for {
		listener.SetDeadline(time.Now().Add(1e9)) // 1e9 ns = 1s poll interval
		conn, err := listener.AcceptTCP()
		if nil != err {
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				continue
			}
			log.Println(err)
			// BUG FIX: the original fell through here with a nil conn,
			// which would panic inside HandleConnection (conn.Close on
			// nil *TCPConn). Skip this iteration instead.
			continue
		}
		s.waitGroup.Add(1)
		// process the connection asynchronously
		go s.HandleConnection(conn)
	}
}
const (
	timeout = 5 * time.Second // max time allotted to process one connection
)

// HandleConnection reads the full report payload from conn (bounded by the
// 5s deadline) and forwards it to the environment collection. A rejected
// report is answered with a minimal HTTP 404 so the sender backs off.
func (s *Service) HandleConnection(conn *net.TCPConn) {
	defer conn.Close()
	defer s.waitGroup.Done()
	// set 5 secs timeout (or we will live forever)
	conn.SetDeadline(time.Now().Add(timeout))
	// Read error is deliberately ignored: a partial/empty payload simply
	// fails validation in ProcessReport below.
	data, _ := ioutil.ReadAll(conn)
	// process report data
	res := s.env.ProcessReport(data)
	// if something goes wrong (not json string, for example), we say to back off
	if !res {
		w := bufio.NewWriter(conn)
		w.WriteString("HTTP/1.1 404 Not Found\r\n\r\n")
		w.Flush()
	}
}
|
package main
import (
"fmt"
"math"
)
// pathInZigZagTree returns the labels on the path from the root (label 1)
// down to label in an infinite zig-zag-labelled complete binary tree
// (LeetCode 1104).
//
// Idea: labels on level L pairwise mirror-sum to 2^L + 2^(L+1) - 1. So we
// record the current label, reflect it to its mirror on the same level,
// and the mirror's parent (mirror/2) is the path node one level up.
// Repeat until the root is reached.
//
// BUG FIX: the original looped while level > 0 and never wrote res[0],
// leaving the root slot at its zero value (0) instead of 1.
func pathInZigZagTree(label int) []int {
	level := int(math.Log2(float64(label))) // depth of label; root is level 0
	res := make([]int, level+1)
	fmt.Println(level) // debug output kept so the file's fmt import stays used; remove together with the import
	for level >= 0 { // walk up to and including the root
		// Record the node on this level.
		res[level] = label
		// Reflect to the mirror node on the same level...
		label = int(math.Pow(2, float64(level))) + int(math.Pow(2, float64(level+1))) - 1 - label
		// ...whose parent in a normally-labelled tree is label/2; that is
		// the zig-zag path's node one level up.
		label >>= 1
		level--
	}
	return res
}
|
package main
import (
"fmt"
"os"
_ "github.com/zquestz/visago/visagoapi/clarifai"
_ "github.com/zquestz/visago/visagoapi/googlevision"
_ "github.com/zquestz/visago/visagoapi/imagga"
"github.com/zquestz/visago/cmd"
)
// main installs signal handlers and runs the files command; the API plugin
// packages (clarifai, googlevision, imagga) register themselves via their
// blank imports above. A command error goes to stderr and exits 1.
func main() {
	setupSignalHandlers()
	if err := cmd.FilesCmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
|
package main
/*
LCOF (Sword Offer) 20: determine whether a string represents a numeric
value (integer or decimal, with an optional exponent).

Valid examples:   "+100", "5e2", "-123", "3.1416", "-1E-16", "0123"
Invalid examples: "12e", "1a3.14", "1.2.3", "+-5", "12e+5.4"
*/

// IsNumber reports whether s represents a numeric value. Accepted grammar:
// optional spaces, optional sign, digits with an optional decimal point
// (at least one digit required around the point), an optional e/E exponent
// with optional sign and mandatory digits, then optional trailing spaces.
// The original was an unimplemented stub that always returned false.
func IsNumber(s string) bool {
	i, n := 0, len(s)
	// Skip leading spaces.
	for i < n && s[i] == ' ' {
		i++
	}
	// Optional sign.
	if i < n && (s[i] == '+' || s[i] == '-') {
		i++
	}
	// Mantissa: digits, optionally split by a single decimal point.
	digits := 0
	for i < n && s[i] >= '0' && s[i] <= '9' {
		i++
		digits++
	}
	if i < n && s[i] == '.' {
		i++
		for i < n && s[i] >= '0' && s[i] <= '9' {
			i++
			digits++
		}
	}
	if digits == 0 {
		return false // need at least one digit around the point
	}
	// Optional exponent: e/E, optional sign, at least one digit.
	if i < n && (s[i] == 'e' || s[i] == 'E') {
		i++
		if i < n && (s[i] == '+' || s[i] == '-') {
			i++
		}
		expDigits := 0
		for i < n && s[i] >= '0' && s[i] <= '9' {
			i++
			expDigits++
		}
		if expDigits == 0 {
			return false
		}
	}
	// Skip trailing spaces; anything else left over is invalid.
	for i < n && s[i] == ' ' {
		i++
	}
	return i == n
}
|
package config
import (
"fmt"
"log"
"os"
"strconv"
"github.com/joho/godotenv"
)
var (
	// ConnString is the database connection string; populated by LoadEnv.
	ConnString = ""
	// APIPort is the port where the API will listen; populated by LoadEnv.
	APIPort = 0
)
// LoadEnv loads configuration from a .env file into the process
// environment, then populates APIPort (defaulting to 9000 when API_PORT is
// missing or malformed) and ConnString from DB_USER/DB_PASS/DB_NAME.
// The program terminates when no .env file can be loaded.
// Fix: renamed the local variable from `error` to `err` — it shadowed the
// builtin error type.
func LoadEnv() {
	var err error
	if err = godotenv.Load(); err != nil {
		log.Fatal(err)
	}
	APIPort, err = strconv.Atoi(os.Getenv("API_PORT"))
	if err != nil {
		APIPort = 9000 // fall back to the default port
	}
	ConnString = fmt.Sprintf("%s:%s@/%s?charset=utf8&parseTime=True&loc=Local",
		os.Getenv("DB_USER"),
		os.Getenv("DB_PASS"),
		os.Getenv("DB_NAME"),
	)
}
|
package main
import (
"encoding/csv"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"time"
)
// recursiveDirectoryHandlerFunc streams a CSV listing (path, mtime, size,
// mode) of everything under c.FSPath.
func recursiveDirectoryHandlerFunc(
	rw http.ResponseWriter, r *http.Request, c Context) error {
	rw.Header().Set("Content-Type", "text/csv; charset=utf-8")
	w := csv.NewWriter(rw)
	// BUG FIX: the original never flushed, so buffered rows were lost.
	defer w.Flush()
	w.Write([]string{"Path", "Modified", "Size", "Mode"})
	return filepath.Walk(c.FSPath,
		func(path string, info os.FileInfo, err error) error {
			// BUG FIX: on a walk error info may be nil; skip the entry
			// instead of panicking (the walk itself continues).
			if err != nil || info == nil {
				return nil
			}
			w.Write([]string{
				// BUG FIX: filepath.Walk already passes paths prefixed
				// with the root, so joining with c.FSPath again doubled
				// the prefix.
				path,
				info.ModTime().Format("2006-01-02 15:04:05 -0700 MST"),
				strconv.Itoa(int(info.Size())),
				info.Mode().String(),
			})
			return nil // Never stop the walk.
		})
}
// internalErrorHandlerFunc renders a 500 error page for err.
// Not an AppHandlerFunc: it runs after another handler has already failed.
func internalErrorHandlerFunc(
	rw http.ResponseWriter, r *http.Request, c Context, err error) {
	rw.Header().Set("Content-Type", "text/html; charset=utf-8")
	rw.WriteHeader(500)
	err = render("error.go.html", map[string]interface{}{
		"err": err,
		"req": r,
	}, rw)
	if err != nil {
		// Last resort: the error page itself failed to render.
		io.WriteString(rw, "Internal server error. Additionally, an error was encountered while loading the error page: "+err.Error())
	}
}
// directoryListHandlerFunc renders an HTML listing of the directory at
// c.FSPath, sorted, with breadcrumbs relative to the configured root.
func directoryListHandlerFunc(
	rw http.ResponseWriter, r *http.Request, c Context) error {
	rw.Header().Set("Content-Type", "text/html; charset=utf-8")
	d, err := os.Open(c.FSPath)
	if err != nil {
		return err
	}
	defer d.Close()
	entries, err := d.Readdir(0)
	if err != nil {
		return err
	}
	// FIXME: Use gen to simplify this stuff
	sort.Sort(FileSort(entries))
	// The view should see the path as relative to the root
	// (it should not care where the root is)
	rel, _ := filepath.Rel(c.App.RootPath, c.FSPath)
	rel = filepath.Clean(filepath.Join("/", rel))
	host, _ := os.Hostname()
	// ViewModel handed to the template.
	vm := DirectoryList{
		Machine:     host,
		Path:        rel,
		BaseInfo:    c.FSInfo,
		Entries:     entries,
		BreadCrumbs: makeBreadCrumbs(rel),
	}
	return render("directory-list.go.html", vm, rw)
}
// fileHandlerFunc serves the regular file at c.FSPath, optionally forcing a
// download by overriding the Content-Type. ServeContent takes care of range
// requests and conditional headers.
func fileHandlerFunc(
	rw http.ResponseWriter, r *http.Request, c Context) error {
	content, err := os.Open(c.FSPath)
	if err != nil {
		return err
	}
	// http.ServeContent does not close the ReadSeeker; without this the
	// file descriptor leaks on every request.
	defer content.Close()
	if c.Format == FmtForceDownload {
		rw.Header().Set("Content-Type", "application/octet-stream")
	}
	http.ServeContent(rw, r, c.FSPath, c.FSInfo.ModTime(), content)
	return nil
}
// archiveHandlerFunc builds a temporary archive of c.FSPath and serves it as
// a .zip download; the archive file is removed after the request finishes.
func archiveHandlerFunc(
	rw http.ResponseWriter, r *http.Request, c Context) error {
	archivePath, err := MakeArchive(c.FSPath)
	if err != nil {
		return err
	}
	// once served, don't hang around.
	defer os.Remove(archivePath)
	f, err := os.Open(archivePath)
	if err != nil {
		return err
	}
	defer f.Close()
	downloadName := fmt.Sprintf("%s.zip", filepath.Base(archivePath))
	http.ServeContent(rw, r, downloadName, time.Now(), f)
	return nil
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"math/bits"
"sort"
"strings"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/config"
sess "github.com/pingcap/tidb/ddl/internal/session"
ddlutil "github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/logutil"
decoder "github.com/pingcap/tidb/util/rowDecoder"
"github.com/pingcap/tidb/util/rowcodec"
"github.com/pingcap/tidb/util/sqlexec"
kvutil "github.com/tikv/client-go/v2/util"
"go.uber.org/zap"
)
// InitAndAddColumnToTable initializes the ColumnInfo in-place and adds it to
// the table: it allocates a fresh column ID, starts the column in StateNone,
// and appends it at the end of tblInfo.Columns. The offset is deliberately
// the last position so that adding a column asynchronously does not disturb
// existing row decoding; the column is reordered to its final offset when it
// becomes public.
func InitAndAddColumnToTable(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) *model.ColumnInfo {
	colInfo.ID = AllocateColumnID(tblInfo)
	colInfo.State = model.StateNone
	colInfo.Offset = len(tblInfo.Columns)
	tblInfo.Columns = append(tblInfo.Columns, colInfo)
	return colInfo
}
// checkAddColumn decodes and validates an ADD COLUMN job. It returns the
// table info, the in-progress column already registered on the table (nil on
// first run), the column decoded from the job args, the requested position,
// the IF NOT EXISTS flag, and an error. On validation failure the job is
// cancelled in place.
func checkAddColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, *model.ColumnInfo,
	*ast.ColumnPosition, bool /* ifNotExists */, error) {
	schemaID := job.SchemaID
	tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID)
	if err != nil {
		return nil, nil, nil, nil, false, errors.Trace(err)
	}
	col := &model.ColumnInfo{}
	pos := &ast.ColumnPosition{}
	offset := 0
	ifNotExists := false
	err = job.DecodeArgs(col, pos, &offset, &ifNotExists)
	if err != nil {
		job.State = model.JobStateCancelled
		return nil, nil, nil, nil, false, errors.Trace(err)
	}
	columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L)
	if columnInfo != nil {
		if columnInfo.State == model.StatePublic {
			// We already have a column with the same column name.
			// Return ifNotExists so the caller can downgrade this to a warning.
			job.State = model.JobStateCancelled
			return nil, nil, nil, nil, ifNotExists, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name)
		}
	}
	// Validate the AFTER-clause target column, if any.
	err = CheckAfterPositionExists(tblInfo, pos)
	if err != nil {
		job.State = model.JobStateCancelled
		return nil, nil, nil, nil, false, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name)
	}
	return tblInfo, columnInfo, col, pos, false, nil
}
// onAddColumn drives the ADD COLUMN state machine:
// none -> delete only -> write only -> write reorganization -> public.
// Each transition bumps the schema version so other nodes can observe it.
func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
	// Handle the rolling back job.
	if job.IsRollingback() {
		// Rolling back an add-column is executed as a drop-column.
		ver, err = onDropColumn(d, t, job)
		if err != nil {
			return ver, errors.Trace(err)
		}
		return ver, nil
	}
	failpoint.Inject("errorBeforeDecodeArgs", func(val failpoint.Value) {
		//nolint:forcetypeassert
		if val.(bool) {
			failpoint.Return(ver, errors.New("occur an error before decode args"))
		}
	})
	tblInfo, columnInfo, colFromArgs, pos, ifNotExists, err := checkAddColumn(t, job)
	if err != nil {
		if ifNotExists && infoschema.ErrColumnExists.Equal(err) {
			// ADD COLUMN IF NOT EXISTS on an existing column: downgrade the
			// error to a warning and finish the job successfully.
			job.Warning = toTError(err)
			job.State = model.JobStateDone
			return ver, nil
		}
		return ver, errors.Trace(err)
	}
	if columnInfo == nil {
		// First execution of this job: register the new column on the table.
		columnInfo = InitAndAddColumnToTable(tblInfo, colFromArgs)
		logutil.BgLogger().Info("run add column job", zap.String("category", "ddl"), zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo))
		if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Trace(err)
		}
	}
	originalState := columnInfo.State
	switch columnInfo.State {
	case model.StateNone:
		// none -> delete only
		columnInfo.State = model.StateDeleteOnly
		ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != columnInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		job.SchemaState = model.StateDeleteOnly
	case model.StateDeleteOnly:
		// delete only -> write only
		columnInfo.State = model.StateWriteOnly
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != columnInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Update the job state when all affairs done.
		job.SchemaState = model.StateWriteOnly
	case model.StateWriteOnly:
		// write only -> reorganization
		columnInfo.State = model.StateWriteReorganization
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != columnInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Update the job state when all affairs done.
		job.SchemaState = model.StateWriteReorganization
		job.MarkNonRevertible()
	case model.StateWriteReorganization:
		// reorganization -> public
		// Adjust table column offset.
		offset, err := LocateOffsetToMove(columnInfo.Offset, pos, tblInfo)
		if err != nil {
			return ver, errors.Trace(err)
		}
		tblInfo.MoveColumnInfo(columnInfo.Offset, offset)
		columnInfo.State = model.StatePublic
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != columnInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Finish this job.
		job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
		asyncNotifyEvent(d, &ddlutil.Event{Tp: model.ActionAddColumn, TableInfo: tblInfo, ColumnInfos: []*model.ColumnInfo{columnInfo}})
	default:
		err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("column", columnInfo.State)
	}
	return ver, errors.Trace(err)
}
// CheckAfterPositionExists makes sure the column specified in AFTER clause is exists.
// For example, ALTER TABLE t ADD COLUMN c3 INT AFTER c1.
func CheckAfterPositionExists(tblInfo *model.TableInfo, pos *ast.ColumnPosition) error {
	// Only an AFTER clause references another column; nothing to check otherwise.
	if pos == nil || pos.Tp != ast.ColumnPositionAfter {
		return nil
	}
	if model.FindColumnInfo(tblInfo.Columns, pos.RelativeColumn.Name.L) == nil {
		return infoschema.ErrColumnNotExists.GenWithStackByArgs(pos.RelativeColumn, tblInfo.Name)
	}
	return nil
}
// setIndicesState moves every given index into the same schema state.
func setIndicesState(indexInfos []*model.IndexInfo, state model.SchemaState) {
	for i := range indexInfos {
		indexInfos[i].State = state
	}
}
// checkDropColumnForStatePublic backfills an origin default value for a
// NOT NULL column that has none, so that rows written while the column is
// being dropped still have a usable value.
func checkDropColumnForStatePublic(colInfo *model.ColumnInfo) (err error) {
	// When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column".
	// NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value.
	// And we need consider the column without not-null flag.
	if colInfo.GetOriginDefaultValue() == nil && mysql.HasNotNullFlag(colInfo.GetFlag()) {
		// If the column is timestamp default current_timestamp, and DDL owner is new version TiDB that set column.Version to 1,
		// then old TiDB update record in the column write only stage will uses the wrong default value of the dropping column.
		// Because new version of the column default value is UTC time, but old version TiDB will think the default value is the time in system timezone.
		// But currently will be ok, because we can't cancel the drop column job when the job is running,
		// so the column will be dropped succeed and client will never see the wrong default value of the dropped column.
		// More info about this problem, see PR#9115.
		originDefVal, err := generateOriginDefaultValue(colInfo, nil)
		if err != nil {
			return err
		}
		return colInfo.SetOriginDefaultValue(originDefVal)
	}
	return nil
}
// onDropColumn drives the DROP COLUMN state machine:
// public -> write only -> delete only -> delete reorganization -> absent.
// The dropping column is progressively moved to the end of tblInfo.Columns
// and finally truncated away.
func onDropColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	tblInfo, colInfo, idxInfos, ifExists, err := checkDropColumn(d, t, job)
	if err != nil {
		if ifExists && dbterror.ErrCantDropFieldOrKey.Equal(err) {
			// Convert the "not exists" error to a warning.
			job.Warning = toTError(err)
			job.State = model.JobStateDone
			return ver, nil
		}
		return ver, errors.Trace(err)
	}
	if job.MultiSchemaInfo != nil && !job.IsRollingback() && job.MultiSchemaInfo.Revertible {
		job.MarkNonRevertible()
		job.SchemaState = colInfo.State
		// Store the mark and enter the next DDL handling loop.
		return updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, false)
	}
	originalState := colInfo.State
	switch colInfo.State {
	case model.StatePublic:
		// public -> write only
		colInfo.State = model.StateWriteOnly
		setIndicesState(idxInfos, model.StateWriteOnly)
		// Push the dropping column to the tail so public readers stop seeing it.
		tblInfo.MoveColumnInfo(colInfo.Offset, len(tblInfo.Columns)-1)
		err = checkDropColumnForStatePublic(colInfo)
		if err != nil {
			return ver, errors.Trace(err)
		}
		ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != colInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
	case model.StateWriteOnly:
		// write only -> delete only
		colInfo.State = model.StateDeleteOnly
		tblInfo.MoveColumnInfo(colInfo.Offset, len(tblInfo.Columns)-1)
		if len(idxInfos) > 0 {
			// Detach every index that references the dropping column.
			newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices))
			for _, idx := range tblInfo.Indices {
				if !indexInfoContains(idx.ID, idxInfos) {
					newIndices = append(newIndices, idx)
				}
			}
			tblInfo.Indices = newIndices
		}
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != colInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		job.Args = append(job.Args, indexInfosToIDList(idxInfos))
	case model.StateDeleteOnly:
		// delete only -> reorganization
		colInfo.State = model.StateDeleteReorganization
		tblInfo.MoveColumnInfo(colInfo.Offset, len(tblInfo.Columns)-1)
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != colInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
	case model.StateDeleteReorganization:
		// reorganization -> absent
		// All reorganization jobs are done, drop this column.
		tblInfo.MoveColumnInfo(colInfo.Offset, len(tblInfo.Columns)-1)
		tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-1]
		colInfo.State = model.StateNone
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != colInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Finish this job.
		if job.IsRollingback() {
			job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
		} else {
			// We should set related index IDs for job
			job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
			job.Args = append(job.Args, getPartitionIDs(tblInfo))
		}
	default:
		return ver, errors.Trace(dbterror.ErrInvalidDDLJob.GenWithStackByArgs("table", tblInfo.State))
	}
	job.SchemaState = colInfo.State
	return ver, errors.Trace(err)
}
// checkDropColumn decodes and validates a DROP COLUMN job. It returns the
// table info, the column being dropped, the indexes that reference it, the
// IF EXISTS flag, and an error. On validation failure the job is cancelled.
func checkDropColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, []*model.IndexInfo, bool /* ifExists */, error) {
	schemaID := job.SchemaID
	tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID)
	if err != nil {
		return nil, nil, nil, false, errors.Trace(err)
	}
	var colName model.CIStr
	var ifExists bool
	// indexIds is used to make sure we don't truncate args when decoding the rawArgs.
	var indexIds []int64
	err = job.DecodeArgs(&colName, &ifExists, &indexIds)
	if err != nil {
		job.State = model.JobStateCancelled
		return nil, nil, nil, false, errors.Trace(err)
	}
	colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L)
	// Hidden columns (e.g. for expression indexes) cannot be dropped directly.
	if colInfo == nil || colInfo.Hidden {
		job.State = model.JobStateCancelled
		return nil, nil, nil, ifExists, dbterror.ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName)
	}
	if err = isDroppableColumn(tblInfo, colName); err != nil {
		job.State = model.JobStateCancelled
		return nil, nil, nil, false, errors.Trace(err)
	}
	if err = checkDropColumnWithForeignKeyConstraintInOwner(d, t, job, tblInfo, colName.L); err != nil {
		return nil, nil, nil, false, errors.Trace(err)
	}
	if err = checkDropColumnWithTTLConfig(tblInfo, colName.L); err != nil {
		return nil, nil, nil, false, errors.Trace(err)
	}
	idxInfos := listIndicesWithColumn(colName.L, tblInfo.Indices)
	return tblInfo, colInfo, idxInfos, false, nil
}
// onSetDefaultValue handles an ALTER ... SET DEFAULT job by decoding the new
// column definition from the job args and delegating to
// updateColumnDefaultValue.
func onSetDefaultValue(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	newCol := &model.ColumnInfo{}
	if err := job.DecodeArgs(newCol); err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	return updateColumnDefaultValue(d, t, job, newCol, &newCol.Name)
}
// needChangeColumnData reports whether modifying oldCol into newCol requires
// reorganizing (rewriting) the stored row data, as opposed to a pure
// metadata change. It returns true conservatively for any conversion not
// explicitly known to be safe.
func needChangeColumnData(oldCol, newCol *model.ColumnInfo) bool {
	toUnsigned := mysql.HasUnsignedFlag(newCol.GetFlag())
	originUnsigned := mysql.HasUnsignedFlag(oldCol.GetFlag())
	// Shrinking flen/decimal or toggling the sign can truncate stored values.
	needTruncationOrToggleSign := func() bool {
		return (newCol.GetFlen() > 0 && (newCol.GetFlen() < oldCol.GetFlen() || newCol.GetDecimal() < oldCol.GetDecimal())) ||
			(toUnsigned != originUnsigned)
	}
	// Ignore the potential max display length represented by integer's flen, use default flen instead.
	defaultOldColFlen, _ := mysql.GetDefaultFieldLengthAndDecimal(oldCol.GetType())
	defaultNewColFlen, _ := mysql.GetDefaultFieldLengthAndDecimal(newCol.GetType())
	needTruncationOrToggleSignForInteger := func() bool {
		return (defaultNewColFlen > 0 && defaultNewColFlen < defaultOldColFlen) || (toUnsigned != originUnsigned)
	}
	// Deal with the same type.
	if oldCol.GetType() == newCol.GetType() {
		switch oldCol.GetType() {
		case mysql.TypeNewDecimal:
			// Since type decimal will encode the precision, frac, negative(signed) and wordBuf into storage together, there is no short
			// cut to eliminate data reorg change for column type change between decimal.
			return oldCol.GetFlen() != newCol.GetFlen() || oldCol.GetDecimal() != newCol.GetDecimal() || toUnsigned != originUnsigned
		case mysql.TypeEnum, mysql.TypeSet:
			// Enum/set values are stored as ordinals; changing elements may remap them.
			return isElemsChangedToModifyColumn(oldCol.GetElems(), newCol.GetElems())
		case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
			return toUnsigned != originUnsigned
		case mysql.TypeString:
			// Due to the behavior of padding \x00 at binary type, always change column data when binary length changed
			if types.IsBinaryStr(&oldCol.FieldType) {
				return newCol.GetFlen() != oldCol.GetFlen()
			}
		}
		return needTruncationOrToggleSign()
	}
	if convertBetweenCharAndVarchar(oldCol.GetType(), newCol.GetType()) {
		return true
	}
	// Deal with the different type.
	switch oldCol.GetType() {
	case mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
		switch newCol.GetType() {
		case mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
			return needTruncationOrToggleSign()
		}
	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
		switch newCol.GetType() {
		case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
			return needTruncationOrToggleSignForInteger()
		}
		// conversion between float and double needs reorganization, see issue #31372
	}
	return true
}
// convertBetweenCharAndVarchar reports whether changing a column between the
// two given MySQL type bytes is a CHAR<->VARCHAR conversion.
// TODO: it is used for plugins. so change plugin's using and remove it.
func convertBetweenCharAndVarchar(fromTp, toTp byte) bool {
	return types.ConvertBetweenCharAndVarchar(fromTp, toTp)
}
// isElemsChangedToModifyColumn reports whether the enum/set element list has
// changed in a way that requires data reorganization: the new list is only
// compatible when it keeps every old element at the same position (it may
// append new ones at the end).
func isElemsChangedToModifyColumn(oldElems, newElems []string) bool {
	if len(newElems) < len(oldElems) {
		return true
	}
	for i, prev := range oldElems {
		if prev != newElems[i] {
			return true
		}
	}
	return false
}
// modifyingColInfo carries the decoded arguments and intermediate state of a
// MODIFY/CHANGE COLUMN job.
type modifyingColInfo struct {
	newCol *model.ColumnInfo // target column definition after the modification
	oldColName *model.CIStr // name of the column being modified
	modifyColumnTp byte // type byte used for the null->not-null special case
	updatedAutoRandomBits uint64 // new auto_random shard bits, if changed
	changingCol *model.ColumnInfo // hidden intermediate column used during data reorg
	changingIdxs []*model.IndexInfo // temporary indexes mirroring indexes on the old column
	pos *ast.ColumnPosition // requested final column position
	removedIdxs []int64 // IDs of indexes to put into the delete range
}
// getModifyColumnInfo decodes a MODIFY COLUMN job's args and resolves the
// database, table and old column it applies to. The job is cancelled in
// place when decoding fails or the old column is missing/not public.
func getModifyColumnInfo(t *meta.Meta, job *model.Job) (*model.DBInfo, *model.TableInfo, *model.ColumnInfo, *modifyingColInfo, error) {
	modifyInfo := &modifyingColInfo{pos: &ast.ColumnPosition{}}
	err := job.DecodeArgs(&modifyInfo.newCol, &modifyInfo.oldColName, modifyInfo.pos, &modifyInfo.modifyColumnTp,
		&modifyInfo.updatedAutoRandomBits, &modifyInfo.changingCol, &modifyInfo.changingIdxs, &modifyInfo.removedIdxs)
	if err != nil {
		job.State = model.JobStateCancelled
		return nil, nil, nil, modifyInfo, errors.Trace(err)
	}
	dbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)
	if err != nil {
		return nil, nil, nil, modifyInfo, errors.Trace(err)
	}
	tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID)
	if err != nil {
		return nil, nil, nil, modifyInfo, errors.Trace(err)
	}
	oldCol := model.FindColumnInfo(tblInfo.Columns, modifyInfo.oldColName.L)
	// The column must exist and already be public to be modifiable.
	if oldCol == nil || oldCol.State != model.StatePublic {
		job.State = model.JobStateCancelled
		return nil, nil, nil, modifyInfo, errors.Trace(infoschema.ErrColumnNotExists.GenWithStackByArgs(*(modifyInfo.oldColName), tblInfo.Name))
	}
	return dbInfo, tblInfo, oldCol, modifyInfo, errors.Trace(err)
}
// GetOriginDefaultValueForModifyColumn gets the original default value for modifying column.
// Since column type change is implemented as adding a new column then substituting the old one.
// Case exists when update-where statement fetch a NULL for not-null column without any default data,
// it will errors.
// So we set original default value here to prevent this error. If the oldCol has the original default value, we use it.
// Otherwise we set the zero value as original default value.
// Besides, in insert & update records, we have already implement using the casted value of relative column to insert
// rather than the original default value.
func GetOriginDefaultValueForModifyColumn(sessCtx sessionctx.Context, changingCol, oldCol *model.ColumnInfo) (interface{}, error) {
	var err error
	originDefVal := oldCol.GetOriginDefaultValue()
	if originDefVal != nil {
		// Try to cast the old default into the new column's type; on cast
		// failure we log and fall through to generating a zero value.
		odv, err := table.CastValue(sessCtx, types.NewDatum(originDefVal), changingCol, false, false)
		if err != nil {
			logutil.BgLogger().Info("cast origin default value failed", zap.String("category", "ddl"), zap.Error(err))
		}
		if !odv.IsNull() {
			if originDefVal, err = odv.ToString(); err != nil {
				originDefVal = nil
				logutil.BgLogger().Info("convert default value to string failed", zap.String("category", "ddl"), zap.Error(err))
			}
		}
	}
	if originDefVal == nil {
		// No usable default survived the cast; generate the type's zero value.
		originDefVal, err = generateOriginDefaultValue(changingCol, nil)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return originDefVal, nil
}
// onModifyColumn is the entry point for a MODIFY/CHANGE COLUMN job. Metadata-
// only changes are applied directly; changes that require rewriting row data
// set up a hidden "changing" column plus temporary indexes and hand off to
// the data-reorg path.
func (w *worker) onModifyColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	dbInfo, tblInfo, oldCol, modifyInfo, err := getModifyColumnInfo(t, job)
	if err != nil {
		return ver, err
	}
	if job.IsRollingback() {
		// For those column-type-change jobs which don't reorg the data.
		if !needChangeColumnData(oldCol, modifyInfo.newCol) {
			return rollbackModifyColumnJob(d, t, tblInfo, job, modifyInfo.newCol, oldCol, modifyInfo.modifyColumnTp)
		}
		// For those column-type-change jobs which reorg the data.
		return rollbackModifyColumnJobWithData(d, t, tblInfo, job, oldCol, modifyInfo)
	}
	// If we want to rename the column name, we need to check whether it already exists.
	if modifyInfo.newCol.Name.L != modifyInfo.oldColName.L {
		c := model.FindColumnInfo(tblInfo.Columns, modifyInfo.newCol.Name.L)
		if c != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(modifyInfo.newCol.Name))
		}
	}
	failpoint.Inject("uninitializedOffsetAndState", func(val failpoint.Value) {
		//nolint:forcetypeassert
		if val.(bool) {
			if modifyInfo.newCol.State != model.StatePublic {
				failpoint.Return(ver, errors.New("the column state is wrong"))
			}
		}
	})
	err = checkAndApplyAutoRandomBits(d, t, dbInfo, tblInfo, oldCol, modifyInfo.newCol, modifyInfo.updatedAutoRandomBits)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	// Metadata-only change: no data rewrite required.
	if !needChangeColumnData(oldCol, modifyInfo.newCol) {
		return w.doModifyColumn(d, t, job, dbInfo, tblInfo, modifyInfo.newCol, oldCol, modifyInfo.pos)
	}
	if err = isGeneratedRelatedColumn(tblInfo, modifyInfo.newCol, oldCol); err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	if tblInfo.Partition != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(dbterror.ErrUnsupportedModifyColumn.GenWithStackByArgs("table is partition table"))
	}
	changingCol := modifyInfo.changingCol
	if changingCol == nil {
		// First run: create the hidden changing column and temp indexes.
		newColName := model.NewCIStr(genChangingColumnUniqueName(tblInfo, oldCol))
		if mysql.HasPriKeyFlag(oldCol.GetFlag()) {
			job.State = model.JobStateCancelled
			msg := "this column has primary key flag"
			return ver, dbterror.ErrUnsupportedModifyColumn.GenWithStackByArgs(msg)
		}
		changingCol = modifyInfo.newCol.Clone()
		changingCol.Name = newColName
		changingCol.ChangeStateInfo = &model.ChangeStateInfo{DependencyColumnOffset: oldCol.Offset}
		originDefVal, err := GetOriginDefaultValueForModifyColumn(newContext(d.store), changingCol, oldCol)
		if err != nil {
			return ver, errors.Trace(err)
		}
		if err = changingCol.SetOriginDefaultValue(originDefVal); err != nil {
			return ver, errors.Trace(err)
		}
		InitAndAddColumnToTable(tblInfo, changingCol)
		indexesToChange := FindRelatedIndexesToChange(tblInfo, oldCol.Name)
		for _, info := range indexesToChange {
			newIdxID := AllocateIndexID(tblInfo)
			if !info.isTemp {
				// We create a temp index for each normal index.
				tmpIdx := info.IndexInfo.Clone()
				tmpIdxName := genChangingIndexUniqueName(tblInfo, info.IndexInfo)
				setIdxIDName(tmpIdx, newIdxID, model.NewCIStr(tmpIdxName))
				SetIdxColNameOffset(tmpIdx.Columns[info.Offset], changingCol)
				tblInfo.Indices = append(tblInfo.Indices, tmpIdx)
			} else {
				// The index is a temp index created by previous modify column job(s).
				// We can overwrite it to reduce reorg cost, because it will be dropped eventually.
				tmpIdx := info.IndexInfo
				oldTempIdxID := tmpIdx.ID
				setIdxIDName(tmpIdx, newIdxID, tmpIdx.Name /* unchanged */)
				SetIdxColNameOffset(tmpIdx.Columns[info.Offset], changingCol)
				modifyInfo.removedIdxs = append(modifyInfo.removedIdxs, oldTempIdxID)
			}
		}
	} else {
		// Subsequent runs: re-resolve the changing column recorded in the args.
		changingCol = model.FindColumnInfoByID(tblInfo.Columns, modifyInfo.changingCol.ID)
		if changingCol == nil {
			logutil.BgLogger().Error("the changing column has been removed", zap.String("category", "ddl"), zap.Error(err))
			job.State = model.JobStateCancelled
			return ver, errors.Trace(infoschema.ErrColumnNotExists.GenWithStackByArgs(oldCol.Name, tblInfo.Name))
		}
	}
	return w.doModifyColumnTypeWithData(d, t, job, dbInfo, tblInfo, changingCol, oldCol, modifyInfo.newCol.Name, modifyInfo.pos, modifyInfo.removedIdxs)
}
// setIdxIDName assigns a new ID and name to the given index info.
func setIdxIDName(idxInfo *model.IndexInfo, id int64, name model.CIStr) {
	idxInfo.ID = id
	idxInfo.Name = name
}
// SetIdxColNameOffset sets index column name and offset from changing ColumnInfo.
// The prefix length is cleared when the new column type cannot carry a prefix
// or is too short for the existing prefix.
func SetIdxColNameOffset(idxCol *model.IndexColumn, changingCol *model.ColumnInfo) {
	idxCol.Name = changingCol.Name
	idxCol.Offset = changingCol.Offset
	if !types.IsTypePrefixable(changingCol.GetType()) || changingCol.GetFlen() < idxCol.Length {
		idxCol.Length = types.UnspecifiedLength
	}
}
// rollbackModifyColumnJobWithData is used to rollback modify-column job which need to reorg the data.
func rollbackModifyColumnJobWithData(d *ddlCtx, t *meta.Meta, tblInfo *model.TableInfo, job *model.Job, oldCol *model.ColumnInfo, modifyInfo *modifyingColInfo) (ver int64, err error) {
	// If the not-null change is included, we should clean the flag info in oldCol.
	if modifyInfo.modifyColumnTp == mysql.TypeNull {
		// Reset NotNullFlag flag.
		tblInfo.Columns[oldCol.Offset].SetFlag(oldCol.GetFlag() &^ mysql.NotNullFlag)
		// Reset PreventNullInsertFlag flag.
		tblInfo.Columns[oldCol.Offset].SetFlag(oldCol.GetFlag() &^ mysql.PreventNullInsertFlag)
	}
	var changingIdxIDs []int64
	if modifyInfo.changingCol != nil {
		changingIdxIDs = buildRelatedIndexIDs(tblInfo, modifyInfo.changingCol.ID)
		// The job is in the middle state. The appended changingCol and changingIndex should
		// be removed from the tableInfo as well.
		removeChangingColAndIdxs(tblInfo, modifyInfo.changingCol.ID)
	}
	ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, true)
	if err != nil {
		return ver, errors.Trace(err)
	}
	job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
	// Reconstruct the job args to add the temporary index ids into delete range table.
	job.Args = []interface{}{changingIdxIDs, getPartitionIDs(tblInfo)}
	return ver, nil
}
// removeChangingColAndIdxs strips the intermediate "changing" column and any
// index that references it from tblInfo. Both slices are filtered in place,
// reusing their backing arrays.
func removeChangingColAndIdxs(tblInfo *model.TableInfo, changingColID int64) {
	keptIdxs := tblInfo.Indices[:0]
	for _, idxInfo := range tblInfo.Indices {
		if !idxInfo.HasColumnInIndexColumns(tblInfo, changingColID) {
			keptIdxs = append(keptIdxs, idxInfo)
		}
	}
	tblInfo.Indices = keptIdxs

	keptCols := tblInfo.Columns[:0]
	for _, colInfo := range tblInfo.Columns {
		if colInfo.ID != changingColID {
			keptCols = append(keptCols, colInfo)
		}
	}
	tblInfo.Columns = keptCols
}
// doModifyColumnTypeWithData drives the data-reorg path of MODIFY COLUMN on
// the hidden changing column:
// none -> delete only -> write only -> write reorganization -> public.
// During write reorganization the row data and temp indexes are backfilled,
// then the changing column is swapped in for the old one.
func (w *worker) doModifyColumnTypeWithData(
	d *ddlCtx, t *meta.Meta, job *model.Job,
	dbInfo *model.DBInfo, tblInfo *model.TableInfo, changingCol, oldCol *model.ColumnInfo,
	colName model.CIStr, pos *ast.ColumnPosition, rmIdxIDs []int64) (ver int64, _ error) {
	var err error
	originalState := changingCol.State
	targetCol := changingCol.Clone()
	targetCol.Name = colName
	changingIdxs := buildRelatedIndexInfos(tblInfo, changingCol.ID)
	switch changingCol.State {
	case model.StateNone:
		// Column from null to not null.
		if !mysql.HasNotNullFlag(oldCol.GetFlag()) && mysql.HasNotNullFlag(changingCol.GetFlag()) {
			// Introduce the `mysql.PreventNullInsertFlag` flag to prevent users from inserting or updating null values.
			err := modifyColsFromNull2NotNull(w, dbInfo, tblInfo, []*model.ColumnInfo{oldCol}, targetCol, oldCol.GetType() != changingCol.GetType())
			if err != nil {
				if dbterror.ErrWarnDataTruncated.Equal(err) || dbterror.ErrInvalidUseOfNull.Equal(err) {
					job.State = model.JobStateRollingback
				}
				return ver, err
			}
		}
		// none -> delete only
		updateChangingObjState(changingCol, changingIdxs, model.StateDeleteOnly)
		failpoint.Inject("mockInsertValueAfterCheckNull", func(val failpoint.Value) {
			if valStr, ok := val.(string); ok {
				var sctx sessionctx.Context
				sctx, err := w.sessPool.Get()
				if err != nil {
					failpoint.Return(ver, err)
				}
				defer w.sessPool.Put(sctx)
				ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL)
				//nolint:forcetypeassert
				_, _, err = sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, valStr)
				if err != nil {
					job.State = model.JobStateCancelled
					failpoint.Return(ver, err)
				}
			}
		})
		ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != changingCol.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Make sure job args change after `updateVersionAndTableInfoWithCheck`, otherwise, the job args will
		// be updated in `updateDDLJob` even if it meets an error in `updateVersionAndTableInfoWithCheck`.
		job.SchemaState = model.StateDeleteOnly
		metrics.GetBackfillProgressByLabel(metrics.LblModifyColumn, job.SchemaName, tblInfo.Name.String()).Set(0)
		job.Args = append(job.Args, changingCol, changingIdxs, rmIdxIDs)
	case model.StateDeleteOnly:
		// Column from null to not null.
		if !mysql.HasNotNullFlag(oldCol.GetFlag()) && mysql.HasNotNullFlag(changingCol.GetFlag()) {
			// Introduce the `mysql.PreventNullInsertFlag` flag to prevent users from inserting or updating null values.
			err := modifyColsFromNull2NotNull(w, dbInfo, tblInfo, []*model.ColumnInfo{oldCol}, targetCol, oldCol.GetType() != changingCol.GetType())
			if err != nil {
				if dbterror.ErrWarnDataTruncated.Equal(err) || dbterror.ErrInvalidUseOfNull.Equal(err) {
					job.State = model.JobStateRollingback
				}
				return ver, err
			}
		}
		// delete only -> write only
		updateChangingObjState(changingCol, changingIdxs, model.StateWriteOnly)
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != changingCol.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		job.SchemaState = model.StateWriteOnly
	case model.StateWriteOnly:
		// write only -> reorganization
		updateChangingObjState(changingCol, changingIdxs, model.StateWriteReorganization)
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != changingCol.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Initialize SnapshotVer to 0 for later reorganization check.
		job.SnapshotVer = 0
		job.SchemaState = model.StateWriteReorganization
	case model.StateWriteReorganization:
		tbl, err := getTable(d.store, dbInfo.ID, tblInfo)
		if err != nil {
			return ver, errors.Trace(err)
		}
		var done bool
		if job.MultiSchemaInfo != nil {
			done, ver, err = doReorgWorkForModifyColumnMultiSchema(w, d, t, job, tbl, oldCol, changingCol, changingIdxs)
		} else {
			done, ver, err = doReorgWorkForModifyColumn(w, d, t, job, tbl, oldCol, changingCol, changingIdxs)
		}
		if !done {
			return ver, err
		}
		// The old column's indexes join the removal list once reorg is done.
		rmIdxIDs = append(buildRelatedIndexIDs(tblInfo, oldCol.ID), rmIdxIDs...)
		err = adjustTableInfoAfterModifyColumnWithData(tblInfo, pos, oldCol, changingCol, colName, changingIdxs)
		if err != nil {
			job.State = model.JobStateRollingback
			return ver, errors.Trace(err)
		}
		updateChangingObjState(changingCol, changingIdxs, model.StatePublic)
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != changingCol.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Finish this job.
		job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
		// Refactor the job args to add the old index ids into delete range table.
		job.Args = []interface{}{rmIdxIDs, getPartitionIDs(tblInfo)}
		asyncNotifyEvent(d, &ddlutil.Event{Tp: model.ActionModifyColumn, TableInfo: tblInfo, ColumnInfos: []*model.ColumnInfo{changingCol}})
	default:
		err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("column", changingCol.State)
	}
	return ver, errors.Trace(err)
}
// doReorgWorkForModifyColumnMultiSchema drives the reorg work for a
// modify-column sub-job that is part of a multi-schema-change job.
// While the sub-job is still revertible it runs the normal reorg; once the
// reorg completes it marks the sub-job non-revertible and keeps reporting
// not-done so the scheduler waits for the sibling sub-jobs. Seeing a
// non-revertible sub-job therefore means every sub-job has finished.
func doReorgWorkForModifyColumnMultiSchema(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table,
	oldCol, changingCol *model.ColumnInfo, changingIdxs []*model.IndexInfo) (done bool, ver int64, err error) {
	if !job.MultiSchemaInfo.Revertible {
		// Non-revertible means all the sub-jobs finished.
		return true, ver, err
	}
	done, ver, err = doReorgWorkForModifyColumn(w, d, t, job, tbl, oldCol, changingCol, changingIdxs)
	if done {
		// One more round is needed to wait for all the other sub-jobs to finish.
		job.MarkNonRevertible()
	}
	// One more round is needed to run the reorg process.
	return false, ver, err
}
// doReorgWorkForModifyColumn runs one round of the transaction-based reorg for
// a lossy column type change: it backfills the changing column and its
// temporary indexes for existing rows. It returns done=true only when the
// whole reorg has finished; on retryable conditions it returns done=false so
// the DDL loop re-enters later, and on non-retryable failures it converts the
// job to rollback.
func doReorgWorkForModifyColumn(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table,
	oldCol, changingCol *model.ColumnInfo, changingIdxs []*model.IndexInfo) (done bool, ver int64, err error) {
	job.ReorgMeta.ReorgTp = model.ReorgTypeTxn
	sctx, err1 := w.sessPool.Get()
	if err1 != nil {
		err = errors.Trace(err1)
		return
	}
	// Return the session to the pool no matter how this round ends.
	defer w.sessPool.Put(sctx)
	rh := newReorgHandler(sess.NewSession(sctx))
	dbInfo, err := t.GetDatabase(job.SchemaID)
	if err != nil {
		return false, ver, errors.Trace(err)
	}
	reorgInfo, err := getReorgInfo(d.jobContext(job.ID), d, rh, job, dbInfo, tbl, BuildElements(changingCol, changingIdxs), false)
	if err != nil || reorgInfo == nil || reorgInfo.first {
		// If we run reorg firstly, we should update the job snapshot version
		// and then run the reorg next time.
		return false, ver, errors.Trace(err)
	}
	// Inject a failpoint so that we can pause here and do verification on other components.
	// With a failpoint-enabled version of TiDB, you can trigger this failpoint by the following command:
	// enable: curl -X PUT -d "pause" "http://127.0.0.1:10080/fail/github.com/pingcap/tidb/ddl/mockDelayInModifyColumnTypeWithData".
	// disable: curl -X DELETE "http://127.0.0.1:10080/fail/github.com/pingcap/tidb/ddl/mockDelayInModifyColumnTypeWithData"
	failpoint.Inject("mockDelayInModifyColumnTypeWithData", func() {})
	err = w.runReorgJob(reorgInfo, tbl.Meta(), d.lease, func() (addIndexErr error) {
		// Turn a panic inside the reorg into a cancelled-job error instead of
		// crashing the DDL worker.
		defer util.Recover(metrics.LabelDDL, "onModifyColumn",
			func() {
				addIndexErr = dbterror.ErrCancelledDDLJob.GenWithStack("modify table `%v` column `%v` panic", tbl.Meta().Name, oldCol.Name)
			}, false)
		// Use old column name to generate less confusing error messages.
		changingColCpy := changingCol.Clone()
		changingColCpy.Name = oldCol.Name
		// NOTE(review): changingColCpy is not passed to updateCurrentElement
		// below — presumably the renamed clone was meant to drive error
		// reporting; confirm whether leaving it unused is intentional.
		return w.updateCurrentElement(tbl, reorgInfo)
	})
	if err != nil {
		if dbterror.ErrPausedDDLJob.Equal(err) {
			// The job is paused; simply retry later without rolling back.
			return false, ver, nil
		}
		if dbterror.ErrWaitReorgTimeout.Equal(err) {
			// If timeout, we should return, check for the owner and re-wait job done.
			return false, ver, nil
		}
		if kv.IsTxnRetryableError(err) || dbterror.ErrNotOwner.Equal(err) {
			// Retryable errors keep the reorg handle so the next round resumes.
			return false, ver, errors.Trace(err)
		}
		if err1 := rh.RemoveDDLReorgHandle(job, reorgInfo.elements); err1 != nil {
			logutil.BgLogger().Warn("run modify column job failed, RemoveDDLReorgHandle failed, can't convert job to rollback", zap.String("category", "ddl"),
				zap.String("job", job.String()), zap.Error(err1))
		}
		logutil.BgLogger().Warn("run modify column job failed, convert job to rollback", zap.String("category", "ddl"), zap.String("job", job.String()), zap.Error(err))
		job.State = model.JobStateRollingback
		return false, ver, errors.Trace(err)
	}
	return true, ver, nil
}
// adjustTableInfoAfterModifyColumnWithData rewrites tblInfo after a
// data-reorg column change finished: the changing column replaces the old
// column (taking the user-visible name newName), the temporary indexes replace
// their originals, TTL metadata is renamed, and finally the column is moved to
// the position requested by pos. The mutations below are order-sensitive.
func adjustTableInfoAfterModifyColumnWithData(tblInfo *model.TableInfo, pos *ast.ColumnPosition,
	oldCol, changingCol *model.ColumnInfo, newName model.CIStr, changingIdxs []*model.IndexInfo) (err error) {
	if pos != nil && pos.RelativeColumn != nil && oldCol.Name.L == pos.RelativeColumn.Name.L {
		// For cases like `modify column b after b`, it should report this error.
		return errors.Trace(infoschema.ErrColumnNotExists.GenWithStackByArgs(oldCol.Name, tblInfo.Name))
	}
	// Remember the temporary (internal) column name before it is renamed, so
	// index columns that still reference it can be fixed up.
	internalColName := changingCol.Name
	changingCol = replaceOldColumn(tblInfo, oldCol, changingCol, newName)
	if len(changingIdxs) > 0 {
		updateNewIdxColsNameOffset(changingIdxs, internalColName, changingCol)
		indexesToRemove := filterIndexesToRemove(changingIdxs, newName, tblInfo)
		replaceOldIndexes(tblInfo, indexesToRemove)
	}
	if tblInfo.TTLInfo != nil {
		// Update the TTL definition if the modified column is the TTL column.
		updateTTLInfoWhenModifyColumn(tblInfo, oldCol.Name, changingCol.Name)
	}
	// Move the new column to a correct offset.
	destOffset, err := LocateOffsetToMove(changingCol.Offset, pos, tblInfo)
	if err != nil {
		return errors.Trace(err)
	}
	tblInfo.MoveColumnInfo(changingCol.Offset, destOffset)
	return nil
}
// replaceOldColumn substitutes the changing column for the old one in
// tblInfo.Columns: the changing column is renamed to newName, takes the old
// column's offset, and the old column entry is dropped. The (updated)
// changing column is returned.
func replaceOldColumn(tblInfo *model.TableInfo, oldCol, changingCol *model.ColumnInfo,
	newName model.CIStr) *model.ColumnInfo {
	// Park the changing column at the tail first, so overwriting the old
	// column's slot and shrinking the slice by one removes the duplicate.
	tblInfo.MoveColumnInfo(changingCol.Offset, len(tblInfo.Columns)-1)
	newCol := updateChangingCol(changingCol, newName, oldCol.Offset)
	tblInfo.Columns[oldCol.Offset] = newCol
	tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-1]
	return newCol
}
// replaceOldIndexes removes the temporary changing indexes from
// tblInfo.Indices and then installs each changing index in place of the old
// index it shadows, restoring the original index name.
func replaceOldIndexes(tblInfo *model.TableInfo, changingIdxs []*model.IndexInfo) {
	// Collect the IDs of the changing indexes so removal is a set lookup.
	changingIDs := make(map[int64]struct{}, len(changingIdxs))
	for _, cIdx := range changingIdxs {
		changingIDs[cIdx.ID] = struct{}{}
	}
	// Filter the changing indexes out in place, reusing the backing array.
	kept := tblInfo.Indices[:0]
	for _, idx := range tblInfo.Indices {
		if _, ok := changingIDs[idx.ID]; !ok {
			kept = append(kept, idx)
		}
	}
	tblInfo.Indices = kept
	// Swap each old index for its changing counterpart. The changing index
	// name has the form '_Idx$_name' and must go back to plain 'name'.
	for _, cIdx := range changingIdxs {
		idxName := getChangingIndexOriginName(cIdx)
		for i, idx := range tblInfo.Indices {
			if strings.EqualFold(idxName, idx.Name.O) {
				cIdx.Name = model.NewCIStr(idxName)
				tblInfo.Indices[i] = cIdx
				break
			}
		}
	}
}
// updateNewIdxColsNameOffset rewrites every index column that still refers to
// oldName so that its name and offset match changingCol.
func updateNewIdxColsNameOffset(changingIdxs []*model.IndexInfo,
	oldName model.CIStr, changingCol *model.ColumnInfo) {
	for _, idxInfo := range changingIdxs {
		for _, idxCol := range idxInfo.Columns {
			if idxCol.Name.L == oldName.L {
				SetIdxColNameOffset(idxCol, changingCol)
			}
		}
	}
}
// updateFKInfoWhenModifyColumn renames every foreign-key column reference from
// oldCol to newCol. It is a no-op when the (case-insensitive) name is
// unchanged.
func updateFKInfoWhenModifyColumn(tblInfo *model.TableInfo, oldCol, newCol model.CIStr) {
	if oldCol.L == newCol.L {
		return
	}
	for _, fk := range tblInfo.ForeignKeys {
		for i, fkCol := range fk.Cols {
			if fkCol.L == oldCol.L {
				fk.Cols[i] = newCol
			}
		}
	}
}
// updateTTLInfoWhenModifyColumn renames the table's TTL column from oldCol to
// newCol when the TTL definition references the modified column. It is a
// no-op when the name is unchanged or the table has no TTL info.
func updateTTLInfoWhenModifyColumn(tblInfo *model.TableInfo, oldCol, newCol model.CIStr) {
	if oldCol.L == newCol.L || tblInfo.TTLInfo == nil {
		return
	}
	if tblInfo.TTLInfo.ColumnName.L == oldCol.L {
		tblInfo.TTLInfo.ColumnName = newCol
	}
}
// filterIndexesToRemove returns the subset of changingIdxs that may be removed
// now: an index is kept back when it covers some other column that is itself
// still in the middle of a change (its ChangeStateInfo is set), in which case
// the last modify-column job is responsible for removing it.
func filterIndexesToRemove(changingIdxs []*model.IndexInfo, colName model.CIStr, tblInfo *model.TableInfo) []*model.IndexInfo {
	removable := make([]*model.IndexInfo, 0, len(changingIdxs))
	for _, idx := range changingIdxs {
		dependsOnOtherChange := false
		for _, idxCol := range idx.Columns {
			if idxCol.Name.L == colName.L {
				// The column being modified right now doesn't count.
				continue
			}
			if tblInfo.Columns[idxCol.Offset].ChangeStateInfo != nil {
				dependsOnOtherChange = true
			}
		}
		if !dependsOnOtherChange {
			removable = append(removable, idx)
		}
	}
	return removable
}
// updateChangingCol finalizes a changing column: it takes its public name and
// offset and clears the change-tracking state.
func updateChangingCol(col *model.ColumnInfo, newName model.CIStr, newOffset int) *model.ColumnInfo {
	col.ChangeStateInfo = nil
	col.Offset = newOffset
	col.Name = newName
	// After changing the column, the column's type is changed, so OriginDefaultValue
	// needs to be reset so that getting the default value from it does not error.
	// Besides, nil data that was not backfilled in the "add column" is backfilled
	// after the column is changed, so OriginDefaultValue can safely be nil.
	col.OriginDefaultValue = nil
	return col
}
// buildRelatedIndexInfos collects every index of tblInfo that includes the
// column identified by colID.
func buildRelatedIndexInfos(tblInfo *model.TableInfo, colID int64) []*model.IndexInfo {
	var related []*model.IndexInfo
	for _, idxInfo := range tblInfo.Indices {
		if !idxInfo.HasColumnInIndexColumns(tblInfo, colID) {
			continue
		}
		related = append(related, idxInfo)
	}
	return related
}
// buildRelatedIndexIDs collects the IDs of every index of tblInfo that
// includes the column identified by colID.
func buildRelatedIndexIDs(tblInfo *model.TableInfo, colID int64) []int64 {
	var relatedIDs []int64
	for _, idxInfo := range tblInfo.Indices {
		if !idxInfo.HasColumnInIndexColumns(tblInfo, colID) {
			continue
		}
		relatedIDs = append(relatedIDs, idxInfo.ID)
	}
	return relatedIDs
}
// LocateOffsetToMove returns the destination offset for a column given its
// current offset and an optional position clause (FIRST / AFTER col / none).
// A nil position or ColumnPositionNone keeps the current offset.
func LocateOffsetToMove(currentOffset int, pos *ast.ColumnPosition, tblInfo *model.TableInfo) (destOffset int, err error) {
	if pos == nil {
		return currentOffset, nil
	}
	switch pos.Tp {
	case ast.ColumnPositionFirst:
		return 0, nil
	case ast.ColumnPositionAfter:
		relative := model.FindColumnInfo(tblInfo.Columns, pos.RelativeColumn.Name.L)
		// The anchor column must exist and be public.
		if relative == nil || relative.State != model.StatePublic {
			return 0, infoschema.ErrColumnNotExists.GenWithStackByArgs(pos.RelativeColumn, tblInfo.Name)
		}
		// Moving the column backward keeps the anchor's slot; moving it
		// forward lands one position past the anchor.
		if currentOffset <= relative.Offset {
			return relative.Offset, nil
		}
		return relative.Offset + 1, nil
	case ast.ColumnPositionNone:
		return currentOffset, nil
	default:
		return 0, errors.Errorf("unknown column position type")
	}
}
// BuildElements builds the reorg element list for a lossy column change: the
// changing column first, followed by each changing index, in order.
// It is exported for testing.
func BuildElements(changingCol *model.ColumnInfo, changingIdxs []*model.IndexInfo) []*meta.Element {
	out := make([]*meta.Element, 1, len(changingIdxs)+1)
	out[0] = &meta.Element{ID: changingCol.ID, TypeKey: meta.ColumnElementKey}
	for _, idxInfo := range changingIdxs {
		out = append(out, &meta.Element{ID: idxInfo.ID, TypeKey: meta.IndexElementKey})
	}
	return out
}
// updatePhysicalTableRow backfills the rows of a physical table (or, for a
// partitioned table, each partition in turn) for the current reorg job.
// For partitioned tables only reorganize-partition style jobs are supported;
// modify-column on partitioned tables is explicitly rejected.
func (w *worker) updatePhysicalTableRow(t table.Table, reorgInfo *reorgInfo) error {
	logutil.BgLogger().Info("start to update table row", zap.String("category", "ddl"), zap.String("job", reorgInfo.Job.String()), zap.String("reorgInfo", reorgInfo.String()))
	if tbl, ok := t.(table.PartitionedTable); ok {
		done := false
		// Process partitions one at a time; updateReorgInfo advances
		// reorgInfo.PhysicalTableID and reports done after the last one.
		for !done {
			p := tbl.GetPartition(reorgInfo.PhysicalTableID)
			if p == nil {
				return dbterror.ErrCancelledDDLJob.GenWithStack("Can not find partition id %d for table %d", reorgInfo.PhysicalTableID, t.Meta().ID)
			}
			workType := typeReorgPartitionWorker
			switch reorgInfo.Job.Type {
			case model.ActionReorganizePartition,
				model.ActionRemovePartitioning,
				model.ActionAlterTablePartitioning:
				// Expected
			default:
				// workType = typeUpdateColumnWorker
				// TODO: Support Modify Column on partitioned table
				// https://github.com/pingcap/tidb/issues/38297
				return dbterror.ErrCancelledDDLJob.GenWithStack("Modify Column on partitioned table / typeUpdateColumnWorker not yet supported.")
			}
			err := w.writePhysicalTableRecord(w.sessPool, p, workType, reorgInfo)
			if err != nil {
				return err
			}
			done, err = updateReorgInfo(w.sessPool, tbl, reorgInfo)
			if err != nil {
				return errors.Trace(err)
			}
		}
		return nil
	}
	if tbl, ok := t.(table.PhysicalTable); ok {
		// Plain (non-partitioned) table: one backfill pass with the
		// update-column worker.
		return w.writePhysicalTableRecord(w.sessPool, tbl, typeUpdateColumnWorker, reorgInfo)
	}
	return dbterror.ErrCancelledDDLJob.GenWithStack("internal error for phys tbl id: %d tbl id: %d", reorgInfo.PhysicalTableID, t.Meta().ID)
}
// TestReorgGoroutineRunning is only used in test to indicate the reorg goroutine has been started.
// It is written to by the mockInfiniteReorgLogic failpoint in updateCurrentElement.
var TestReorgGoroutineRunning = make(chan interface{})
// updateCurrentElement update the current element for reorgInfo.
// For a modify-column reorg, reorgInfo.elements[0] is the changing column and
// the remaining elements are the changing indexes. If the current element is
// the column, its rows are backfilled first; afterwards each index element is
// backfilled in order, persisting progress so the process can resume after a
// restart or panic.
func (w *worker) updateCurrentElement(t table.Table, reorgInfo *reorgInfo) error {
	// Test-only hook: spin until the job is cancelled, signalling the test
	// through TestReorgGoroutineRunning.
	failpoint.Inject("mockInfiniteReorgLogic", func(val failpoint.Value) {
		//nolint:forcetypeassert
		if val.(bool) {
			a := new(interface{})
			TestReorgGoroutineRunning <- a
			for {
				time.Sleep(30 * time.Millisecond)
				if w.isReorgCancelled(reorgInfo.Job.ID) {
					// Job is cancelled. So it can't be done.
					failpoint.Return(dbterror.ErrCancelledDDLJob)
				}
			}
		}
	})
	// TODO: Support partition tables.
	if bytes.Equal(reorgInfo.currElement.TypeKey, meta.ColumnElementKey) {
		// Current element is the column itself: backfill the table rows.
		//nolint:forcetypeassert
		err := w.updatePhysicalTableRow(t.(table.PhysicalTable), reorgInfo)
		if err != nil {
			return errors.Trace(err)
		}
	}
	if _, ok := t.(table.PartitionedTable); ok {
		// TODO: remove when modify column of partitioned table is supported
		// https://github.com/pingcap/tidb/issues/38297
		return dbterror.ErrCancelledDDLJob.GenWithStack("Modify Column on partitioned table / typeUpdateColumnWorker not yet supported.")
	}
	// Get the original start handle and end handle.
	currentVer, err := getValidCurrentVersion(reorgInfo.d.store)
	if err != nil {
		return errors.Trace(err)
	}
	//nolint:forcetypeassert
	originalStartHandle, originalEndHandle, err := getTableRange(reorgInfo.d.jobContext(reorgInfo.Job.ID), reorgInfo.d, t.(table.PhysicalTable), currentVer.Ver, reorgInfo.Job.Priority)
	if err != nil {
		return errors.Trace(err)
	}
	startElementOffset := 0
	startElementOffsetToResetHandle := -1
	// This backfill job starts with backfilling index data, whose index ID is currElement.ID.
	if bytes.Equal(reorgInfo.currElement.TypeKey, meta.IndexElementKey) {
		// Resume from the index element recorded in the reorg meta
		// (elements[1:] are the index elements).
		for i, element := range reorgInfo.elements[1:] {
			if reorgInfo.currElement.ID == element.ID {
				startElementOffset = i
				startElementOffsetToResetHandle = i
				break
			}
		}
	}
	for i := startElementOffset; i < len(reorgInfo.elements[1:]); i++ {
		// This backfill job has been exited during processing. At that time, the element is reorgInfo.elements[i+1] and handle range is [reorgInfo.StartHandle, reorgInfo.EndHandle].
		// Then the handle range of the rest elements' is [originalStartHandle, originalEndHandle].
		if i == startElementOffsetToResetHandle+1 {
			reorgInfo.StartKey, reorgInfo.EndKey = originalStartHandle, originalEndHandle
		}
		// Update the element in the reorgInfo for updating the reorg meta below.
		reorgInfo.currElement = reorgInfo.elements[i+1]
		// Write the reorg info to store so the whole reorganize process can recover from panic.
		err := reorgInfo.UpdateReorgMeta(reorgInfo.StartKey, w.sessPool)
		logutil.BgLogger().Info("update column and indexes", zap.String("category", "ddl"),
			zap.Int64("job ID", reorgInfo.Job.ID),
			zap.Stringer("element", reorgInfo.currElement),
			zap.String("start key", hex.EncodeToString(reorgInfo.StartKey)),
			zap.String("end key", hex.EncodeToString(reorgInfo.EndKey)))
		if err != nil {
			return errors.Trace(err)
		}
		// Backfill the index data for the current element.
		err = w.addTableIndex(t, reorgInfo)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// updateColumnWorker is the backfill worker that rewrites existing rows when a
// column type change requires data reorganization.
type updateColumnWorker struct {
	*backfillCtx
	// oldColInfo is the column being modified; newColInfo is its changing copy
	// whose value is produced by casting the old column's value.
	oldColInfo *model.ColumnInfo
	newColInfo *model.ColumnInfo
	// The following attributes are used to reduce memory allocation.
	rowRecords []*rowRecord
	rowDecoder *decoder.RowDecoder
	rowMap map[int64]types.Datum
	// checksumBuffer is scratch space for row-level checksum computation;
	// checksumNeeded records whether checksums must be produced at all.
	checksumBuffer rowcodec.RowData
	checksumNeeded bool
}
// newUpdateColumnWorker constructs an updateColumnWorker for the reorg's
// current column element. It locates the old/new column pair from the table's
// writable columns and decides whether row-level checksums must be computed
// during backfill. Returns nil if the current reorg element is not a column.
func newUpdateColumnWorker(sessCtx sessionctx.Context, id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *updateColumnWorker {
	if !bytes.Equal(reorgInfo.currElement.TypeKey, meta.ColumnElementKey) {
		logutil.BgLogger().Error("Element type for updateColumnWorker incorrect", zap.String("jobQuery", reorgInfo.Query),
			zap.String("reorgInfo", reorgInfo.String()))
		return nil
	}
	// The changing column's ID is the current element's ID; the old column is
	// found by stripping the changing-column name prefix.
	var oldCol, newCol *model.ColumnInfo
	for _, col := range t.WritableCols() {
		if col.ID == reorgInfo.currElement.ID {
			newCol = col.ColumnInfo
			oldCol = table.FindCol(t.Cols(), getChangingColumnOriginName(newCol)).ColumnInfo
			break
		}
	}
	rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap)
	checksumNeeded := false
	// Test-only hook to force checksum computation.
	// NOTE(review): the defer restoring the original value relies on the
	// failpoint build-time rewrite inlining this body into the function; as a
	// plain closure it would restore immediately — confirm builds use the
	// failpoint rewriter.
	failpoint.Inject("forceRowLevelChecksumOnUpdateColumnBackfill", func() {
		orig := variable.EnableRowLevelChecksum.Load()
		defer variable.EnableRowLevelChecksum.Store(orig)
		variable.EnableRowLevelChecksum.Store(true)
	})
	// We use global `EnableRowLevelChecksum` to detect whether checksum is enabled in ddl backfill worker because
	// `SessionVars.IsRowLevelChecksumEnabled` will filter out internal sessions.
	if variable.EnableRowLevelChecksum.Load() {
		// Checksums are only computed when the changing column is the sole
		// non-public column; otherwise they are skipped with a warning.
		if numNonPubCols := len(t.DeletableCols()) - len(t.Cols()); numNonPubCols > 1 {
			cols := make([]*model.ColumnInfo, len(t.DeletableCols()))
			for i, col := range t.DeletableCols() {
				cols[i] = col.ToInfo()
			}
			logutil.BgLogger().Warn("skip checksum in update-column backfill since the number of non-public columns is greater than 1",
				zap.String("jobQuery", reorgInfo.Query), zap.String("reorgInfo", reorgInfo.String()), zap.Any("cols", cols))
		} else {
			checksumNeeded = true
		}
	}
	return &updateColumnWorker{
		backfillCtx:    newBackfillCtx(reorgInfo.d, id, sessCtx, reorgInfo.SchemaName, t, jc, "update_col_rate", false),
		oldColInfo:     oldCol,
		newColInfo:     newCol,
		rowDecoder:     rowDecoder,
		rowMap:         make(map[int64]types.Datum, len(decodeColMap)),
		checksumNeeded: checksumNeeded,
	}
}
// AddMetricInfo adds cnt to the worker's backfill-rate metric counter.
func (w *updateColumnWorker) AddMetricInfo(cnt float64) {
	w.metricCounter.Add(cnt)
}
// String implements fmt.Stringer; it reports the worker type name.
func (*updateColumnWorker) String() string {
	return typeUpdateColumnWorker.String()
}
// GetCtx returns the worker's shared backfill context.
func (w *updateColumnWorker) GetCtx() *backfillCtx {
	return w.backfillCtx
}
// rowRecord is one backfilled row buffered between fetching and writing.
type rowRecord struct {
	key     []byte        // It's used to lock a record. Record it to reduce the encoding time.
	vals    []byte        // It's the record.
	warning *terror.Error // It's used to record the cast warning of a record.
}
// getNextHandleKey returns the key the next batch should start from: one past
// the end of the task range when the task is finished, otherwise one past the
// last entry that was actually processed.
func getNextHandleKey(taskRange reorgBackfillTask,
	taskDone bool, lastAccessedHandle kv.Key) (nextHandle kv.Key) {
	if taskDone {
		return taskRange.endKey.Next()
	}
	// Still mid-task: resume right after the last processed entry.
	return lastAccessedHandle.Next()
}
// fetchRowColVals scans a batch of rows from the snapshot within the task's
// key range, converting each into a rowRecord via getRowRecord. It returns
// the buffered records, the key the next batch should start from, whether the
// task range is exhausted, and any scan error.
func (w *updateColumnWorker) fetchRowColVals(txn kv.Transaction, taskRange reorgBackfillTask) ([]*rowRecord, kv.Key, bool, error) {
	// Reuse the record buffer across batches.
	w.rowRecords = w.rowRecords[:0]
	startTime := time.Now()
	// taskDone means that the added handle is out of taskRange.endHandle.
	taskDone := false
	var lastAccessedHandle kv.Key
	oprStartTime := startTime
	err := iterateSnapshotKeys(w.jobContext, w.sessCtx.GetStore(), taskRange.priority, taskRange.physicalTable.RecordPrefix(),
		txn.StartTS(), taskRange.startKey, taskRange.endKey, func(handle kv.Handle, recordKey kv.Key, rawRow []byte) (bool, error) {
			oprEndTime := time.Now()
			logSlowOperations(oprEndTime.Sub(oprStartTime), "iterateSnapshotKeys in updateColumnWorker fetchRowColVals", 0)
			oprStartTime = oprEndTime
			// Stop when past the range end or the batch is full; returning
			// false halts the iteration.
			taskDone = recordKey.Cmp(taskRange.endKey) >= 0
			if taskDone || len(w.rowRecords) >= w.batchCnt {
				return false, nil
			}
			if err1 := w.getRowRecord(handle, recordKey, rawRow); err1 != nil {
				return false, errors.Trace(err1)
			}
			lastAccessedHandle = recordKey
			// Exactly hitting the end key also completes the task.
			if recordKey.Cmp(taskRange.endKey) == 0 {
				taskDone = true
				return false, nil
			}
			return true, nil
		})
	// An empty batch means there is nothing left in the range.
	if len(w.rowRecords) == 0 {
		taskDone = true
	}
	logutil.BgLogger().Debug("txn fetches handle info", zap.String("category", "ddl"), zap.Uint64("txnStartTS", txn.StartTS()), zap.String("taskRange", taskRange.String()), zap.Duration("takeTime", time.Since(startTime)))
	return w.rowRecords, getNextHandleKey(taskRange, taskDone, lastAccessedHandle), taskDone, errors.Trace(err)
}
// getRowRecord decodes one raw row, casts the old column's value to the new
// column's type (capturing any cast warning), re-encodes the row with the new
// column value (plus optional checksums) and appends it to w.rowRecords.
// Rows that already contain the new column are skipped.
func (w *updateColumnWorker) getRowRecord(handle kv.Handle, recordKey []byte, rawRow []byte) error {
	sysTZ := w.sessCtx.GetSessionVars().StmtCtx.TimeZone
	_, err := w.rowDecoder.DecodeTheExistedColumnMap(w.sessCtx, handle, rawRow, sysTZ, w.rowMap)
	if err != nil {
		return errors.Trace(dbterror.ErrCantDecodeRecord.GenWithStackByArgs("column", err))
	}
	if _, ok := w.rowMap[w.newColInfo.ID]; ok {
		// The column is already added by update or insert statement, skip it.
		w.cleanRowMap()
		return nil
	}
	var recordWarning *terror.Error
	// Since every updateColumnWorker handle their own work individually, we can cache warning in statement context when casting datum.
	// Reset the warning list (reusing its backing array when possible) so only
	// warnings produced by this row's cast are observed below.
	oldWarn := w.sessCtx.GetSessionVars().StmtCtx.GetWarnings()
	if oldWarn == nil {
		oldWarn = []stmtctx.SQLWarn{}
	} else {
		oldWarn = oldWarn[:0]
	}
	w.sessCtx.GetSessionVars().StmtCtx.SetWarnings(oldWarn)
	val := w.rowMap[w.oldColInfo.ID]
	col := w.newColInfo
	if val.Kind() == types.KindNull && col.FieldType.GetType() == mysql.TypeTimestamp && mysql.HasNotNullFlag(col.GetFlag()) {
		if v, err := expression.GetTimeCurrentTimestamp(w.sessCtx, col.GetType(), col.GetDecimal()); err == nil {
			// convert null value to timestamp should be substituted with current timestamp if NOT_NULL flag is set.
			w.rowMap[w.oldColInfo.ID] = v
		}
	}
	newColVal, err := table.CastValue(w.sessCtx, w.rowMap[w.oldColInfo.ID], w.newColInfo, false, false)
	if err != nil {
		return w.reformatErrors(err)
	}
	// Keep only the first cast warning per row; duplicates add no information.
	warn := w.sessCtx.GetSessionVars().StmtCtx.GetWarnings()
	if len(warn) != 0 {
		//nolint:forcetypeassert
		recordWarning = errors.Cause(w.reformatErrors(warn[0].Err)).(*terror.Error)
	}
	// Test-only hook simulating a reorg timeout on a specific handle.
	failpoint.Inject("MockReorgTimeoutInOneRegion", func(val failpoint.Value) {
		//nolint:forcetypeassert
		if val.(bool) {
			if handle.IntValue() == 3000 && atomic.CompareAndSwapInt32(&TestCheckReorgTimeout, 0, 1) {
				failpoint.Return(errors.Trace(dbterror.ErrWaitReorgTimeout))
			}
		}
	})
	w.rowMap[w.newColInfo.ID] = newColVal
	// Re-evaluate generated columns that depend on the changed value.
	_, err = w.rowDecoder.EvalRemainedExprColumnMap(w.sessCtx, w.rowMap)
	if err != nil {
		return errors.Trace(err)
	}
	newColumnIDs := make([]int64, 0, len(w.rowMap))
	newRow := make([]types.Datum, 0, len(w.rowMap))
	for colID, val := range w.rowMap {
		newColumnIDs = append(newColumnIDs, colID)
		newRow = append(newRow, val)
	}
	checksums := w.calcChecksums()
	sctx, rd := w.sessCtx.GetSessionVars().StmtCtx, &w.sessCtx.GetSessionVars().RowEncoder
	newRowVal, err := tablecodec.EncodeRow(sctx, newRow, newColumnIDs, nil, nil, rd, checksums...)
	if err != nil {
		return errors.Trace(err)
	}
	w.rowRecords = append(w.rowRecords, &rowRecord{key: recordKey, vals: newRowVal, warning: recordWarning})
	w.cleanRowMap()
	return nil
}
// calcChecksums computes the two row-level checksums for a backfilled row:
// one over the row as seen with the new column (old column excluded) and one
// as seen with the old column (new column excluded). Returns nil when
// checksums are disabled or encoding fails (checksums are best-effort).
func (w *updateColumnWorker) calcChecksums() []uint32 {
	if !w.checksumNeeded {
		return nil
	}
	// when w.checksumNeeded is true, it indicates that there is only one write-reorg column (the new column) and other
	// columns are public, thus we have to calculate two checksums that one of which only contains the old column and
	// the other only contains the new column.
	var checksums [2]uint32
	for i, id := range []int64{w.newColInfo.ID, w.oldColInfo.ID} {
		// Reuse the scratch buffer across both passes.
		if len(w.checksumBuffer.Cols) > 0 {
			w.checksumBuffer.Cols = w.checksumBuffer.Cols[:0]
		}
		for _, col := range w.table.DeletableCols() {
			// Exclude the counterpart column of this pass and virtual
			// generated columns (they are not stored).
			if col.ID == id || (col.IsGenerated() && !col.GeneratedStored) {
				continue
			}
			d := w.rowMap[col.ID]
			w.checksumBuffer.Cols = append(w.checksumBuffer.Cols, rowcodec.ColData{ColumnInfo: col.ToInfo(), Datum: &d})
		}
		// The checksum definition requires a canonical column order.
		if !sort.IsSorted(w.checksumBuffer) {
			sort.Sort(w.checksumBuffer)
		}
		checksum, err := w.checksumBuffer.Checksum()
		if err != nil {
			logutil.BgLogger().Warn("skip checksum in update-column backfill due to encode error", zap.Error(err))
			return nil
		}
		checksums[i] = checksum
	}
	return checksums[:]
}
// reformatErrors rewrites cast errors so they carry the column name and the
// offending datum value, which `convertTo` cannot attach itself. Other errors
// pass through unchanged.
func (w *updateColumnWorker) reformatErrors(err error) error {
	// Since row count is not precise during concurrent reorganization,
	// substitute the row count with the datum value in the message.
	switch {
	case types.ErrTruncated.Equal(err) || types.ErrDataTooLong.Equal(err):
		dStr := datumToStringNoErr(w.rowMap[w.oldColInfo.ID])
		err = types.ErrTruncated.GenWithStack("Data truncated for column '%s', value is '%s'", w.oldColInfo.Name, dStr)
	case types.ErrWarnDataOutOfRange.Equal(err):
		dStr := datumToStringNoErr(w.rowMap[w.oldColInfo.ID])
		err = types.ErrWarnDataOutOfRange.GenWithStack("Out of range value for column '%s', the value is '%s'", w.oldColInfo.Name, dStr)
	}
	return err
}
// datumToStringNoErr renders a datum as a string, falling back to a %v dump
// of the raw value when ToString fails, so it never returns an error.
func datumToStringNoErr(d types.Datum) string {
	s, err := d.ToString()
	if err != nil {
		return fmt.Sprintf("%v", d.GetValue())
	}
	return s
}
// cleanRowMap empties w.rowMap in place so the map (and its buckets) can be
// reused for the next row. This delete-in-range loop is the pattern the Go
// compiler optimizes into a single map clear.
func (w *updateColumnWorker) cleanRowMap() {
	for id := range w.rowMap {
		delete(w.rowMap, id)
	}
}
// BackfillData will backfill the table record in a transaction. A lock corresponds to a rowKey if the value of rowKey is changed.
// It fetches one batch of rows for the given handle range, writes the
// re-encoded values inside a fresh transaction, and aggregates cast warnings
// per error ID into the returned task context.
func (w *updateColumnWorker) BackfillData(handleRange reorgBackfillTask) (taskCtx backfillTaskContext, errInTxn error) {
	oprStartTime := time.Now()
	ctx := kv.WithInternalSourceAndTaskType(context.Background(), w.jobContext.ddlJobSourceType(), kvutil.ExplicitTypeDDL)
	errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error {
		taskCtx.addedCount = 0
		taskCtx.scanCount = 0
		// Because TiCDC do not want this kind of change,
		// so we set the lossy DDL reorg txn source to 1 to
		// avoid TiCDC to replicate this kind of change.
		var txnSource uint64
		if val := txn.GetOption(kv.TxnSource); val != nil {
			txnSource, _ = val.(uint64)
		}
		err := kv.SetLossyDDLReorgSource(&txnSource, kv.LossyDDLColumnReorgSource)
		if err != nil {
			return errors.Trace(err)
		}
		txn.SetOption(kv.TxnSource, txnSource)
		txn.SetOption(kv.Priority, handleRange.priority)
		// Tag the transaction for Top SQL attribution when enabled.
		if tagger := w.GetCtx().getResourceGroupTaggerForTopSQL(handleRange.getJobID()); tagger != nil {
			txn.SetOption(kv.ResourceGroupTagger, tagger)
		}
		rowRecords, nextKey, taskDone, err := w.fetchRowColVals(txn, handleRange)
		if err != nil {
			return errors.Trace(err)
		}
		taskCtx.nextKey = nextKey
		taskCtx.done = taskDone
		// Optimize for few warnings! Deduplicate warnings by error ID and
		// count occurrences instead of storing each one.
		warningsMap := make(map[errors.ErrorID]*terror.Error, 2)
		warningsCountMap := make(map[errors.ErrorID]int64, 2)
		for _, rowRecord := range rowRecords {
			taskCtx.scanCount++
			err = txn.Set(rowRecord.key, rowRecord.vals)
			if err != nil {
				return errors.Trace(err)
			}
			taskCtx.addedCount++
			if rowRecord.warning != nil {
				if _, ok := warningsCountMap[rowRecord.warning.ID()]; ok {
					warningsCountMap[rowRecord.warning.ID()]++
				} else {
					warningsCountMap[rowRecord.warning.ID()] = 1
					warningsMap[rowRecord.warning.ID()] = rowRecord.warning
				}
			}
		}
		// Collect the warnings.
		taskCtx.warnings, taskCtx.warningsCount = warningsMap, warningsCountMap
		return nil
	})
	logSlowOperations(time.Since(oprStartTime), "BackfillData", 3000)
	return
}
// updateChangingObjState sets the same schema state on the changing column and
// every changing index, keeping them in lockstep through the state machine.
func updateChangingObjState(changingCol *model.ColumnInfo, changingIdxs []*model.IndexInfo, schemaState model.SchemaState) {
	changingCol.State = schemaState
	for i := range changingIdxs {
		changingIdxs[i].State = schemaState
	}
}
// doModifyColumn updates the column information and reorders all columns. It does not support modifying column data.
// For a NULL -> NOT NULL change it first sets a prevent-null-insert flag and
// verifies no NULL values exist, over two DDL rounds, before committing the
// metadata change. The steps below are order-sensitive.
func (w *worker) doModifyColumn(
	d *ddlCtx, t *meta.Meta, job *model.Job, dbInfo *model.DBInfo, tblInfo *model.TableInfo,
	newCol, oldCol *model.ColumnInfo, pos *ast.ColumnPosition) (ver int64, _ error) {
	if oldCol.ID != newCol.ID {
		// A different column ID means the column is concurrently being changed
		// by another job; roll back.
		job.State = model.JobStateRollingback
		return ver, dbterror.ErrColumnInChange.GenWithStackByArgs(oldCol.Name, newCol.ID)
	}
	// Column from null to not null.
	if !mysql.HasNotNullFlag(oldCol.GetFlag()) && mysql.HasNotNullFlag(newCol.GetFlag()) {
		noPreventNullFlag := !mysql.HasPreventNullInsertFlag(oldCol.GetFlag())
		// lease = 0 means it's in an integration test. In this case we don't delay so the test won't run too slowly.
		// We need to check after the flag is set
		if d.lease > 0 && !noPreventNullFlag {
			delayForAsyncCommit()
		}
		// Introduce the `mysql.PreventNullInsertFlag` flag to prevent users from inserting or updating null values.
		err := modifyColsFromNull2NotNull(w, dbInfo, tblInfo, []*model.ColumnInfo{oldCol}, newCol, oldCol.GetType() != newCol.GetType())
		if err != nil {
			if dbterror.ErrWarnDataTruncated.Equal(err) || dbterror.ErrInvalidUseOfNull.Equal(err) {
				job.State = model.JobStateRollingback
			}
			return ver, err
		}
		// The column should get into prevent null status first.
		if noPreventNullFlag {
			return updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, true)
		}
	}
	if job.MultiSchemaInfo != nil && job.MultiSchemaInfo.Revertible {
		job.MarkNonRevertible()
		// Store the mark and enter the next DDL handling loop.
		return updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, false)
	}
	if err := adjustTableInfoAfterModifyColumn(tblInfo, newCol, oldCol, pos); err != nil {
		job.State = model.JobStateRollingback
		return ver, errors.Trace(err)
	}
	// Propagate a column rename to referencing foreign keys in child tables.
	childTableInfos, err := adjustForeignKeyChildTableInfoAfterModifyColumn(d, t, job, tblInfo, newCol, oldCol)
	if err != nil {
		return ver, errors.Trace(err)
	}
	ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, true, childTableInfos...)
	if err != nil {
		// Modified the type definition of 'null' to 'not null' before this, so rollBack the job when an error occurs.
		job.State = model.JobStateRollingback
		return ver, errors.Trace(err)
	}
	job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
	// For those column-type-change type which doesn't need reorg data, we should also mock the job args for delete range.
	job.Args = []interface{}{[]int64{}, []int64{}}
	return ver, nil
}
// adjustTableInfoAfterModifyColumn applies a reorg-free column change to
// tblInfo: it swaps in the new column definition at the old column's slot,
// moves it to the requested position, and renames references in indexes,
// foreign keys and TTL metadata.
func adjustTableInfoAfterModifyColumn(
	tblInfo *model.TableInfo, newCol, oldCol *model.ColumnInfo, pos *ast.ColumnPosition) error {
	// We need the latest column's offset and state. This information can be obtained from the store.
	newCol.Offset = oldCol.Offset
	newCol.State = oldCol.State
	if pos != nil && pos.RelativeColumn != nil && oldCol.Name.L == pos.RelativeColumn.Name.L {
		// For cases like `modify column b after b`, it should report this error.
		return errors.Trace(infoschema.ErrColumnNotExists.GenWithStackByArgs(oldCol.Name, tblInfo.Name))
	}
	destOffset, err := LocateOffsetToMove(oldCol.Offset, pos, tblInfo)
	if err != nil {
		// NOTE(review): the underlying err is discarded and replaced with a
		// column-not-exists error about oldCol — presumably every failure here
		// is a bad AFTER anchor; confirm no other error kind can surface.
		return errors.Trace(infoschema.ErrColumnNotExists.GenWithStackByArgs(oldCol.Name, tblInfo.Name))
	}
	tblInfo.Columns[oldCol.Offset] = newCol
	tblInfo.MoveColumnInfo(oldCol.Offset, destOffset)
	// Rename references that still point at the old column name.
	updateNewIdxColsNameOffset(tblInfo.Indices, oldCol.Name, newCol)
	updateFKInfoWhenModifyColumn(tblInfo, oldCol.Name, newCol.Name)
	updateTTLInfoWhenModifyColumn(tblInfo, oldCol.Name, newCol.Name)
	return nil
}
// adjustForeignKeyChildTableInfoAfterModifyColumn renames the referenced
// column inside every child table's foreign key that points at this table,
// and returns the child table infos that must be persisted alongside the
// parent. It returns nil when foreign keys are disabled or the column name is
// unchanged.
func adjustForeignKeyChildTableInfoAfterModifyColumn(d *ddlCtx, t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, newCol, oldCol *model.ColumnInfo) ([]schemaIDAndTableInfo, error) {
	if !variable.EnableForeignKey.Load() || newCol.Name.L == oldCol.Name.L {
		return nil, nil
	}
	is, err := getAndCheckLatestInfoSchema(d, t)
	if err != nil {
		return nil, err
	}
	referredFKs := is.GetTableReferredForeignKeys(job.SchemaName, tblInfo.Name.L)
	if len(referredFKs) == 0 {
		return nil, nil
	}
	fkh := newForeignKeyHelper()
	fkh.addLoadedTable(job.SchemaName, tblInfo.Name.L, job.SchemaID, tblInfo)
	for _, referredFK := range referredFKs {
		info, err := fkh.getTableFromStorage(is, t, referredFK.ChildSchema, referredFK.ChildTable)
		if err != nil {
			// A dropped child table/schema is not an error for the rename.
			if infoschema.ErrTableNotExists.Equal(err) || infoschema.ErrDatabaseNotExists.Equal(err) {
				continue
			}
			return nil, err
		}
		fkInfo := model.FindFKInfoByName(info.tblInfo.ForeignKeys, referredFK.ChildFKName.L)
		if fkInfo == nil {
			continue
		}
		// Rename the referenced (parent-side) columns in the child's FK.
		for i := range fkInfo.RefCols {
			if fkInfo.RefCols[i].L == oldCol.Name.L {
				fkInfo.RefCols[i] = newCol.Name
			}
		}
	}
	// Return every loaded child table except the parent itself.
	infoList := make([]schemaIDAndTableInfo, 0, len(fkh.loaded))
	for _, info := range fkh.loaded {
		if info.tblInfo.ID == tblInfo.ID {
			continue
		}
		infoList = append(infoList, info)
	}
	return infoList, nil
}
// checkAndApplyAutoRandomBits validates the requested auto_random shard-bit
// count against the current auto-ID usage and, if valid, applies it to the
// table info. A zero bit count means auto_random is untouched and is a no-op.
func checkAndApplyAutoRandomBits(d *ddlCtx, m *meta.Meta, dbInfo *model.DBInfo, tblInfo *model.TableInfo,
	oldCol *model.ColumnInfo, newCol *model.ColumnInfo, newAutoRandBits uint64) error {
	if newAutoRandBits == 0 {
		return nil
	}
	acc := m.GetAutoIDAccessors(dbInfo.ID, tblInfo.ID)
	if err := checkNewAutoRandomBits(acc, oldCol, newCol, newAutoRandBits, tblInfo.AutoRandomRangeBits, tblInfo.SepAutoInc()); err != nil {
		return err
	}
	return applyNewAutoRandomBits(d, m, dbInfo, tblInfo, oldCol, newAutoRandBits)
}
// checkNewAutoRandomBits checks whether the new auto_random bits number can cause overflow.
// It picks the right auto-ID accessor (the auto_increment counter when the
// column is being converted from auto_increment, otherwise the auto_random
// counter), bumps it once to freeze a lower bound, and verifies the already
// consumed incremental bits still fit under the new shard layout.
func checkNewAutoRandomBits(idAccessors meta.AutoIDAccessors, oldCol *model.ColumnInfo,
	newCol *model.ColumnInfo, newShardBits, newRangeBits uint64, sepAutoInc bool) error {
	shardFmt := autoid.NewShardIDFormat(&newCol.FieldType, newShardBits, newRangeBits)
	idAcc := idAccessors.RandomID()
	convertedFromAutoInc := mysql.HasAutoIncrementFlag(oldCol.GetFlag())
	if convertedFromAutoInc {
		// The existing IDs live in the auto_increment counter, whose location
		// depends on whether the table keeps a separate auto_inc allocator.
		if sepAutoInc {
			idAcc = idAccessors.IncrementID(model.TableInfoVersion5)
		} else {
			idAcc = idAccessors.RowID()
		}
	}
	// Generate a new auto ID first to prevent concurrent update in DML.
	_, err := idAcc.Inc(1)
	if err != nil {
		return err
	}
	currentIncBitsVal, err := idAcc.Get()
	if err != nil {
		return err
	}
	// Find the max number of available shard bits by
	// counting leading zeros in current inc part of auto_random ID.
	usedBits := uint64(64 - bits.LeadingZeros64(uint64(currentIncBitsVal)))
	if usedBits > shardFmt.IncrementalBits {
		// Report how many shard bits would actually fit.
		overflowCnt := usedBits - shardFmt.IncrementalBits
		errMsg := fmt.Sprintf(autoid.AutoRandomOverflowErrMsg, newShardBits-overflowCnt, newShardBits, oldCol.Name.O)
		return dbterror.ErrInvalidAutoRandom.GenWithStackByArgs(errMsg)
	}
	return nil
}
// applyNewAutoRandomBits set auto_random bits to TableInfo and
// migrate auto_increment ID to auto_random ID if possible.
func applyNewAutoRandomBits(d *ddlCtx, m *meta.Meta, dbInfo *model.DBInfo,
	tblInfo *model.TableInfo, oldCol *model.ColumnInfo, newAutoRandBits uint64) error {
	tblInfo.AutoRandomBits = newAutoRandBits
	// Migration is only needed when the column previously carried AUTO_INCREMENT.
	needMigrateFromAutoIncToAutoRand := mysql.HasAutoIncrementFlag(oldCol.GetFlag())
	if !needMigrateFromAutoIncToAutoRand {
		return nil
	}
	autoRandAlloc := autoid.NewAllocatorsFromTblInfo(d.store, dbInfo.ID, tblInfo).Get(autoid.AutoRandomType)
	if autoRandAlloc == nil {
		errMsg := fmt.Sprintf(autoid.AutoRandomAllocatorNotFound, dbInfo.Name.O, tblInfo.Name.O)
		return dbterror.ErrInvalidAutoRandom.GenWithStackByArgs(errMsg)
	}
	// Order matters: first rebase the auto_random allocator to the current
	// auto_increment watermark, then delete the old row-ID counter so future
	// IDs come exclusively from the auto_random allocator.
	idAcc := m.GetAutoIDAccessors(dbInfo.ID, tblInfo.ID).RowID()
	nextAutoIncID, err := idAcc.Get()
	if err != nil {
		return errors.Trace(err)
	}
	err = autoRandAlloc.Rebase(context.Background(), nextAutoIncID, false)
	if err != nil {
		return errors.Trace(err)
	}
	if err := idAcc.Del(); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// checkForNullValue ensure there are no null values of the column of this table.
// `isDataTruncated` indicates whether the new field and the old field type are the same, in order to be compatible with mysql.
//
// It issues a single `select 1 from db.tbl where c1 is null or ... limit 1`
// and fails with ErrWarnDataTruncated/ErrInvalidUseOfNull when a row matches.
func checkForNullValue(ctx context.Context, sctx sessionctx.Context, isDataTruncated bool, schema, table model.CIStr, newCol *model.ColumnInfo, oldCols ...*model.ColumnInfo) error {
	needCheckNullValue := false
	for _, oldCol := range oldCols {
		if oldCol.GetType() != mysql.TypeTimestamp && newCol.GetType() == mysql.TypeTimestamp {
			// special case for convert null value of non-timestamp type to timestamp type, null value will be substituted with current timestamp.
			continue
		}
		needCheckNullValue = true
		// One column requiring the check is enough; no need to scan the rest.
		break
	}
	if !needCheckNullValue {
		return nil
	}
	// Build the probe query with %n placeholders so identifiers are escaped
	// by the restricted SQL executor.
	var buf strings.Builder
	buf.WriteString("select 1 from %n.%n where ")
	paramsList := make([]interface{}, 0, 2+len(oldCols))
	paramsList = append(paramsList, schema.L, table.L)
	for i, col := range oldCols {
		if i == 0 {
			buf.WriteString("%n is null")
		} else {
			buf.WriteString(" or %n is null")
		}
		paramsList = append(paramsList, col.Name.L)
	}
	buf.WriteString(" limit 1")
	//nolint:forcetypeassert
	rows, _, err := sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, buf.String(), paramsList...)
	if err != nil {
		return errors.Trace(err)
	}
	rowCount := len(rows)
	if rowCount != 0 {
		if isDataTruncated {
			return dbterror.ErrWarnDataTruncated.GenWithStackByArgs(newCol.Name.L, rowCount)
		}
		return dbterror.ErrInvalidUseOfNull
	}
	return nil
}
// updateColumnDefaultValue handles the DDL job that changes a column's default
// value. It validates the new default against the existing (public) column,
// copies the default-related fields onto the stored column info, and bumps the
// schema version. Any validation failure cancels the job.
func updateColumnDefaultValue(d *ddlCtx, t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldColName *model.CIStr) (ver int64, _ error) {
	tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID)
	if err != nil {
		return ver, errors.Trace(err)
	}
	if job.MultiSchemaInfo != nil && job.MultiSchemaInfo.Revertible {
		job.MarkNonRevertible()
		// Store the mark and enter the next DDL handling loop.
		return updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, false)
	}

	// The column must exist and already be public; otherwise cancel the job.
	oldCol := model.FindColumnInfo(tblInfo.Columns, oldColName.L)
	if oldCol == nil || oldCol.State != model.StatePublic {
		job.State = model.JobStateCancelled
		return ver, infoschema.ErrColumnNotExists.GenWithStackByArgs(newCol.Name, tblInfo.Name)
	}
	// Validate the new default against a clone so validation cannot mutate the
	// live column info.
	if hasDefaultValue, _, err := checkColumnDefaultValue(newContext(d.store), table.ToColumn(oldCol.Clone()), newCol.DefaultValue); err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	} else if !hasDefaultValue {
		job.State = model.JobStateCancelled
		return ver, dbterror.ErrInvalidDefaultValue.GenWithStackByArgs(newCol.Name)
	}

	// The newCol's offset may be the value of the old schema version, so we can't use newCol directly.
	oldCol.DefaultValue = newCol.DefaultValue
	oldCol.DefaultValueBit = newCol.DefaultValueBit
	oldCol.DefaultIsExpr = newCol.DefaultIsExpr
	if mysql.HasNoDefaultValueFlag(newCol.GetFlag()) {
		oldCol.AddFlag(mysql.NoDefaultValueFlag)
	} else {
		oldCol.DelFlag(mysql.NoDefaultValueFlag)
		// Re-validate the stored default once the NoDefaultValue flag is cleared.
		sctx := newContext(d.store)
		err = checkDefaultValue(sctx, table.ToColumn(oldCol), true)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, err
		}
	}

	ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}

	job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
	return ver, nil
}
// isColumnWithIndex reports whether the column (lower-case name) appears in
// any of the given indexes.
func isColumnWithIndex(colName string, indices []*model.IndexInfo) bool {
	for _, idx := range indices {
		for _, idxCol := range idx.Columns {
			if idxCol.Name.L == colName {
				return true
			}
		}
	}
	return false
}
// isColumnCanDropWithIndex returns an error when the column is covered by a
// primary key or by a composite (multi-column) index; such columns cannot be
// dropped directly.
func isColumnCanDropWithIndex(colName string, indices []*model.IndexInfo) error {
	for _, idx := range indices {
		if !idx.Primary && len(idx.Columns) <= 1 {
			// Single-column secondary indexes do not block the drop.
			continue
		}
		for _, idxCol := range idx.Columns {
			if idxCol.Name.L == colName {
				return dbterror.ErrCantDropColWithIndex.GenWithStack("can't drop column %s with composite index covered or Primary Key covered now", colName)
			}
		}
	}
	return nil
}
// listIndicesWithColumn collects the single-column indexes whose only column
// is colName (lower-case).
func listIndicesWithColumn(colName string, indices []*model.IndexInfo) []*model.IndexInfo {
	matched := make([]*model.IndexInfo, 0)
	for _, idx := range indices {
		if len(idx.Columns) != 1 {
			continue
		}
		if idx.Columns[0].Name.L == colName {
			matched = append(matched, idx)
		}
	}
	return matched
}
// GetColumnForeignKeyInfo returns the wanted foreign key info
// (the first FK whose column list contains colName), or nil when none matches.
func GetColumnForeignKeyInfo(colName string, fkInfos []*model.FKInfo) *model.FKInfo {
	for _, fk := range fkInfos {
		for _, fkCol := range fk.Cols {
			if colName == fkCol.L {
				return fk
			}
		}
	}
	return nil
}
// AllocateColumnID allocates next column ID from TableInfo.
// The counter is monotonically increasing and persisted with the table info,
// so IDs are never reused even after columns are dropped.
func AllocateColumnID(tblInfo *model.TableInfo) int64 {
	tblInfo.MaxColumnID++
	return tblInfo.MaxColumnID
}
// checkAddColumnTooManyColumns returns ErrTooManyFields when colNum exceeds
// the configured table-column-count limit (read atomically from the global config).
func checkAddColumnTooManyColumns(colNum int) error {
	if uint32(colNum) > atomic.LoadUint32(&config.GetGlobalConfig().TableColumnCountLimit) {
		return dbterror.ErrTooManyFields
	}
	return nil
}
// rollbackModifyColumnJob rollbacks the job when an error occurs.
// For the NULL->NOT NULL conversion case (same column ID, target type NULL)
// it clears the flags set during the forward phase before finishing the job
// as rolled back.
func rollbackModifyColumnJob(d *ddlCtx, t *meta.Meta, tblInfo *model.TableInfo, job *model.Job, newCol, oldCol *model.ColumnInfo, modifyColumnTp byte) (ver int64, _ error) {
	var err error
	if oldCol.ID == newCol.ID && modifyColumnTp == mysql.TypeNull {
		// field NotNullFlag flag reset.
		tblInfo.Columns[oldCol.Offset].SetFlag(oldCol.GetFlag() &^ mysql.NotNullFlag)
		// field PreventNullInsertFlag flag reset.
		tblInfo.Columns[oldCol.Offset].SetFlag(oldCol.GetFlag() &^ mysql.PreventNullInsertFlag)
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true)
		if err != nil {
			return ver, errors.Trace(err)
		}
	}
	job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
	// For those column-type-change type which doesn't need reorg data, we should also mock the job args for delete range.
	job.Args = []interface{}{[]int64{}, []int64{}}
	return ver, nil
}
// modifyColsFromNull2NotNull modifies the type definitions of 'null' to 'not null'.
// Introduce the `mysql.PreventNullInsertFlag` flag to prevent users from inserting or updating null values.
func modifyColsFromNull2NotNull(w *worker, dbInfo *model.DBInfo, tblInfo *model.TableInfo, cols []*model.ColumnInfo, newCol *model.ColumnInfo, isDataTruncated bool) error {
	// Get sessionctx from context resource pool.
	var sctx sessionctx.Context
	sctx, err := w.sessPool.Get()
	if err != nil {
		return errors.Trace(err)
	}
	// Return the session to the pool regardless of outcome.
	defer w.sessPool.Put(sctx)

	// Test hook: lets tests bypass the real SQL null-value scan.
	skipCheck := false
	failpoint.Inject("skipMockContextDoExec", func(val failpoint.Value) {
		//nolint:forcetypeassert
		if val.(bool) {
			skipCheck = true
		}
	})
	if !skipCheck {
		// If there is a null value inserted, it cannot be modified and needs to be rollback.
		err = checkForNullValue(w.ctx, sctx, isDataTruncated, dbInfo.Name, tblInfo.Name, newCol, cols...)
		if err != nil {
			return errors.Trace(err)
		}
	}

	// Prevent this field from inserting null values.
	for _, col := range cols {
		col.AddFlag(mysql.PreventNullInsertFlag)
	}
	return nil
}
// generateOriginDefaultValue computes the "origin default" used to backfill
// existing rows when a column gains a NOT NULL default. For NOT NULL columns
// without an explicit default it synthesizes a zero value; a CURRENT_TIMESTAMP
// default is materialized into a concrete time string.
// `ctx` may be nil, in which case the wall clock is used for the timestamp.
func generateOriginDefaultValue(col *model.ColumnInfo, ctx sessionctx.Context) (interface{}, error) {
	var err error
	odValue := col.GetDefaultValue()
	if odValue == nil && mysql.HasNotNullFlag(col.GetFlag()) {
		switch col.GetType() {
		// Just use enum field's first element for OriginDefaultValue.
		case mysql.TypeEnum:
			defEnum, verr := types.ParseEnumValue(col.GetElems(), 1)
			if verr != nil {
				return nil, errors.Trace(verr)
			}
			defVal := types.NewCollateMysqlEnumDatum(defEnum, col.GetCollate())
			return defVal.ToString()
		default:
			zeroVal := table.GetZeroValue(col)
			odValue, err = zeroVal.ToString()
			if err != nil {
				return nil, errors.Trace(err)
			}
		}
	}

	if odValue == strings.ToUpper(ast.CurrentTimestamp) {
		var t time.Time
		if ctx == nil {
			t = time.Now()
		} else {
			t, _ = expression.GetStmtTimestamp(ctx)
		}
		// TIMESTAMP values are stored in UTC; DATETIME keeps the session time.
		if col.GetType() == mysql.TypeTimestamp {
			odValue = types.NewTime(types.FromGoTime(t.UTC()), col.GetType(), col.GetDecimal()).String()
		} else if col.GetType() == mysql.TypeDatetime {
			odValue = types.NewTime(types.FromGoTime(t), col.GetType(), col.GetDecimal()).String()
		}
	}
	return odValue, nil
}
// isVirtualGeneratedColumn checks the column if it is virtual.
// A generated column that is not stored is, by definition, virtual.
func isVirtualGeneratedColumn(col *model.ColumnInfo) bool {
	return col.IsGenerated() && !col.GeneratedStored
}
// indexInfoContains reports whether an index with the given ID is present in
// the slice.
func indexInfoContains(idxID int64, idxInfos []*model.IndexInfo) bool {
	for _, info := range idxInfos {
		if info.ID == idxID {
			return true
		}
	}
	return false
}
// indexInfosToIDList extracts the IDs of the given indexes, preserving order.
func indexInfosToIDList(idxInfos []*model.IndexInfo) []int64 {
	ids := make([]int64, len(idxInfos))
	for i, info := range idxInfos {
		ids[i] = info.ID
	}
	return ids
}
// genChangingColumnUniqueName builds a temporary column name of the form
// "<changingColumnPrefix><origName>_<n>", picking the smallest n whose
// lower-case form does not collide with an existing column name.
func genChangingColumnUniqueName(tblInfo *model.TableInfo, oldCol *model.ColumnInfo) string {
	prefix := fmt.Sprintf("%s%s", changingColumnPrefix, oldCol.Name.O)
	lowerPrefix := strings.ToLower(prefix)

	// Lower-case names of all existing columns, for collision checks.
	used := make(map[string]bool, len(tblInfo.Columns))
	for _, col := range tblInfo.Columns {
		used[col.Name.L] = true
	}

	suffix := 0
	for used[fmt.Sprintf("%s_%d", lowerPrefix, suffix)] {
		suffix++
	}
	return fmt.Sprintf("%s_%d", prefix, suffix)
}
// genChangingIndexUniqueName builds a temporary index name of the form
// "<changingIndexPrefix><origName>_<n>", picking the smallest n whose
// lower-case form does not collide with an existing index name.
func genChangingIndexUniqueName(tblInfo *model.TableInfo, idxInfo *model.IndexInfo) string {
	prefix := fmt.Sprintf("%s%s", changingIndexPrefix, idxInfo.Name.O)
	lowerPrefix := strings.ToLower(prefix)

	// Lower-case names of all existing indexes, for collision checks.
	used := make(map[string]bool, len(tblInfo.Indices))
	for _, idx := range tblInfo.Indices {
		used[idx.Name.L] = true
	}

	suffix := 0
	for used[fmt.Sprintf("%s_%d", lowerPrefix, suffix)] {
		suffix++
	}
	return fmt.Sprintf("%s_%d", prefix, suffix)
}
// getChangingIndexOriginName recovers the original index name from a
// temporary "changing" index name by stripping the prefix and the trailing
// "_<n>" uniqueness suffix.
func getChangingIndexOriginName(changingIdx *model.IndexInfo) string {
	name := strings.TrimPrefix(changingIdx.Name.O, changingIndexPrefix)
	// Since the unique idxName may contain the suffix number (indexName_num), better trim the suffix.
	if cut := strings.LastIndex(name, "_"); cut >= 0 {
		return name[:cut]
	}
	return name
}
// getChangingColumnOriginName recovers the original column name from a
// temporary "changing" column name by stripping the prefix and the trailing
// "_<n>" uniqueness suffix.
func getChangingColumnOriginName(changingColumn *model.ColumnInfo) string {
	name := strings.TrimPrefix(changingColumn.Name.O, changingColumnPrefix)
	if cut := strings.LastIndex(name, "_"); cut >= 0 {
		return name[:cut]
	}
	return name
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracing
import "fmt"
// PlanTrace indicates for the Plan trace information
type PlanTrace struct {
	// mapChildren deduplicates IDs added through AppendChildrenID.
	mapChildren map[int]struct{}
	TP          string `json:"type"`
	ProperType  string `json:"property"`
	// ExplainInfo should be implemented by each implemented Plan
	ExplainInfo string `json:"info"`
	// Children holds the tree links; only ChildrenID is serialized.
	Children   []*PlanTrace `json:"-"`
	ChildrenID []int        `json:"children"`
	ID         int          `json:"id"`
	Cost       float64      `json:"cost"`
	// Selected is set when the node appears in the final chosen plan.
	Selected bool `json:"selected"`
}
// AppendChildrenID appends children ids, ignoring IDs that were already added.
func (p *PlanTrace) AppendChildrenID(ids ...int) {
	if p.mapChildren == nil {
		p.mapChildren = make(map[int]struct{})
	}
	for _, childID := range ids {
		if _, seen := p.mapChildren[childID]; seen {
			continue
		}
		p.mapChildren[childID] = struct{}{}
		p.ChildrenID = append(p.ChildrenID, childID)
	}
}
// LogicalOptimizeTracer indicates the trace for the whole logicalOptimize processing
type LogicalOptimizeTracer struct {
	// curRuleTracer indicates the current rule Tracer during optimize by rule
	curRuleTracer *LogicalRuleOptimizeTracer
	// FinalLogicalPlan is the flattened plan recorded after all rules ran.
	FinalLogicalPlan []*PlanTrace                  `json:"final"`
	Steps            []*LogicalRuleOptimizeTracer `json:"steps"`
}
// AppendRuleTracerBeforeRuleOptimize add plan tracer before optimize
// and makes it the current rule tracer for subsequent step recording.
func (tracer *LogicalOptimizeTracer) AppendRuleTracerBeforeRuleOptimize(index int, name string, before *PlanTrace) {
	rule := buildLogicalRuleOptimizeTracerBeforeOptimize(index, name, before)
	tracer.curRuleTracer = rule
	tracer.Steps = append(tracer.Steps, rule)
}
// AppendRuleTracerStepToCurrent add rule optimize step to current
// rule tracer; Index records the step's position within that rule.
func (tracer *LogicalOptimizeTracer) AppendRuleTracerStepToCurrent(id int, tp, reason, action string) {
	cur := tracer.curRuleTracer
	step := LogicalRuleOptimizeTraceStep{
		ID:     id,
		TP:     tp,
		Reason: reason,
		Action: action,
		Index:  len(cur.Steps),
	}
	cur.Steps = append(cur.Steps, step)
}
// RecordFinalLogicalPlan add plan trace after logical optimize.
// It also drops rule tracers that recorded no steps.
func (tracer *LogicalOptimizeTracer) RecordFinalLogicalPlan(final *PlanTrace) {
	tracer.FinalLogicalPlan = toFlattenPlanTrace(final)
	tracer.removeUselessStep()
}
// removeUselessStep discards rule tracers that produced no optimize steps.
func (tracer *LogicalOptimizeTracer) removeUselessStep() {
	kept := make([]*LogicalRuleOptimizeTracer, 0, len(tracer.Steps))
	for _, ruleTracer := range tracer.Steps {
		if len(ruleTracer.Steps) == 0 {
			continue
		}
		kept = append(kept, ruleTracer)
	}
	tracer.Steps = kept
}
// LogicalRuleOptimizeTracer indicates the trace for the LogicalPlan tree before and after
// logical rule optimize
type LogicalRuleOptimizeTracer struct {
	RuleName string `json:"name"`
	// Before is the flattened plan snapshot taken before the rule ran.
	Before []*PlanTrace                   `json:"before"`
	Steps  []LogicalRuleOptimizeTraceStep `json:"steps"`
	// Index is the rule's position in the optimization sequence.
	Index int `json:"index"`
}
// buildLogicalRuleOptimizeTracerBeforeOptimize build rule tracer before rule optimize,
// snapshotting the plan tree in flattened form.
func buildLogicalRuleOptimizeTracerBeforeOptimize(
	index int, name string, before *PlanTrace) *LogicalRuleOptimizeTracer {
	snapshot := toFlattenPlanTrace(before)
	return &LogicalRuleOptimizeTracer{
		RuleName: name,
		Index:    index,
		Before:   snapshot,
		Steps:    make([]LogicalRuleOptimizeTraceStep, 0),
	}
}
// LogicalRuleOptimizeTraceStep indicates the trace for the detailed optimize changing in
// logical rule optimize
type LogicalRuleOptimizeTraceStep struct {
	Action string `json:"action"`
	Reason string `json:"reason"`
	TP     string `json:"type"`
	// ID is the plan node the step applies to.
	ID int `json:"id"`
	// Index is the step's position within its rule tracer.
	Index int `json:"index"`
}
// toFlattenPlanTrace transform plan into PlanTrace: it flattens the plan tree
// rooted at root into a list of shallow node copies.
func toFlattenPlanTrace(root *PlanTrace) []*PlanTrace {
	w := &flattenWrapper{flatten: make([]*PlanTrace, 0)}
	flattenLogicalPlanTrace(root, w)
	return w.flatten
}
// flattenWrapper accumulates flattened plan nodes during tree traversal.
type flattenWrapper struct {
	flatten []*PlanTrace
}
// flattenLogicalPlanTrace copies node (and recursively its children) into the
// wrapper, replacing tree links with ChildrenID references. Children are
// appended before their parent (post-order); a leaf is appended immediately.
// Note: only ID, TP, Cost and ExplainInfo are copied; ProperType and Selected
// are intentionally(?) left at zero values — confirm if they are needed here.
func flattenLogicalPlanTrace(node *PlanTrace, wrapper *flattenWrapper) {
	newNode := &PlanTrace{
		ID:          node.ID,
		TP:          node.TP,
		ChildrenID:  make([]int, 0),
		Cost:        node.Cost,
		ExplainInfo: node.ExplainInfo,
	}
	if len(node.Children) < 1 {
		wrapper.flatten = append(wrapper.flatten, newNode)
		return
	}
	// Record child IDs first, then recurse so children precede this node
	// in the flattened output.
	for _, child := range node.Children {
		newNode.AppendChildrenID(child.ID)
	}
	for _, child := range node.Children {
		flattenLogicalPlanTrace(child, wrapper)
	}
	wrapper.flatten = append(wrapper.flatten, newNode)
}
// CETraceRecord records an expression and related cardinality estimation result.
type CETraceRecord struct {
	TableName string `json:"table_name"`
	Type      string `json:"type"`
	Expr      string `json:"expr"`
	// TableID is internal only and excluded from JSON output.
	TableID  int64  `json:"-"`
	RowCount uint64 `json:"row_count"`
}
// DedupCETrace deduplicate a slice of *CETraceRecord and return the deduplicated slice.
// Two records are duplicates when all their (comparable) fields are equal;
// the first occurrence wins and input order is preserved.
func DedupCETrace(records []*CETraceRecord) []*CETraceRecord {
	seen := make(map[CETraceRecord]struct{}, len(records))
	deduped := make([]*CETraceRecord, 0, len(records))
	for _, rec := range records {
		if _, dup := seen[*rec]; dup {
			continue
		}
		seen[*rec] = struct{}{}
		deduped = append(deduped, rec)
	}
	return deduped
}
// PhysicalOptimizeTracer indicates the trace for the whole physicalOptimize processing
type PhysicalOptimizeTracer struct {
	// PhysicalPlanCostDetails maps plan keys to their cost breakdowns.
	PhysicalPlanCostDetails map[string]*PhysicalPlanCostDetail `json:"costs"`
	// Candidates maps plan IDs to candidate physical plans considered.
	Candidates map[int]*CandidatePlanTrace `json:"candidates"`
	// final indicates the final physical plan trace
	Final []*PlanTrace `json:"final"`
}
// AppendCandidate appends physical CandidatePlanTrace in tracer.
// If the candidate already exists, the previous candidate would be covered depends on whether it has mapping logical plan:
// an existing candidate with a mapping is never replaced by one without.
func (tracer *PhysicalOptimizeTracer) AppendCandidate(c *CandidatePlanTrace) {
	if prev, exists := tracer.Candidates[c.ID]; exists {
		if len(prev.MappingLogicalPlan) > 0 && len(c.MappingLogicalPlan) == 0 {
			return
		}
	}
	tracer.Candidates[c.ID] = c
}
// RecordFinalPlanTrace records final physical plan trace
// and marks the candidates that made it into the final plan as selected.
func (tracer *PhysicalOptimizeTracer) RecordFinalPlanTrace(root *PlanTrace) {
	tracer.Final = toFlattenPlanTrace(root)
	tracer.buildCandidatesInfo()
}
// CandidatePlanTrace indicates info for candidate
type CandidatePlanTrace struct {
	*PlanTrace
	// MappingLogicalPlan links the candidate back to its logical plan node.
	MappingLogicalPlan string `json:"mapping"`
}
// buildCandidatesInfo builds candidates info: it flags every candidate whose
// ID appears in the final plan as selected. Safe on a nil receiver.
func (tracer *PhysicalOptimizeTracer) buildCandidatesInfo() {
	if tracer == nil || len(tracer.Candidates) == 0 {
		return
	}
	finalIDs := make(map[int]struct{}, len(tracer.Final))
	for _, p := range tracer.Final {
		finalIDs[p.ID] = struct{}{}
	}
	for _, cand := range tracer.Candidates {
		if _, inFinal := finalIDs[cand.ID]; inFinal {
			cand.Selected = true
		}
	}
}
// CodecPlanName returns tp_id of plan, i.e. "<tp>_<id>".
func CodecPlanName(tp string, id int) string {
	return fmt.Sprintf("%s_%d", tp, id)
}
// OptimizeTracer indicates tracer for optimizer
type OptimizeTracer struct {
	// Logical indicates logical plan
	Logical *LogicalOptimizeTracer `json:"logical"`
	// Physical indicates physical plan
	Physical *PhysicalOptimizeTracer `json:"physical"`
	// FinalPlan indicates the plan after post optimize
	FinalPlan []*PlanTrace `json:"final"`
	// IsFastPlan indicates whether the plan is generated by fast plan
	IsFastPlan bool `json:"isFastPlan"`
}
// SetFastPlan sets fast plan: records the flattened plan and flags the
// tracer as having taken the fast-plan path.
func (tracer *OptimizeTracer) SetFastPlan(final *PlanTrace) {
	tracer.FinalPlan = toFlattenPlanTrace(final)
	tracer.IsFastPlan = true
}
// RecordFinalPlan records plan after post optimize
func (tracer *OptimizeTracer) RecordFinalPlan(final *PlanTrace) {
	tracer.FinalPlan = toFlattenPlanTrace(final)
}
// PhysicalPlanCostDetail indicates cost detail
type PhysicalPlanCostDetail struct {
	// Params holds the named inputs that contributed to the cost.
	Params map[string]interface{} `json:"params"`
	TP     string                 `json:"type"`
	// Desc is a human-readable description of the cost formula.
	Desc string  `json:"desc"`
	ID   int     `json:"id"`
	Cost float64 `json:"cost"`
}
// PhysicalPlanCostParam indicates cost params
type PhysicalPlanCostParam struct {
	// Params holds nested named cost inputs.
	Params map[string]interface{} `json:"params"`
	Name   string                 `json:"name"`
	Desc   string                 `json:"desc"`
	ID     int                    `json:"id"`
	Cost   float64                `json:"cost"`
}
// NewPhysicalPlanCostDetail creates a cost detail for the plan with the given
// id and type, with an empty parameter map ready for AddParam calls.
func NewPhysicalPlanCostDetail(id int, tp string) *PhysicalPlanCostDetail {
	detail := PhysicalPlanCostDetail{
		ID:     id,
		TP:     tp,
		Params: map[string]interface{}{},
	}
	return &detail
}
// AddParam adds param and returns the receiver for chaining.
// Empty string values are discarded since they carry no information.
func (d *PhysicalPlanCostDetail) AddParam(k string, v interface{}) *PhysicalPlanCostDetail {
	// discard empty param value
	if s, isStr := v.(string); isStr && s == "" {
		return d
	}
	d.Params[k] = v
	return d
}
// SetDesc sets desc
func (d *PhysicalPlanCostDetail) SetDesc(desc string) {
	d.Desc = desc
}
// GetPlanID gets plan id
func (d *PhysicalPlanCostDetail) GetPlanID() int {
	return d.ID
}
// GetPlanType gets plan type
func (d *PhysicalPlanCostDetail) GetPlanType() string {
	return d.TP
}
// Exists checks whether key exists in params
func (d *PhysicalPlanCostDetail) Exists(k string) bool {
	_, found := d.Params[k]
	return found
}
|
package template
// ParamsMain is the data structure for the Control Plane Initializer template.
type ParamsMain struct {
	// IAMRoles holds the IAM-role parameters rendered into the template.
	IAMRoles *ParamsMainIAMRoles
}
|
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pricing_test
import (
"bytes"
"context"
"io/ioutil"
"math/big"
"reflect"
"testing"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/p2p/streamtest"
pricermock "github.com/ethersphere/bee/pkg/pricer/mock"
"github.com/ethersphere/bee/pkg/pricing"
"github.com/ethersphere/bee/pkg/pricing/pb"
"github.com/ethersphere/bee/pkg/swarm"
)
// testThresholdObserver records the last payment-threshold notification so
// tests can assert on who was notified and with what value.
type testThresholdObserver struct {
	called           bool
	peer             swarm.Address
	paymentThreshold *big.Int
}
// testPriceTableObserver records the last price-table notification so tests
// can assert on who was notified and with which table.
type testPriceTableObserver struct {
	called     bool
	peer       swarm.Address
	priceTable []uint64
}
// NotifyPaymentThreshold captures the notification for later assertions and
// always succeeds.
func (t *testThresholdObserver) NotifyPaymentThreshold(peerAddr swarm.Address, paymentThreshold *big.Int) error {
	t.paymentThreshold = paymentThreshold
	t.peer = peerAddr
	t.called = true
	return nil
}
// NotifyPriceTable captures the notification for later assertions and always
// succeeds.
func (t *testPriceTableObserver) NotifyPriceTable(peerAddr swarm.Address, priceTable []uint64) error {
	t.priceTable = priceTable
	t.peer = peerAddr
	t.called = true
	return nil
}
// TestAnnouncePaymentThreshold wires a payer and a recipient through a
// recording stream, announces a payment threshold, and verifies both the
// on-the-wire protobuf message and the recipient-side observer callback.
func TestAnnouncePaymentThreshold(t *testing.T) {
	logger := logging.New(ioutil.Discard, 0)
	testThreshold := big.NewInt(100000)

	observer := &testThresholdObserver{}

	pricerMockService := pricermock.NewMockService()

	// Recipient side: receives the announcement and notifies the observer.
	recipient := pricing.New(nil, logger, testThreshold, pricerMockService)
	recipient.SetPaymentThresholdObserver(observer)

	peerID := swarm.MustParseHexAddress("9ee7add7")

	// The recorder captures every stream exchanged with the recipient.
	recorder := streamtest.New(
		streamtest.WithProtocols(recipient.Protocol()),
		streamtest.WithBaseAddr(peerID),
	)

	// Payer side: sends the announcement over the recorded stream.
	payer := pricing.New(recorder, logger, testThreshold, pricerMockService)

	paymentThreshold := big.NewInt(10000)

	err := payer.AnnouncePaymentThreshold(context.Background(), peerID, paymentThreshold)
	if err != nil {
		t.Fatal(err)
	}

	records, err := recorder.Records(peerID, "pricing", "1.0.0", "pricing")
	if err != nil {
		t.Fatal(err)
	}

	if l := len(records); l != 1 {
		t.Fatalf("got %v records, want %v", l, 1)
	}

	record := records[0]

	// Decode what the payer actually wrote to the stream.
	messages, err := protobuf.ReadMessages(
		bytes.NewReader(record.In()),
		func() protobuf.Message { return new(pb.AnnouncePaymentThreshold) },
	)
	if err != nil {
		t.Fatal(err)
	}
	if len(messages) != 1 {
		t.Fatalf("got %v messages, want %v", len(messages), 1)
	}

	sentPaymentThreshold := big.NewInt(0).SetBytes(messages[0].(*pb.AnnouncePaymentThreshold).PaymentThreshold)
	if sentPaymentThreshold.Cmp(paymentThreshold) != 0 {
		t.Fatalf("got message with amount %v, want %v", sentPaymentThreshold, paymentThreshold)
	}

	// The recipient must have forwarded the value to its observer.
	if !observer.called {
		t.Fatal("expected observer to be called")
	}

	if observer.paymentThreshold.Cmp(paymentThreshold) != 0 {
		t.Fatalf("observer called with wrong paymentThreshold. got %d, want %d", observer.paymentThreshold, paymentThreshold)
	}

	if !observer.peer.Equal(peerID) {
		t.Fatalf("observer called with wrong peer. got %v, want %v", observer.peer, peerID)
	}
}
// TestAnnouncePaymentThresholdAndPriceTable is the combined-announcement
// variant: it verifies that one message carries both the payment threshold
// and the proximity price table, and that each value reaches its own
// recipient-side observer.
func TestAnnouncePaymentThresholdAndPriceTable(t *testing.T) {
	logger := logging.New(ioutil.Discard, 0)
	testThreshold := big.NewInt(100000)

	observer1 := &testThresholdObserver{}
	observer2 := &testPriceTableObserver{}

	// The mock pricer serves this fixed table to the payer.
	table := []uint64{50, 25, 12, 6}

	priceTableFunc := func() []uint64 {
		return table
	}

	pricerMockService := pricermock.NewMockService(pricermock.WithPriceTableFunc(priceTableFunc))

	// Recipient side: receives the announcement and notifies both observers.
	recipient := pricing.New(nil, logger, testThreshold, pricerMockService)
	recipient.SetPaymentThresholdObserver(observer1)
	recipient.SetPriceTableObserver(observer2)

	peerID := swarm.MustParseHexAddress("9ee7add7")

	recorder := streamtest.New(
		streamtest.WithProtocols(recipient.Protocol()),
		streamtest.WithBaseAddr(peerID),
	)

	// Payer side: sends the combined announcement over the recorded stream.
	payer := pricing.New(recorder, logger, testThreshold, pricerMockService)

	paymentThreshold := big.NewInt(10000)

	err := payer.AnnouncePaymentThresholdAndPriceTable(context.Background(), peerID, paymentThreshold)
	if err != nil {
		t.Fatal(err)
	}

	records, err := recorder.Records(peerID, "pricing", "1.0.0", "pricing")
	if err != nil {
		t.Fatal(err)
	}

	if l := len(records); l != 1 {
		t.Fatalf("got %v records, want %v", l, 1)
	}

	record := records[0]

	// Decode what the payer actually wrote to the stream.
	messages, err := protobuf.ReadMessages(
		bytes.NewReader(record.In()),
		func() protobuf.Message { return new(pb.AnnouncePaymentThreshold) },
	)
	if err != nil {
		t.Fatal(err)
	}
	if len(messages) != 1 {
		t.Fatalf("got %v messages, want %v", len(messages), 1)
	}

	sentPaymentThreshold := big.NewInt(0).SetBytes(messages[0].(*pb.AnnouncePaymentThreshold).PaymentThreshold)
	if sentPaymentThreshold.Cmp(paymentThreshold) != 0 {
		t.Fatalf("got message with amount %v, want %v", sentPaymentThreshold, paymentThreshold)
	}

	sentPriceTable := messages[0].(*pb.AnnouncePaymentThreshold).ProximityPrice
	if !reflect.DeepEqual(sentPriceTable, table) {
		t.Fatalf("got message with table %v, want %v", sentPriceTable, table)
	}

	// Threshold observer assertions.
	if !observer1.called {
		t.Fatal("expected threshold observer to be called")
	}

	if observer1.paymentThreshold.Cmp(paymentThreshold) != 0 {
		t.Fatalf("observer called with wrong paymentThreshold. got %d, want %d", observer1.paymentThreshold, paymentThreshold)
	}

	if !observer1.peer.Equal(peerID) {
		t.Fatalf("threshold observer called with wrong peer. got %v, want %v", observer1.peer, peerID)
	}

	// Price-table observer assertions.
	if !observer2.called {
		t.Fatal("expected table observer to be called")
	}

	if !reflect.DeepEqual(observer2.priceTable, table) {
		t.Fatalf("table observer called with wrong priceTable. got %d, want %d", observer2.priceTable, table)
	}

	if !observer2.peer.Equal(peerID) {
		t.Fatalf("table observer called with wrong peer. got %v, want %v", observer2.peer, peerID)
	}
}
|
package main
import "fmt"
// generadorimPares returns a generator closure that yields the odd numbers
// 1, 3, 5, ... on successive calls.
func generadorimPares() func() int {
	next := 1 // captured by the closure; persists between calls
	return func() int {
		current := next
		next += 2
		return current
	}
}
// main prints the first five odd numbers produced by the generator.
func main() {
	next := generadorimPares()
	for i := 0; i < 5; i++ {
		fmt.Println(next())
	}
}
|
package main
import (
"flag"
"fmt"
"os"
)
// Usage lists the supported CLI subcommands and their arguments.
const Usage = `
addBlock --data DATA "add a block to block chain"
printChain "print all blocks"
`

// CLI dispatches command-line subcommands against a blockchain.
type CLI struct {
	// bc is the chain the commands operate on.
	// NOTE(review): the type name "BlockChian" (declared elsewhere) looks
	// misspelled ("BlockChain"); renaming must be coordinated with that file.
	bc *BlockChian
}
// Run parses os.Args and dispatches to the matching subcommand, exiting with
// status 1 on missing arguments or an unknown command.
func (cli *CLI) Run() {
	if len(os.Args) < 2 {
		fmt.Println("too few parameters!\n", Usage)
		os.Exit(1)
	}

	addBlockCmd := flag.NewFlagSet("addBlock", flag.ExitOnError)
	printChainCmd := flag.NewFlagSet("printChain", flag.ExitOnError)
	blockData := addBlockCmd.String("data", "", "block info")

	switch os.Args[1] {
	case "addBlock":
		CheckErr(addBlockCmd.Parse(os.Args[2:]))
		if !addBlockCmd.Parsed() {
			break
		}
		if *blockData == "" {
			fmt.Println("data is empty")
			os.Exit(1)
		}
		cli.AddBlock(*blockData)
	case "printChain":
		CheckErr(printChainCmd.Parse(os.Args[2:]))
		if printChainCmd.Parsed() {
			cli.PrintChain()
		}
	default:
		fmt.Println("invalid cmd\n", Usage)
		os.Exit(1)
	}
}
|
package treecmds
import (
"fmt"
"strings"
"github.com/Nv7-Github/Nv7Haven/eod/trees"
"github.com/Nv7-Github/Nv7Haven/eod/types"
"github.com/bwmarrin/discordgo"
)
// NotationCmd DMs the requesting user the notation tree for a single element.
// Short notations are sent as a plain DM; longer ones are attached as a file.
func (b *TreeCmds) NotationCmd(elem string, m types.Msg, rsp types.Rsp) {
	// Look up the per-guild data under the read lock.
	b.lock.RLock()
	dat, exists := b.dat[m.GuildID]
	b.lock.RUnlock()
	if !exists {
		return
	}
	rsp.Acknowledge()

	tree := trees.NewNotationTree(dat)
	dat.Lock.RLock()
	msg, suc := tree.AddElem(elem)
	dat.Lock.RUnlock()
	if !suc {
		rsp.ErrorMessage(msg)
		return
	}
	txt := tree.String()
	// Discord messages are capped at 2000 characters; below that we can DM
	// the notation directly.
	if len(txt) <= 2000 {
		id := rsp.Message("Sent notation in DMs!")

		// Remember which element the reply message refers to.
		dat.SetMsgElem(id, elem)
		b.lock.Lock()
		b.dat[m.GuildID] = dat
		b.lock.Unlock()

		rsp.DM(txt)
		return
	}
	id := rsp.Message("The notation was too long! Sending it as a file in DMs!")

	dat.SetMsgElem(id, elem)
	b.lock.Lock()
	b.dat[m.GuildID] = dat
	b.lock.Unlock()

	channel, err := b.dg.UserChannelCreate(m.Author.ID)
	if rsp.Error(err) {
		return
	}
	buf := strings.NewReader(txt)
	el, _ := dat.GetElement(elem)
	// NOTE(review): the send error is ignored here — confirm whether a failed
	// DM should be reported via rsp.Error like the channel-create failure.
	b.dg.ChannelMessageSendComplex(channel.ID, &discordgo.MessageSend{
		Content: fmt.Sprintf("Notation for **%s**:", el.Name),
		Files: []*discordgo.File{
			{
				Name:        "notation.txt",
				ContentType: "text/plain",
				Reader:      buf,
			},
		},
	})
}
// CatNotationCmd DMs the requesting user the combined notation tree for every
// element in a category. Short notations are sent as a plain DM; longer ones
// are attached as a file.
func (b *TreeCmds) CatNotationCmd(catName string, m types.Msg, rsp types.Rsp) {
	// Look up the per-guild data under the read lock.
	b.lock.RLock()
	dat, exists := b.dat[m.GuildID]
	b.lock.RUnlock()
	if !exists {
		return
	}
	rsp.Acknowledge()

	tree := trees.NewNotationTree(dat)
	cat, res := dat.GetCategory(catName)
	if !res.Exists {
		rsp.ErrorMessage(res.Message)
		// Bug fix: previously execution fell through with a zero-value
		// category after reporting the error; bail out like NotationCmd does.
		return
	}
	dat.Lock.RLock()
	for elem := range cat.Elements {
		msg, suc := tree.AddElem(elem)
		if !suc {
			dat.Lock.RUnlock()
			rsp.ErrorMessage(msg)
			return
		}
	}
	dat.Lock.RUnlock()
	txt := tree.String()
	// Discord messages are capped at 2000 characters; below that we can DM
	// the notation directly.
	if len(txt) <= 2000 {
		rsp.Message("Sent notation in DMs!")
		rsp.DM(txt)
		return
	}
	rsp.Message("The notation was too long! Sending it as a file in DMs!")
	b.lock.Lock()
	b.dat[m.GuildID] = dat
	b.lock.Unlock()

	channel, err := b.dg.UserChannelCreate(m.Author.ID)
	if rsp.Error(err) {
		return
	}
	buf := strings.NewReader(txt)
	b.dg.ChannelMessageSendComplex(channel.ID, &discordgo.MessageSend{
		Content: fmt.Sprintf("Notation for category **%s**:", cat.Name),
		Files: []*discordgo.File{
			{
				Name:        "notation.txt",
				ContentType: "text/plain",
				Reader:      buf,
			},
		},
	})
}
|
//
// Copyright 2020 IBM Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package resources
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
operatorv1 "github.com/IBM/ibm-auditlogging-operator/api/v1"
"github.com/IBM/ibm-auditlogging-operator/controllers/constant"
"github.com/IBM/ibm-auditlogging-operator/controllers/util"
)
// splunkKey is the Secret data key that holds the Splunk CA certificate.
const splunkKey = "splunkCA.pem"

// qRadarKey is the Secret data key that holds the QRadar certificate.
const qRadarKey = "qradar.crt"
// BuildSecret returns a Secret object for the audit-logging client
// certificates, pre-populated with empty entries for both supported SIEM
// certificate keys so they can be filled in later.
func BuildSecret(instance *operatorv1.CommonAudit) *corev1.Secret {
	data := map[string][]byte{
		splunkKey: {},
		qRadarKey: {},
	}
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      AuditLoggingClientCertSecName,
			Namespace: instance.Namespace,
			Labels:    util.LabelsForMetadata(constant.FluentdName),
		},
		Data: data,
	}
}
|
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package repository
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
"github.com/pkg/errors"
"github.com/diegobernardes/flare"
)
// Subscription implements flare.SubscriptionRepositorier.
// It wraps a base repository, optionally injecting errors, forcing created
// IDs, and pinning CreatedAt timestamps for deterministic tests.
type Subscription struct {
	// err, when set, is returned by every method before delegating.
	err error
	// hasSubscriptionErr and triggerErr override err for their specific methods.
	hasSubscriptionErr error
	triggerErr         error
	// base is the real repository all calls are delegated to.
	base flare.SubscriptionRepositorier
	// date replaces CreatedAt on every returned/created subscription.
	date time.Time
	// createId, when non-empty, is forced as the ID of created subscriptions.
	createId string
}
// Find mock flare.SubscriptionRepositorier.FindAll.
// Delegates to the base repository and pins every CreatedAt to s.date.
func (s *Subscription) Find(
	ctx context.Context, pagination *flare.Pagination, resourceId string,
) ([]flare.Subscription, *flare.Pagination, error) {
	if s.err != nil {
		return nil, nil, s.err
	}

	results, page, err := s.base.Find(ctx, pagination, resourceId)
	if err != nil {
		return nil, nil, err
	}
	for i := range results {
		results[i].CreatedAt = s.date
	}
	return results, page, nil
}
// FindByID mock flare.SubscriptionRepositorier.FindOne.
func (s *Subscription) FindByID(
	ctx context.Context, resourceId, id string,
) (*flare.Subscription, error) {
	if s.err != nil {
		return nil, s.err
	}
	subscription, findErr := s.base.FindByID(ctx, resourceId, id)
	if findErr != nil {
		return nil, findErr
	}
	// Pin the creation time so test output is deterministic.
	subscription.CreatedAt = s.date
	return subscription, nil
}
// FindByPartition mock flare.SubscriptionRepositorier.FindByPartition
func (s *Subscription) FindByPartition(
	ctx context.Context, resourceID, partition string,
) (<-chan flare.Subscription, <-chan error, error) {
	if injected := s.err; injected != nil {
		return nil, nil, injected
	}
	return s.base.FindByPartition(ctx, resourceID, partition)
}
// Create mock flare.SubscriptionRepositorier.Create.
func (s *Subscription) Create(ctx context.Context, subcr *flare.Subscription) error {
	if s.err != nil {
		return s.err
	}
	// Override the ID when the test configured a fixed one.
	if id := s.createId; id != "" {
		subcr.ID = id
	}
	createErr := s.base.Create(ctx, subcr)
	if createErr != nil {
		return createErr
	}
	// Pin the creation time so test output is deterministic.
	subcr.CreatedAt = s.date
	return nil
}
// Delete mock flare.SubscriptionRepositorier.Delete.
func (s *Subscription) Delete(ctx context.Context, resourceId, id string) error {
	if injected := s.err; injected != nil {
		return injected
	}
	return s.base.Delete(ctx, resourceId, id)
}
// Trigger mock flare.SubscriptionRepositorier.Trigger.
// triggerErr takes precedence over the generic err when both are set.
func (s *Subscription) Trigger(
	ctx context.Context,
	action string,
	document *flare.Document,
	subscription *flare.Subscription,
	fn func(context.Context, *flare.Document, *flare.Subscription, string) error,
) error {
	if s.triggerErr != nil {
		return s.triggerErr
	}
	if s.err != nil {
		return s.err
	}
	return s.base.Trigger(ctx, action, document, subscription, fn)
}
// newSubscription builds a Subscription mock, applying each option in order.
func newSubscription(options ...func(*Subscription)) *Subscription {
	subscription := new(Subscription)
	for _, apply := range options {
		apply(subscription)
	}
	return subscription
}
// SubscriptionRepository set the subscription repository.
func SubscriptionRepository(repository flare.SubscriptionRepositorier) func(*Subscription) {
	return func(s *Subscription) {
		s.base = repository
	}
}
// SubscriptionCreateId set id on subscription.
func SubscriptionCreateId(id string) func(*Subscription) {
	return func(s *Subscription) {
		s.createId = id
	}
}
// SubscriptionError set the error to be returned during calls.
func SubscriptionError(err error) func(*Subscription) {
	return func(s *Subscription) {
		s.err = err
	}
}
// SubscriptionTriggerError set the error to be returned during trigger calls.
func SubscriptionTriggerError(err error) func(*Subscription) {
	return func(s *Subscription) {
		s.triggerErr = err
	}
}
// SubscriptionHasSubscriptionError set the error to be returned during hasSubscription calls.
func SubscriptionHasSubscriptionError(err error) func(*Subscription) {
	return func(s *Subscription) {
		s.hasSubscriptionErr = err
	}
}
// SubscriptionDate set the date to be used at time fields.
func SubscriptionDate(date time.Time) func(*Subscription) {
	return func(s *Subscription) {
		s.date = date
	}
}
// SubscriptionLoadSliceByteSubscription load a list of encoded subscriptions layout into
// repository.
//
// content is a JSON array of subscription documents; each entry is decoded,
// its endpoint URL parsed, and the result persisted through Subscription.Create.
// Any decode, parse, or persistence failure panics — this is a test helper,
// so malformed fixture data is treated as a programming error.
func SubscriptionLoadSliceByteSubscription(content []byte) func(*Subscription) {
	return func(s *Subscription) {
		// Anonymous struct mirroring the expected JSON wire layout.
		subscriptions := make([]struct {
			Id       string `json:"id"`
			Endpoint struct {
				URL     string      `json:"url"`
				Method  string      `json:"method"`
				Headers http.Header `json:"headers"`
			} `json:"endpoint"`
			Delivery struct {
				Success []int `json:"success"`
				Discard []int `json:"discard"`
			} `json:"delivery"`
			Resource struct {
				Id string `json:"id"`
			} `json:"resource"`
			CreatedAt time.Time `json:"createdAt"`
		}, 0)
		if err := json.Unmarshal(content, &subscriptions); err != nil {
			panic(errors.Wrap(err,
				fmt.Sprintf("error during unmarshal of '%s' into '%v'", string(content), subscriptions),
			))
		}
		for _, rawSubscription := range subscriptions {
			// The wire format carries the URL as a string; flare expects *url.URL.
			uriParsed, err := url.Parse(rawSubscription.Endpoint.URL)
			if err != nil {
				panic(
					errors.Wrap(err, fmt.Sprintf("error during parse '%s' to URL", rawSubscription.Endpoint.URL)),
				)
			}
			err = s.Create(context.Background(), &flare.Subscription{
				ID:        rawSubscription.Id,
				CreatedAt: rawSubscription.CreatedAt,
				Resource:  flare.Resource{ID: rawSubscription.Resource.Id},
				Delivery: flare.SubscriptionDelivery{
					Discard: rawSubscription.Delivery.Discard,
					Success: rawSubscription.Delivery.Success,
				},
				Endpoint: flare.SubscriptionEndpoint{
					URL:     uriParsed,
					Method:  rawSubscription.Endpoint.Method,
					Headers: rawSubscription.Endpoint.Headers,
				},
			})
			if err != nil {
				panic(errors.Wrap(err, "error during flare.Subscription persistence"))
			}
		}
	}
}
|
package error
import (
"github.com/allentom/youcomic-api/services"
"github.com/gin-gonic/gin"
)
// errorMapping maps a sentinel error value to the ApiError used to render it.
// NOTE(review): mutated by RegisterApiError without synchronization — register
// mappings during startup only.
var errorMapping = map[error]ApiError{
	JsonParseError:                  parseJsonApiError,
	UserAuthFailError:               userAuthFailedApiError,
	PermissionError:                 permissionDeniedApiError,
	RequestPathError:                requestPathApiError,
	services.UserPasswordInvalidate: invalidatePasswordApiError,
	services.RecordNotFoundError:    recordNotFoundApiError,
}
// RegisterApiError adds an error => ApiError mapping so RaiseApiError can
// render err with apiError.
// NOTE(review): writes to the package-level map without locking — call during
// startup only.
func RegisterApiError(err error, apiError ApiError) {
	errorMapping[err] = apiError
}
// RaiseApiError looks up the ApiError registered for err (falling back to
// defaultApiError when no mapping exists), renders it with the given context,
// and aborts the request with the corresponding status and JSON error body.
func RaiseApiError(ctx *gin.Context, err error, context map[string]interface{}) {
	apiError, exists := errorMapping[err]
	if !exists {
		apiError = defaultApiError
	}
	// Delegate rendering and aborting so the response shape has one source of truth.
	SendApiError(ctx, err, apiError, context)
}
// SendApiError renders apiError with err and context, then aborts the request
// with the ApiError's status code and a JSON error body.
func SendApiError(ctx *gin.Context, err error, apiError ApiError, context map[string]interface{}) {
	reason := apiError.Render(err, context)
	ctx.AbortWithStatusJSON(apiError.Status, ErrorResponseBody{
		Success: false,
		Reason:  reason,
		Code:    apiError.Code,
	})
}
|
package g2util
import (
"reflect"
)
// ValueIndirect ...值类型
func ValueIndirect(val reflect.Value) reflect.Value {
for val.Kind() == reflect.Ptr {
val = val.Elem()
}
return val
}
// NewValue returns a pointer to a freshly allocated zero value of bean's
// underlying (pointer-indirected) type. For example, both NewValue(5) and
// NewValue(new(int)) return an *int pointing at 0.
func NewValue(bean interface{}) (val interface{}) {
	v := ValueIndirect(reflect.ValueOf(bean))
	return reflect.New(v.Type()).Interface()
}
// ObjectTagInstances collects the values held by obj's pointer fields that
// carry the given struct tag, allocating a zero value for any field that is
// still nil. obj should be a pointer to a struct so its fields are settable;
// fields that are not settable, not addressable, or not pointers are skipped.
func ObjectTagInstances(obj interface{}, tagName string) []interface{} {
	structVal := reflect.ValueOf(obj)
	for structVal.Kind() == reflect.Ptr {
		structVal = structVal.Elem()
	}
	structType := structVal.Type()
	instances := make([]interface{}, 0)
	for i := 0; i < structVal.NumField(); i++ {
		fieldVal := structVal.Field(i)
		fieldDef := structType.Field(i)
		if _, tagged := fieldDef.Tag.Lookup(tagName); !tagged {
			continue
		}
		if fieldVal.Kind() != reflect.Ptr || !fieldVal.CanSet() || !fieldVal.CanAddr() {
			continue
		}
		// Lazily allocate nil fields so every collected instance is usable.
		if fieldVal.IsNil() {
			fieldVal.Set(reflect.New(fieldDef.Type.Elem()))
		}
		instances = append(instances, fieldVal.Interface())
	}
	return instances
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.