text stringlengths 11 4.05M |
|---|
package common
import "github.com/astaxie/beego/context"
// Default code and message used by Success responses.
// NOTE(review): SCREAMING_CASE is not Go convention, but these are exported
// and likely referenced by callers outside this chunk, so they are left unrenamed.
const (
// SUCCESS_CODE is the numeric status reported for successful API calls.
SUCCESS_CODE int = 200
// SUCCESS_MESSAGE is the human-readable message paired with SUCCESS_CODE.
SUCCESS_MESSAGE string = "Success"
)
// ApiResponse is the uniform JSON envelope returned to API clients.
type ApiResponse struct {
	Code    int         `json:"code"`
	Message string      `json:"message"`
	Data    interface{} `json:"data"`
}

// AddCode stores code on the response and returns the receiver so calls
// can be chained builder-style.
func (builder *ApiResponse) AddCode(code int) *ApiResponse {
	builder.Code = code
	return builder
}
// CommonBuilder bundles a request context with the payload and status
// fields used when writing a reply.
type CommonBuilder struct {
// Ctx is the beego request/response context the reply is written to.
Ctx *context.Context
// Response holds the payload to be serialized.
Response interface{}
// encoding flags some payload encoding step.
// NOTE(review): its semantics are not visible in this chunk — confirm at call sites.
encoding bool
// Code is the status code reported to the client.
Code int
// Message is the human-readable status message.
Message string
}
// Success wraps data in an ApiResponse that carries the standard
// success code and message.
func Success(data interface{}) ApiResponse {
	return ApiResponse{
		Code:    SUCCESS_CODE,
		Message: SUCCESS_MESSAGE,
		Data:    data,
	}
}
|
package kv
import (
"time"
"github.com/cerana/cerana/acomm"
)
// TestLockKnownBad verifies that kv-lock requests missing required
// arguments are rejected with the matching "missing arg" error and that
// neither a result nor a stream URL is produced.
func (s *KVS) TestLockKnownBad() {
// Table of bad inputs: each omits one required argument.
tests := []struct {
name string
key string
ttl time.Duration
err string
}{
{name: "no key", err: "missing arg: key"},
{name: "no ttl", key: "foo", err: "missing arg: ttl"},
}
for _, test := range tests {
args := LockArgs{
Key: test.key,
TTL: test.ttl,
}
req, err := acomm.NewRequest(acomm.RequestOptions{
Task: "kv-lock",
Args: args,
})
s.Require().NoError(err, test.name)
// The lock call itself must fail with the exact expected message.
res, streamURL, err := s.KV.lock(req)
s.EqualError(err, test.err, test.name)
s.Nil(streamURL)
s.Nil(res)
}
}
// a port of pkg/kv/kv_test.go's TestLock
func (s *KVS) TestLock() {
lockReq, err := acomm.NewRequest(acomm.RequestOptions{
Task: "kv-lock",
Args: LockArgs{
Key: s.PrefixKey("some-lock"),
TTL: 1 * time.Second,
},
})
s.Require().NoError(err)
// acquire lock
res, streamURL, err := s.KV.lock(lockReq)
s.Require().NoError(err, "should be able to acquire lock")
s.Require().Nil(streamURL)
s.Require().NotNil(res)
lock := res.(Cookie)
res, streamURL, err = s.KV.lock(lockReq)
s.Require().Error(err, "should not be able to acquire an acquired lock")
s.Require().Nil(streamURL)
s.Require().Nil(res)
// unlocking
unlockReq, err := acomm.NewRequest(acomm.RequestOptions{
Task: "kv-unlock",
Args: lock,
})
res, streamURL, err = s.KV.unlock(unlockReq)
s.Require().NoError(err, "unlocking should not fail")
s.Require().Nil(streamURL)
s.Require().Nil(res)
res, streamURL, err = s.KV.unlock(unlockReq)
s.Require().Error(err, "unlocking lost lock should fail")
s.Require().Nil(streamURL)
s.Require().Nil(res)
res, streamURL, err = s.KV.lock(lockReq)
s.Require().NoError(err, "acquiring an unlocked lock should pass")
s.Require().Nil(streamURL)
s.Require().NotNil(res)
lock = res.(Cookie)
renewReq, err := acomm.NewRequest(acomm.RequestOptions{
Task: "kv-renew",
Args: lock,
})
for i := 0; i < 5; i++ {
res, streamURL, err = s.KV.renew(renewReq)
s.Require().NoError(err, "renewing a lock should pass")
s.Require().Nil(streamURL)
s.Require().Nil(res)
time.Sleep(1 * time.Second)
}
time.Sleep(3 * time.Second)
res, streamURL, err = s.KV.renew(renewReq)
s.Require().Error(err, "renewing an expired lock should fail")
s.Require().Nil(streamURL)
s.Require().Nil(res)
// consul's default lock-delay
// see lock-delay at https://www.consul.io/docs/internals/sessions.html
time.Sleep(15 * time.Second)
res, streamURL, err = s.KV.lock(lockReq)
s.Require().NoError(err, "should be able to acquire previously expired lock")
s.Require().Nil(streamURL)
s.Require().NotNil(res)
}
|
package e4
import "testing"
// TestingFatal returns a WrapFunc that calls t.Fatal if error occur
// TestingFatal returns a WrapFunc that aborts the test via t.Fatal
// whenever it is handed a non-nil error; nil errors pass through.
func TestingFatal(t *testing.T) WrapFunc {
	t.Helper()
	return func(err error) error {
		if err != nil {
			t.Helper()
			t.Fatal(err)
		}
		return err
	}
}
|
package handler
import (
clusterRegister "Hybrid_Cluster/clientset/clusterRegister/v1alpha1"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/eks"
cobrautil "Hybrid_Cluster/hybridctl/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
// checkErr logs err when it is non-nil; nil errors are silently ignored.
func checkErr(err error) {
	if err == nil {
		return
	}
	log.Println(err)
}
// GetEKSClient resolves the registered EKS cluster named *clusterName
// via the cluster-register CRD and returns an EKS service client bound
// to that cluster's region.
func GetEKSClient(clusterName *string) *eks.EKS {
	// Fix: the kubeconfig build error was previously discarded with `_`.
	master_config, err := cobrautil.BuildConfigFromFlags("master", "/root/.kube/config")
	checkErr(err)
	clusterRegisterClientSet, err := clusterRegister.NewForConfig(master_config)
	checkErr(err)
	clusterRegisters, err := clusterRegisterClientSet.ClusterRegister("eks").Get(*clusterName, metav1.GetOptions{})
	checkErr(err)
	// session.Must panics on session construction failure; the region
	// comes from the cluster's registration record.
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String(clusterRegisters.Spec.Region),
	}))
	return eks.New(sess)
}
// CreateAddon installs the EKS add-on described by addonInput on the
// cluster it names, returning the raw AWS SDK output and error.
func CreateAddon(addonInput eks.CreateAddonInput) (*eks.CreateAddonOutput, error) {
	svc := GetEKSClient(addonInput.ClusterName)
	return svc.CreateAddon(&eks.CreateAddonInput{
		AddonName:             addonInput.AddonName,
		AddonVersion:          addonInput.AddonVersion,
		ClientRequestToken:    addonInput.ClientRequestToken,
		ClusterName:           addonInput.ClusterName,
		ResolveConflicts:      addonInput.ResolveConflicts,
		ServiceAccountRoleArn: addonInput.ServiceAccountRoleArn,
		Tags:                  addonInput.Tags,
	})
}
// DeleteAddon removes the named add-on from its cluster.
func DeleteAddon(addonInput eks.DeleteAddonInput) (*eks.DeleteAddonOutput, error) {
	svc := GetEKSClient(addonInput.ClusterName)
	return svc.DeleteAddon(&eks.DeleteAddonInput{
		AddonName:   addonInput.AddonName,
		ClusterName: addonInput.ClusterName,
	})
}
// DescribeAddon fetches the current state of the named add-on.
func DescribeAddon(addonInput eks.DescribeAddonInput) (*eks.DescribeAddonOutput, error) {
	svc := GetEKSClient(addonInput.ClusterName)
	return svc.DescribeAddon(&eks.DescribeAddonInput{
		AddonName:   addonInput.AddonName,
		ClusterName: addonInput.ClusterName,
	})
}
// DescribeAddonVersions lists the versions available for an add-on.
// Unlike the cluster-scoped calls this uses the ambient shared AWS
// config rather than a cluster registration.
//
// Fix: KubernetesVersion, MaxResults and NextToken from the caller's
// input were previously dropped, making version filtering and
// pagination impossible.
func DescribeAddonVersions(addonInput eks.DescribeAddonVersionsInput) (*eks.DescribeAddonVersionsOutput, error) {
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	eksSvc := eks.New(sess)
	newAddonInput := &eks.DescribeAddonVersionsInput{
		AddonName:         addonInput.AddonName,
		KubernetesVersion: addonInput.KubernetesVersion,
		MaxResults:        addonInput.MaxResults,
		NextToken:         addonInput.NextToken,
	}
	out, err := eksSvc.DescribeAddonVersions(newAddonInput)
	return out, err
}
// ListAddon lists the add-ons installed on the named cluster.
//
// Fix: MaxResults and NextToken were previously dropped, so callers
// could never page through results.
func ListAddon(addonInput eks.ListAddonsInput) (*eks.ListAddonsOutput, error) {
	eksSvc := GetEKSClient(addonInput.ClusterName)
	newAddonInput := &eks.ListAddonsInput{
		ClusterName: addonInput.ClusterName,
		MaxResults:  addonInput.MaxResults,
		NextToken:   addonInput.NextToken,
	}
	out, err := eksSvc.ListAddons(newAddonInput)
	return out, err
}
// UpdateAddon updates the named add-on on its cluster.
//
// Fix: AddonVersion, ClientRequestToken, ResolveConflicts and
// ServiceAccountRoleArn were previously dropped from the forwarded
// request — notably making it impossible to actually change the
// add-on's version via this wrapper.
func UpdateAddon(addonInput eks.UpdateAddonInput) (*eks.UpdateAddonOutput, error) {
	eksSvc := GetEKSClient(addonInput.ClusterName)
	newAddonInput := &eks.UpdateAddonInput{
		AddonName:             addonInput.AddonName,
		AddonVersion:          addonInput.AddonVersion,
		ClientRequestToken:    addonInput.ClientRequestToken,
		ClusterName:           addonInput.ClusterName,
		ResolveConflicts:      addonInput.ResolveConflicts,
		ServiceAccountRoleArn: addonInput.ServiceAccountRoleArn,
	}
	out, err := eksSvc.UpdateAddon(newAddonInput)
	return out, err
}
|
// Copyright © 2020. All rights reserved.
// Author: Ilya Stroy.
// Contacts: qioalice@gmail.com, https://github.com/qioalice
// License: https://opensource.org/licenses/MIT
package ekamath
// MinI returns the smaller of the two int values a and b.
func MinI(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// MinI8 returns the smaller of the two int8 values a and b.
func MinI8(a, b int8) int8 {
	if a < b {
		return a
	}
	return b
}
// MinI16 returns the smaller of the two int16 values a and b.
func MinI16(a, b int16) int16 {
	if a < b {
		return a
	}
	return b
}
// MinI32 returns the smaller of the two int32 values a and b.
func MinI32(a, b int32) int32 {
	if a < b {
		return a
	}
	return b
}
// MinI64 returns the smaller of the two int64 values a and b.
func MinI64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}
// MinU returns the smaller of the two uint values a and b.
func MinU(a, b uint) uint {
	if a < b {
		return a
	}
	return b
}
// MinU8 returns the smaller of the two uint8 values a and b.
func MinU8(a, b uint8) uint8 {
	if a < b {
		return a
	}
	return b
}
// MinU16 returns the smaller of the two uint16 values a and b.
func MinU16(a, b uint16) uint16 {
	if a < b {
		return a
	}
	return b
}
// MinU32 returns the smaller of the two uint32 values a and b.
func MinU32(a, b uint32) uint32 {
	if a < b {
		return a
	}
	return b
}
// MinU64 returns the smaller of the two uint64 values a and b.
func MinU64(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}
// MaxI returns the larger of the two int values a and b.
func MaxI(a, b int) int {
	if a > b {
		return a
	}
	return b
}
// MaxI8 returns the larger of the two int8 values a and b.
func MaxI8(a, b int8) int8 {
	if a > b {
		return a
	}
	return b
}
// MaxI16 returns the larger of the two int16 values a and b.
func MaxI16(a, b int16) int16 {
	if a > b {
		return a
	}
	return b
}
// MaxI32 returns the larger of the two int32 values a and b.
func MaxI32(a, b int32) int32 {
	if a > b {
		return a
	}
	return b
}
// MaxI64 returns the larger of the two int64 values a and b.
func MaxI64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}
// MaxU returns the larger of the two uint values a and b.
func MaxU(a, b uint) uint {
	if a > b {
		return a
	}
	return b
}
// MaxU8 returns the larger of the two uint8 values a and b.
func MaxU8(a, b uint8) uint8 {
	if a > b {
		return a
	}
	return b
}
// MaxU16 returns the larger of the two uint16 values a and b.
func MaxU16(a, b uint16) uint16 {
	if a > b {
		return a
	}
	return b
}
// MaxU32 returns the larger of the two uint32 values a and b.
func MaxU32(a, b uint32) uint32 {
	if a > b {
		return a
	}
	return b
}
// MaxU64 returns the larger of the two uint64 values a and b.
func MaxU64(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}
|
package npilib
import (
"encoding/xml"
"log"
c "github.com/arkaev/npilib/commands"
)
//Sender : marshal node and send bytes to socket
func startSender(nc *Conn) {
dataToSocket := make(chan []byte)
go func(in chan []byte, client *Conn) {
for data := range in {
client.send(data)
log.Printf("Sent:\n%s\n", data)
}
}(dataToSocket, nc)
go func(in chan c.NCCCommand, out chan []byte) {
for obj := range in {
data, err := xml.MarshalIndent(obj, "", " ")
if err != nil {
log.Printf("error: %v\n", err)
}
out <- data
}
}(nc.commandToSocket, dataToSocket)
}
|
package mj
import "github.com/montanaflynn/stats"
// ---------------------------------------------------------------------
// Type definitions
// ---------------------------------------------------------------------
// LevelHistory is a list of history lines for a particular level.
// LevelHistory is a list of history lines for a particular level.
type LevelHistory struct {
// LevelName identifies the level these records belong to.
LevelName string
// Records holds the level's history lines, one per recorded run.
Records []HistoryLine
}
// ---------------------------------------------------------------------
// Constructors
// ---------------------------------------------------------------------
// NewLevelHistory creates a new LevelHistory object with the specified
// name and history line records.
// NewLevelHistory creates a LevelHistory with the given level name and
// history line records.
func NewLevelHistory(levelName string, records []HistoryLine) LevelHistory {
	return LevelHistory{
		LevelName: levelName,
		Records:   records,
	}
}
// ---------------------------------------------------------------------
// Methods
// ---------------------------------------------------------------------
// Count returns the number of history lines in this level
// Count returns the number of history line records stored for this level.
func (lh LevelHistory) Count() int {
return len(lh.Records)
}
// Mean returns the mean of the time values for all records.
// Mean returns the arithmetic mean of the Seconds values across all
// records; it yields 0 when the record set is empty.
func (lh LevelHistory) Mean() float64 {
	data := make(stats.Float64Data, 0, len(lh.Records))
	for _, rec := range lh.Records {
		data = append(data, float64(rec.Seconds))
	}
	m, err := data.Mean()
	if err != nil {
		return 0
	}
	return m
}
// StandardDeviation returns the standard deviation of the time values
// for all records.
// StandardDeviation returns the sample standard deviation of the
// Seconds values; fewer than two records yields 0.
func (lh LevelHistory) StandardDeviation() float64 {
	data := make(stats.Float64Data, 0, len(lh.Records))
	for _, rec := range lh.Records {
		data = append(data, float64(rec.Seconds))
	}
	if len(data) < 2 {
		return 0
	}
	sd, _ := data.StandardDeviationSample()
	return sd
}
// Confidence returns the low and high estimates of the time values for
// all records at a 95% confidence level.
// Confidence returns the low and high estimates of the time values for
// all records at a 95% confidence level (mean ± 1.96 stdev), clamping
// the low bound at zero.
func (lh LevelHistory) Confidence() (float64, float64) {
	mean := lh.Mean()
	margin := 1.96 * lh.StandardDeviation()
	low := mean - margin
	high := mean + margin
	if low < 0 {
		low = 0
	}
	return low, high
}
|
package plugins
import (
"helm.sh/helm/pkg/kube"
"k8s.io/apimachinery/pkg/runtime"
"helm.sh/helm/pkg/release"
"WarpCloud/walm/pkg/models/k8s"
)
// RunnerType classifies when a plugin runner executes in the release
// lifecycle.
type RunnerType string
const (
// Pre_Install runners execute before the release is installed.
Pre_Install RunnerType = "pre_install"
// Post_Install runners execute after the release is installed.
Post_Install RunnerType = "post_install"
// Unknown is the fallback for unclassified runners.
Unknown RunnerType = "unknown"
// WalmPluginConfigKey is the key under which plugin config is stored.
WalmPluginConfigKey string = "Walm-Plugin-Key"
)
// ResourceUpgradePolicyAnno is the annotation carrying a resource's upgrade policy.
const ResourceUpgradePolicyAnno = "helm.sh/upgrade-policy"
// UpgradePolicy is the policy value meaning "keep the existing resource".
const UpgradePolicy = "keep"
// pluginRunners is the process-wide registry of named plugin runners.
var pluginRunners map[string]*WalmPluginRunner

// register records runner under name, lazily allocating the registry on
// first use. Later registrations with the same name overwrite earlier ones.
func register(name string, runner *WalmPluginRunner) {
	if pluginRunners == nil {
		pluginRunners = make(map[string]*WalmPluginRunner)
	}
	pluginRunners[name] = runner
}
// WalmPluginRunner pairs a plugin's entry point with the lifecycle
// phase it runs in.
type WalmPluginRunner struct {
// Run executes the plugin against the given context with its raw args string.
Run func(context *PluginContext, args string) error
// Type says at which lifecycle phase (pre/post install) Run fires.
Type RunnerType
}
// GetRunner looks up the runner registered under the plugin's name,
// returning nil when none is registered. Indexing a nil map already
// yields the zero value, so the previous explicit nil-map check was
// redundant.
func GetRunner(walmPlugin *k8s.ReleasePlugin) *WalmPluginRunner {
	return pluginRunners[walmPlugin.Name]
}
// PluginContext is the state handed to each plugin runner.
type PluginContext struct {
// KubeClient talks to the target Kubernetes cluster.
KubeClient kube.Interface
// Resources are the rendered manifest objects the plugin may mutate.
Resources []runtime.Object
// R is the Helm release being processed.
R *release.Release
}
|
package validation
import (
"net/url"
"reflect"
"strings"
)
type (
// DataFormat maps a field name to the list of rule strings applied to it.
DataFormat map[string][]string
// Options couples the rule set with the payload struct to validate.
Options struct {
Rules DataFormat
Payload interface{}
}
// Validator runs the configured rules against the payload.
Validator struct {
Options Options
}
)
var (
// validationErrors accumulates per-field error messages during Validate.
// NOTE(review): package-level state makes concurrent Validate calls
// unsafe — confirm single-threaded use.
validationErrors url.Values
)
// New constructs a Validator from the supplied options.
func New(options Options) *Validator {
	return &Validator{Options: options}
}
// Validate applies every configured rule to each payload field and
// returns the accumulated errors keyed by field name.
func (validator *Validator) Validate() url.Values {
	validationErrors = url.Values{}
	props := validator.getPayloadProperties()
	for name, val := range props {
		validator.validateProperty(name, val)
	}
	return validationErrors
}
// getPayloadProperties maps each payload struct field's `json` tag to
// its value.
//
// Generalization: the payload is dereferenced with reflect.Indirect, so
// both struct values and pointers to structs are accepted (the original
// panicked on pointers).
func (validator *Validator) getPayloadProperties() map[string]interface{} {
	concreteValues := reflect.Indirect(reflect.ValueOf(validator.Options.Payload))
	properties := make(map[string]interface{}, concreteValues.Type().NumField())
	for i := 0; i < concreteValues.Type().NumField(); i++ {
		field := concreteValues.Type().Field(i)
		properties[field.Tag.Get("json")] = concreteValues.Field(i).Interface()
	}
	return properties
}
// validateProperty applies every rule configured for the named field.
func (validator *Validator) validateProperty(name string, value interface{}) {
	for _, rule := range validator.Options.Rules[name] {
		resolveValidationMethod(rule, name, value)
	}
}
// resolveValidationMethod parses an optional ":param" suffix off the
// rule name and dispatches to the matching validation function.
// Unrecognized rule names are ignored.
func resolveValidationMethod(ruleName string, name string, value interface{}) {
	var params string
	if strings.Contains(ruleName, ":") {
		parts := strings.Split(ruleName, ":")
		ruleName = parts[0]
		params = parts[1]
	}
	// Every rule below operates on string fields. Fix: skip non-string
	// values instead of panicking on the unchecked type assertion.
	str, ok := value.(string)
	if !ok {
		return
	}
	switch ruleName {
	case "required":
		Required(name, str)
	case "integer":
		Integer(name, str)
	case "str_max":
		StrMax(name, str, params)
	case "str_min":
		StrMin(name, str, params)
	case "int_max":
		IntMax(name, str, params)
	case "int_min":
		IntMin(name, str, params)
	case "url":
		Url(name, str)
	case "jp_kanji":
		JPKanji(name, str)
	case "jp_hiragana":
		JPHiragana(name, str)
	case "jp_katakana":
		JPKatakana(name, str)
	case "jp_kana":
		JPKana(name, str)
	case "jp_all":
		JPAll(name, str)
	}
}
// addError appends message to the named field's error list.
// url.Values.Add performs exactly the append-or-create logic the
// original hand-rolled.
func addError(name string, message string) {
	validationErrors.Add(name, message)
}
|
package elementary
import (
"errors"
)
// Various errors a list function can return.
var (
ErrDeleteSentinel = errors.New("cannot delete sentinel of list")
)
// NewLinkedList creates a new instance of a linked list data structure, which
// is just an arrangement of elements in a linear order. The list is doubly
// linked, i.e. each element has a pointer to its next and previous elements,
// thereby providing a simple, but flexible representation of a dynamic set.
// Further, the list contains a dummy element, called the sentinel. The sentinel
// always lies between the head and the tail of the list - its previous pointer
// will point the tail, while the tail's next pointer points to the sentinel,
// thereby providing a circular, doubly linked list. This allows for simplifying
// each list operation's boundary conditions, but adds an extra element, thus
// adding to increased memory usage.
func NewLinkedList() *List {
// create sentinel value
s := &ListElement{
Key: "",
Value: 0,
}
s.next = s
s.prev = s
// insert sentinel into the list
return &List{
sent: s,
}
}
// List defines a queue structure with a head and tail element and a count of
// the elements in it.
type List struct {
sent *ListElement
}
// ListElement defines an element of the list.
type ListElement struct {
next *ListElement
prev *ListElement
Key string
Value int
}
// IsEmpty checks if the list is empty, by comparing the sentinel's next and
// previous values.
func (l *List) IsEmpty() bool {
return l.sent.next == l.sent.prev
}
// Insert adds a new element with a given value to the list, by inserting it
// right after the sentinel.
func (l *List) Insert(key string, val int) {
el := &ListElement{
next: l.sent.next,
prev: l.sent,
Key: key,
Value: val,
}
// insert element between sentinel and the currently next element.
l.sent.next.prev = el
l.sent.next = el
}
// Search searches for an element with a given key by iteratively checking the
// next element of the list.
func (l *List) Search(key string) (*ListElement, bool) {
el := l.sent.next
for el != l.sent {
if el.Key == key {
return el, true
}
el = el.next
}
return nil, false
}
// Delete removes a given element from the list.
func (l *List) Delete(el *ListElement) error {
if el == l.sent {
return ErrDeleteSentinel
}
el.prev.next = el.next
el.next.prev = el.prev
return nil
}
// Traverse loops through each element in the list until it reaches the
// sentinel.
func (l *List) Traverse(f func(*ListElement)) {
el := l.sent.next
for el != l.sent {
f(el)
el = el.next
}
}
|
package main
import (
"MercerFrame/MercerServer"
"fmt"
)
// main wires up a demo MercerServer with GET/POST routes and a
// websocket endpoint, then listens on port 8080.
func main() {
	r := MercerServer.Default()
	// Query-string parameters, with and without a default.
	r.Get("/test", func(ctx *MercerServer.Context) {
		ctx.WriteOnWeb("hello")
		name := ctx.DefaultQuery("name", "no one")
		age := ctx.Query("age")
		fmt.Println("name = " + name + "age = " + age)
	})
	// Path parameter (":name").
	r.Get("/user/age/:name", func(ctx *MercerServer.Context) {
		ctx.WriteOnWeb("get param")
		name := ctx.PraParam("name")
		fmt.Println("name = " + name)
	})
	// Form-encoded POST body parameters.
	r.Post("/test2", func(ctx *MercerServer.Context) {
		ctx.WriteOnWeb("取post参数")
		message := ctx.PostForm("message")
		name := ctx.DefaultPostForm("name", "no one")
		fmt.Println(message, name)
	})
	// Websocket endpoint; only open/close are handled here.
	r.WebSocket1("/ws", MercerServer.WebSocketFunc{
		OnOpen: func(ctx *MercerServer.WebSocketContext) {
			fmt.Println("open")
		},
		OnClose: func(ctx *MercerServer.WebSocketContext) {
			ctx.Conn.Close()
		},
		OnError: nil,
	})
	r.Play(8080)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package audio
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/audio"
"chromiumos/tast/local/audio/crastestclient"
"chromiumos/tast/local/chrome"
)
// SetupLoopbackDevice loads the ALSA loopback (aloop) module and selects the
// loopback devices as the active output and input. It returns a cleanup
// function the caller must invoke with the ORIGINAL (unshortened) context;
// on setup failure the module is unloaded internally and only the error is
// returned.
func SetupLoopbackDevice(ctx context.Context, cr *chrome.Chrome) (cleanup func(context.Context), err error) {
// Reserve time on the working context so cleanup can still run if the
// caller's deadline is close; ctxForCleanUp keeps the full deadline.
timeForCleanUp := 10 * time.Second
ctxForCleanUp := ctx
ctx, cancel := ctxutil.Shorten(ctx, timeForCleanUp)
defer cancel()
unload, err := audio.LoadAloop(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to load ALSA loopback module")
}
cleanup = func(ctx context.Context) {
// Wait for no stream before unloading aloop as unloading while there is a stream
// will cause the stream in ARC to be in an invalid state.
_ = crastestclient.WaitForNoStream(ctx, 5*time.Second)
unload(ctx)
}
if err := audio.SetupLoopback(ctx, cr); err != nil {
// Setup failed: unload immediately using the unshortened context.
cleanup(ctxForCleanUp)
return nil, errors.Wrap(err, "failed to setup loopback")
}
return cleanup, nil
}
|
package firiclient
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"strconv"
"time"
)
func NewSigner(clientId string, apiKey string, secret []byte) *signer {
return &signer{
clientId: clientId,
apiKey: apiKey,
secretKey: secret,
validForMillis: 2000,
}
}
type SignedData struct {
ClientID string
Signature string
Timestamp time.Time
ValidForMillis int64
}
type signer struct {
apiKey string
clientId string
validForMillis int64
secretKey []byte
}
func (s *signer) Sign(ts time.Time) (*SignedData, error) {
validForMillis := s.validForMillis
type body struct {
Timestamp string `json:"timestamp"`
ValidForMillis string `json:"validity"`
}
data, err := json.Marshal(&body{
Timestamp: strconv.FormatInt(ts.Unix(), 10),
ValidForMillis: strconv.FormatInt(validForMillis, 10),
})
if err != nil {
return nil, err
}
h := hmac.New(sha256.New, s.secretKey)
_, err = h.Write(data)
if err != nil {
return nil, err
}
sig := hex.EncodeToString(h.Sum(nil))
signed := &SignedData{
ClientID: s.clientId,
Signature: sig,
Timestamp: ts,
ValidForMillis: validForMillis,
}
return signed, nil
}
|
package database
import (
"fmt"
"net/url"
"github.com/secmohammed/anonymous-message-board-golang/config"
log "github.com/siruspen/logrus"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
// DatabaseConnection exposes the shared gorm handle to consumers.
type DatabaseConnection interface {
Get() *gorm.DB
}
// databaseConnection is the concrete DatabaseConnection wrapping one *gorm.DB.
type databaseConnection struct {
DB *gorm.DB
}
// NewDatabaseConnection opens a postgres connection from the db.* keys of
// the application config and returns it wrapped as a DatabaseConnection.
// The process exits (log.Fatal) if the connection cannot be established.
func NewDatabaseConnection(c *config.Config) DatabaseConnection {
config := c.Get()
user := config.GetString("db.username")
password := config.GetString("db.password")
database := config.GetString("db.database")
host := config.GetString("db.host")
port := config.GetInt("db.port")
// Optional query logging, enabled by the db.log flag.
// NOTE(review): when db.log is false, Logger stays a nil interface and
// gorm falls back to its own default — confirm that is the intent.
var enableLogging logger.Interface
if config.GetBool("db.log") {
enableLogging = logger.Default
}
// Assemble a postgres DSN URL; sslmode is hard-disabled here.
dsn := url.URL{
User: url.UserPassword(user, password),
Scheme: "postgres",
Host: fmt.Sprintf("%s:%d", host, port),
Path: database,
RawQuery: (&url.Values{"sslmode": []string{"disable"}}).Encode(),
}
db, err := gorm.Open(postgres.Open(dsn.String()), &gorm.Config{
Logger: enableLogging,
})
if err != nil {
log.Fatal("database connection failed")
}
// Optional schema sync (migrations) controlled by db.sync.
if config.GetBool("db.sync") {
synchronize(db)
}
return &databaseConnection{DB: db}
}
// Get returns the underlying gorm database handle.
func (d *databaseConnection) Get() *gorm.DB {
return d.DB
}
|
package leetcode_0026_从排序数组中删除重复项
/*
给定一个排序数组,你需要在原地删除重复出现的元素,使得每个元素只出现一次,返回移除后数组的新长度。
不要使用额外的数组空间,你必须在原地修改输入数组并在使用 O(1) 额外空间的条件下完成。
示例 1:
给定数组 nums = [1,1,2],
函数应该返回新的长度 2, 并且原数组 nums 的前两个元素被修改为 1, 2。
你不需要考虑数组中超出新长度后面的元素。
示例 2:
给定 nums = [0,0,1,1,1,2,2,3,3,4],
函数应该返回新的长度 5, 并且原数组 nums 的前五个元素被修改为 0, 1, 2, 3, 4。
你不需要考虑数组中超出新长度后面的元素。
说明:
为什么返回数值是整数,但输出的答案是数组呢?
请注意,输入数组是以“引用”方式传递的,这意味着在函数里修改输入数组对于调用者是可见的。
你可以想象内部操作如下:
// nums 是以“引用”方式传递的。也就是说,不对实参做任何拷贝
int len = removeDuplicates(nums);
// 在函数里修改输入数组对于调用者是可见的。
// 根据你的函数返回的长度, 它会打印出数组中该长度范围内的所有元素。
for (int i = 0; i < len; i++) {
print(nums[i]);
}
*/
/*
数组完成排序后,设置两个快慢指针
只要两个指针指向的值相等,就增加快指针
值不相等,复制快指针的值到慢指针+1的位置
接着慢指针+1
直到快指针走完全部
返回慢指针+1
*/
// removeDuplicates compacts a sorted slice in place so each value
// appears exactly once, and returns the length of the deduplicated
// prefix. It uses O(1) extra space via a slow/fast two-pointer scan:
// the slow index marks the end of the unique prefix, the fast index
// scans ahead, and each newly-seen value is copied to slow+1.
func removeDuplicates(nums []int) int {
	// Zero or one element is already duplicate-free.
	if len(nums) < 2 {
		return len(nums)
	}
	write := 0
	for read := 1; read < len(nums); read++ {
		// Equal values are skipped; only a new value advances the prefix.
		if nums[read] == nums[write] {
			continue
		}
		write++
		nums[write] = nums[read]
	}
	return write + 1
}
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"net"
"strconv"
"strings"
)
const separator = "#";
const endOfMessage = '\n';
func server() {
ln, err := net.Listen("tcp", ":4500")
if err != nil {
fmt.Println(err)
return
}
for {
c, err := ln.Accept()
if err != nil {
fmt.Println(err)
continue
}
println("new connect")
go handleServerConnection(c)
}
}
type JsonType struct {
Array []int
}
func handleServerConnection(c net.Conn) {
for {
message, _ := bufio.NewReader(c).ReadString(endOfMessage)
req := strings.Split(message, separator);
pattern := req[0];
data := req[1];
arr := JsonType{}
err := json.Unmarshal([]byte(data), &arr.Array)
if err != nil {
fmt.Println(err)
return
}
response := strconv.Itoa(sum(arr))
c.Write([]byte(pattern + separator + response + "\n"))
}
}
func sum(json JsonType) int {
result := 0
for _, v := range json.Array {
result += v
}
return result
}
// main starts the TCP sum server; it blocks for the life of the process.
// (Fix: removed the stray semicolon — gofmt-clean.)
func main() {
	server()
}
|
package runtime_test
import (
. "github.com/d11wtq/bijou/runtime"
"github.com/d11wtq/bijou/test"
"testing"
)
// TestRunWithValidInput checks that a well-formed program defining and
// applying a function evaluates to the expected integer value.
func TestRunWithValidInput(t *testing.T) {
res, err := Run(
`(def head
(fn (hd & tl)
hd))
(head 42 7 23)`,
test.FakeEnv(),
)
if err != nil {
t.Fatalf(`expected err == nil, got %s`, err)
}
// head returns its first argument: 42.
if res != Int(42) {
t.Fatalf(`expected res == Int(42), got %s`, res)
}
}
// TestRunWithAccessToEnv checks that Run can call a function predefined
// in the environment it is given.
func TestRunWithAccessToEnv(t *testing.T) {
// A zero-parameter function whose body evaluates to 42.
fun := &Func{
Env: test.FakeEnv(),
Params: EmptyList,
Body: EmptyList.Append(Int(42)),
}
env := test.FakeEnv()
env.Def("answer-to-life", fun)
res, err := Run(
`(answer-to-life)`,
env,
)
if err != nil {
t.Fatalf(`expected err == nil, got %s`, err)
}
if res != Int(42) {
t.Fatalf(`expected res == Int(42), got %s`, res)
}
}
// TestRunWithInvalidInput checks that a syntactically broken program
// (unterminated forms) yields an error and no result.
func TestRunWithInvalidInput(t *testing.T) {
res, err := Run(
`(def head
(fn wat?`,
test.FakeEnv(),
)
if err == nil {
t.Fatalf(`expected err != nil, got nil`)
}
if res != nil {
t.Fatalf(`expected res == nil, got %s`, res)
}
}
|
package main
import (
"compress/gzip"
"log"
"net/http"
"net/http/httputil"
"net/url"
"time"
"github.com/NYTimes/gziphandler"
"github.com/didip/tollbooth"
"github.com/didip/tollbooth/limiter"
"github.com/rs/cors"
"github.com/sirupsen/logrus"
"github.com/thisendout/apollo"
)
// buildServeMux assembles one reverse-proxy handler per configured API.
// Per API, middleware is appended to rootChain in this order (order is
// significant): rate limiting, access logging, CORS preflight, JWT auth,
// gzip, prefix stripping — and finally the reverse proxy to the API's
// target URL. It returns the populated mux; an unparseable target URL is
// fatal (process exits via the package-level logger).
func buildServeMux(rootChain apollo.Chain, settings GatewaySettings) *http.ServeMux {
// build the serve mux (the collection of handler functions)
mux := http.NewServeMux()
for _, api := range settings.APIConfigs {
customChain := rootChain
// add rate limiter
if api.RateLimitPerSecond != 0 {
rl := api.RateLimitPerSecond
// build a rate limiter middleware function
// by default it keys the limiter on the following headers: "RemoteAddr", "X-Forwarded-For", "X-Real-IP"
// create an X request/second limiter and every token bucket in it will expire 1 hour after it was initially set.
lmt := tollbooth.NewLimiter(rl, &limiter.ExpirableOptions{DefaultExpirationTTL: time.Hour})
// trigger a custom function with some logging info when the limit is reached
lmt.SetOnLimitReached(func(w http.ResponseWriter, r *http.Request) {
logger.WithFields(logrus.Fields{
"remote_ip": r.RemoteAddr,
"url": r.URL.String(),
"max_per_second": rl,
}).Info("rate limit exceeded")
return
})
// We override the default headers that are inspected to avoid that requests coming from CloudFlare (or some other CDN)
// edge servers are considered as coming from the same user.
// We avoid this by setting the RemoteAddr lookup last. This ensures it is only used when none of the other headers are available.
lmt.SetIPLookups([]string{"X-Forwarded-For", "X-Real-IP", "RemoteAddr"})
// Make sure the HTTP method is involved in the generation of the rate limit key so
// the CORS preflight OPTIONS requests do not trigger a rate limit for the subsequent real request.
lmt.SetMethods([]string{
http.MethodGet,
http.MethodHead,
http.MethodPost,
http.MethodPut,
http.MethodPatch,
http.MethodDelete,
http.MethodConnect,
http.MethodOptions,
http.MethodTrace,
})
// wrap the rate limiter for use in Apollo chains
wrappedRateLimiter := func(next http.Handler) http.Handler { return tollbooth.LimitHandler(lmt, next) }
// add the rate limiter to the main chain
customChain = customChain.Append(apollo.Wrap(wrappedRateLimiter))
}
// Add the access logging middleware
customChain = customChain.Append(apollo.Wrap(NewLoggerMiddleware(logger)))
// add handling of CORS-related preflight requests.
if api.CORS != nil {
corsHandler := cors.New(api.CORS.ToConfig())
// TODO: default logger is way too verbose. Leave it to our own access Logger.
// // set a logger for the corsHandler.
// // Derive this logger from logrus. We consider all these log entries 'INFO'.
// corsLogger := logger.WriterLevel(logrus.InfoLevel)
// corsHandler.Log = log.New(corsLogger, "cors_preflight--", 0)
customChain = customChain.Append(apollo.Wrap(corsHandler.Handler))
}
// add auth middleware if required
if api.Auth {
customChain = customChain.Append(apollo.Wrap(NewJWTMiddleware(settings.requestAuthenticator)))
}
// add the gzip middleware if required
if api.Gzip {
gz := gziphandler.MustNewGzipLevelHandler(gzip.DefaultCompression)
customChain = customChain.Append(apollo.Wrap(gz))
}
if api.StripPrefix {
// Apollo provides a Wrap function to inject normal http.Handler-based middleware into the chain.
// The context will skip over the injected middleware and pass unharmed to the next context-aware handler in the chain.
customChain = customChain.Append(apollo.Wrap(func(next http.Handler) http.Handler { return http.StripPrefix(api.Prefix, next) }))
}
// parse the target URL
target, err := url.Parse(api.TargetURL)
if err != nil {
logger.WithError(err).Fatalf("could not parse url : %v", api.TargetURL)
}
// parametrise the reverse proxy function
reverseProxyFunc := httputil.NewSingleHostReverseProxy(target)
// set a logger for the reverseProxyFunc, which only logs errors.
// Derive this logger from logrus.
revProxyLogger := logger.WriterLevel(logrus.ErrorLevel)
reverseProxyFunc.ErrorLog = log.New(revProxyLogger, "reverseproxy--", 0)
// wrap the reverse proxy into a handler (so it implements to apollo.Handler) and append it to the chain
mux.Handle(api.Prefix, customChain.Then(reverseProxyHandler(reverseProxyFunc)))
}
return mux
}
|
package cloudformation
// AWSAppSyncDataSource_ElasticsearchConfig AWS CloudFormation Resource (AWS::AppSync::DataSource.ElasticsearchConfig)
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-elasticsearchconfig.html
type AWSAppSyncDataSource_ElasticsearchConfig struct {
	// AwsRegion AWS CloudFormation Property
	// Required: true
	// NOTE(review): tag says omitempty although the property is required —
	// consistent with generated goformation code; confirm before changing.
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-elasticsearchconfig.html#cfn-appsync-datasource-elasticsearchconfig-awsregion
	AwsRegion string `json:"AwsRegion,omitempty"`

	// Endpoint AWS CloudFormation Property
	// Required: true
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-elasticsearchconfig.html#cfn-appsync-datasource-elasticsearchconfig-endpoint
	Endpoint string `json:"Endpoint,omitempty"`
}

// AWSCloudFormationType returns the AWS CloudFormation resource type.
func (r *AWSAppSyncDataSource_ElasticsearchConfig) AWSCloudFormationType() string {
	return "AWS::AppSync::DataSource.ElasticsearchConfig"
}
|
package main
import (
"database/sql"
"fmt"
_ "mysql-master"
)
// kontak is one row of the telepon contacts table.
type kontak struct {
// ID is the row's primary key.
ID int
// Nama is the contact's name.
Nama string
// Nomor is the contact's phone number.
Nomor string
}
// main dumps every contact row to the console.
func main() {
getAllData()
}
// koneksi opens a handle to the local "kontak" MySQL database.
// Note: sql.Open only validates its arguments; no connection is
// established until the handle is first used.
func koneksi() (*sql.DB, error) {
	db, err := sql.Open("mysql", "root:@tcp(localhost)/kontak")
	if err != nil {
		return nil, err
	}
	return db, nil
}
// getAllData reads every row of the telepon table and prints it to the
// console. All failures are reported on stdout and abort the dump.
func getAllData() {
	db, err := koneksi()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	defer db.Close()
	rows, err := db.Query("SELECT * FROM telepon")
	if err != nil {
		fmt.Println(err.Error())
		// Fix: previously execution fell through here with rows == nil,
		// so the deferred rows.Close() panicked with a nil dereference.
		return
	}
	defer rows.Close()
	var result []kontak
	for rows.Next() {
		var each = kontak{}
		var err = rows.Scan(&each.ID, &each.Nama, &each.Nomor)
		if err != nil {
			fmt.Println(err.Error())
			return
		}
		result = append(result, each)
	}
	// Surface any iteration error hidden by the loop ending early.
	if err = rows.Err(); err != nil {
		fmt.Println(err.Error())
		return
	}
	// Tampil data di console
	for _, i := range result {
		fmt.Println(i.ID, i.Nama, i.Nomor)
	}
}
|
// Copyright 2022 YuWenYu All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package pot
import (
"github.com/gin-contrib/zap"
"github.com/gin-gonic/gin"
"github.com/spf13/cast"
"github.com/yuw-pot/pot/data"
E "github.com/yuw-pot/pot/modules/err"
P "github.com/yuw-pot/pot/modules/properties"
U "github.com/yuw-pot/pot/modules/utils"
Z "github.com/yuw-pot/pot/modules/zlog"
R "github.com/yuw-pot/pot/routes"
"time"
_ "github.com/yuw-pot/pot/autoload"
)
// version is the framework version string printed at startup.
const version string = "v1.0.0"
type (
// PoT is the framework engine: utility helpers plus the route and
// error modules wired in by PoT().
PoT struct {
// vPoT provides small utility helpers (printf, contains, ...).
vPoT *U.PoT
// PoTRoute is the route module installed into R.RPoT.
PoTRoute *R.PoT
// PoTError is the error module installed into E.EPoT.
PoTError *E.PoT
}
)
// New returns a PoT engine with its utility helper initialized; routes
// and error modules are attached later via PoT().
func New() *PoT {
return &PoT {
vPoT: U.New(),
}
}
// Run configures gin from the PoT.* properties and starts serving. It
// serves HTTPS when PoT.Hssl.Power is 1 (panicking if the cert or key
// file property is missing), plain HTTP otherwise, and panics on any
// server error. It blocks for the life of the process.
func (engine *PoT) Run() {
engine.vPoT.Fprintf(gin.DefaultWriter, "[%v] %v\n", data.PoT, version)
// Disable Console Color
gin.DisableConsoleColor()
// Gin Mode Release
gin.SetMode(gin.ReleaseMode)
r := gin.New()
// Resolve the configured mode; anything other than 0/1 falls back to 1.
PoTMode := P.PropertyPoT.GeT("PoT.Mode", "1")
if engine.vPoT.Contains(PoTMode, 0,1) == false {
PoTMode = 1
}
engine.setMode(r, data.PoTMode[cast.ToInt(PoTMode)])
// Hand the engine to the route module and take back the fully routed engine.
R.RPoT.Eng = r
r = R.RPoT.Made().Eng
// Https Power ON/OFF
// - PoT.Hssl
// - PoT.Hssl.Power
// - PoT.Hssl.CertFile
// - PoT.Hssl.KeysFile
var rErr error
strPoTPort := ":"+cast.ToString(P.PropertyPoT.GeT("PoT.Port", data.PropertyPort))
if P.PropertyPoT.GeT("PoT.Hssl.Power", 0) == 1 {
PoTHsslCertFile := cast.ToString(P.PropertyPoT.GeT("PoT.Hssl.CertFile", ""))
if PoTHsslCertFile == "" {
panic(E.Err(data.ErrPfx, "PoTSslCF"))
}
PoTHsslKeysFile := cast.ToString(P.PropertyPoT.GeT("PoT.Hssl.KeysFile", ""))
if PoTHsslKeysFile == "" {
panic(E.Err(data.ErrPfx, "PoTSslKF"))
}
engine.vPoT.Fprintf(gin.DefaultWriter, "[%v] Listening and serving HTTPs on %v\n", data.PoT, strPoTPort)
// - Run Https Server (SSL)
rErr = r.RunTLS(strPoTPort, PoTHsslCertFile, PoTHsslKeysFile)
} else {
engine.vPoT.Fprintf(gin.DefaultWriter, "[%v] Listening and serving HTTP on %v\n", data.PoT, strPoTPort)
// - Run Http Server
rErr = r.Run(strPoTPort)
}
if rErr != nil { panic(rErr) }
}
// PoT installs the engine's route and error modules into the package-level
// singletons (R.RPoT, E.EPoT) and merges the user-defined error messages.
// It returns the receiver so calls can be chained, e.g. New().PoT().Run().
func (engine *PoT) PoT() *PoT {
// Routes Initialized
R.RPoT = engine.PoTRoute
// Err Modules Initialize
// - Combine Error Message of Self Define
E.EPoT = engine.PoTError
E.EPoT.ErrPoTCombine()
return engine
}
// setMode installs middleware according to the resolved run mode:
// console mode gets gin's recovery plus the project's log formatter,
// release mode gets zap-based request logging and panic recovery.
// Unknown modes install no middleware at all.
func (engine *PoT) setMode(r *gin.Engine, mode interface{}) {
switch mode {
case data.ConsoleMode:
// Mode: Debug
r.Use(gin.Recovery())
r.Use(R.LoggerWithFormat())
return
case data.ReleaseMode:
// Mode Release
// GeT Log Configure
// - data.ZLogPoT construct
// Fall back to a zero-valued config when the "Logs.PoT" property is
// absent or fails to unmarshal.
var zLogPoT *data.ZLogPoT = nil
_ = P.PropertyPoT.UsK("Logs.PoT", &zLogPoT)
if zLogPoT == nil {
zLogPoT = &data.ZLogPoT{}
}
// - Zap Log New
zLog := Z.New(zLogPoT)
// - Zap Log Made
zLogMade := zLog.Made()
// Add a ginzap middleware, which:
// - Logs all requests, like a combined access and error log.
// - Logs to stdout.
// - RFC3339 with UTC time format.
r.Use(ginzap.Ginzap(zLogMade, time.RFC3339, true))
// Logs all panic to error log
// - stack means whether output the stack info.
r.Use(ginzap.RecoveryWithZap(zLogMade, true))
return
}
}
|
package postgres
import (
"context"
"database/sql"
"fmt"
"github.com/pganalyze/collector/state"
"github.com/pganalyze/collector/util"
)
// transactionIdSQLPg13 reads the current 64-bit transaction ID and the next
// multixact ID on PostgreSQL 13+, where pg_current_xact_id replaced
// txid_current.
const transactionIdSQLPg13 string = `
SELECT
pg_catalog.pg_current_xact_id(),
next_multixact_id
FROM pg_catalog.pg_control_checkpoint()
`
// transactionIdSQLDefault is the pre-PostgreSQL-13 variant using
// txid_current().
const transactionIdSQLDefault string = `
SELECT
pg_catalog.txid_current(),
next_multixact_id
FROM pg_catalog.pg_control_checkpoint()
`
// xminHorizonSQL reports the oldest xmin/xid held by backends, replication
// slots (regular and catalog), prepared transactions, and standbys. The %s
// placeholder is replaced with the pg_stat_replication source (either the
// catalog view or the pganalyze stats helper).
const xminHorizonSQL string = `
SELECT
COALESCE((
SELECT
CASE WHEN COALESCE(pg_catalog.age(backend_xid), 0) > COALESCE(pg_catalog.age(backend_xmin), 0)
THEN backend_xid
ELSE backend_xmin
END
FROM pg_catalog.pg_stat_activity
WHERE backend_xmin IS NOT NULL OR backend_xid IS NOT NULL
ORDER BY greatest(pg_catalog.age(backend_xmin), pg_catalog.age(backend_xid)) DESC
LIMIT 1
), '0'::xid) as backend,
COALESCE((
SELECT
xmin
FROM pg_catalog.pg_replication_slots
WHERE xmin IS NOT NULL
ORDER BY pg_catalog.age(xmin) DESC
LIMIT 1
), '0'::xid) as replication_slot_xmin,
COALESCE((
SELECT
catalog_xmin
FROM pg_catalog.pg_replication_slots
WHERE catalog_xmin IS NOT NULL
ORDER BY pg_catalog.age(catalog_xmin) DESC
LIMIT 1
), '0'::xid) as replication_slot_catalog_xmin,
COALESCE((
SELECT
transaction AS xmin
FROM pg_catalog.pg_prepared_xacts
ORDER BY pg_catalog.age(transaction) DESC
LIMIT 1
), '0'::xid) as prepare_xact,
COALESCE((
SELECT
backend_xmin
FROM %s
WHERE backend_xmin IS NOT NULL
ORDER BY pg_catalog.age(backend_xmin) DESC
LIMIT 1
), '0'::xid) as standby
`
// GetServerStats collects server-wide transaction-ID and xmin-horizon
// statistics. On replicas the stats are returned zero-valued; errors from
// the two queries are propagated to the caller.
func GetServerStats(ctx context.Context, logger *util.Logger, db *sql.DB, postgresVersion state.PostgresVersion, systemType string) (state.PostgresServerStats, error) {
var stats state.PostgresServerStats
var transactionIdSQL string
// Only collect transaction ID or xmin horizon related stats with non-replicas
// NOTE(review): an error from getIsReplica silently skips collection, the
// same as being a replica — confirm that is intended.
if isReplica, err := getIsReplica(ctx, db); err == nil && !isReplica {
// Query xmin horizon before querying the current transaction ID
// as the backend_xmin from pg_stat_activity can point to the "next" transaction ID.
var sourceStatReplicationTable string
if StatsHelperExists(ctx, db, "get_stat_replication") {
logger.PrintVerbose("Found pganalyze.get_stat_replication() stats helper")
sourceStatReplicationTable = "pganalyze.get_stat_replication()"
} else {
sourceStatReplicationTable = "pg_catalog.pg_stat_replication"
}
err = db.QueryRowContext(ctx, QueryMarkerSQL+fmt.Sprintf(xminHorizonSQL, sourceStatReplicationTable)).Scan(
&stats.XminHorizonBackend, &stats.XminHorizonReplicationSlot, &stats.XminHorizonReplicationSlotCatalog,
&stats.XminHorizonPreparedXact, &stats.XminHorizonStandby,
)
if err != nil {
return stats, err
}
// pg_current_xact_id() is only available on Postgres 13 and later.
if postgresVersion.Numeric >= state.PostgresVersion13 {
transactionIdSQL = transactionIdSQLPg13
} else {
transactionIdSQL = transactionIdSQLDefault
}
err = db.QueryRowContext(ctx, QueryMarkerSQL+transactionIdSQL).Scan(
&stats.CurrentXactId, &stats.NextMultiXactId,
)
if err != nil {
return stats, err
}
}
return stats, nil
}
|
package main
import (
"bufio"
"fmt"
"flag"
"net/http"
"encoding/json"
"go/token"
"go/types"
"io/ioutil"
"os"
"strconv"
"strings"
)
// Currency mirrors the exchangeratesapi.io response. Rates carries no json
// tag but still matches the "rates" key because encoding/json field
// matching is case-insensitive.
type Currency struct{
Rates map[string]float32
Base string `json:"base"`
Date string `json:"date"`
}
// Contactt is one contact entry as persisted in Contacts.json.
type Contactt struct {
Name string `json:"Name"`
Number string `json:"Number"`
Relation string `json:"Relation"`
}
// Finance is one transaction as persisted in Finance.json. TransType "+"
// adds to the balance; any other value is treated as an expense.
type Finance struct {
Amount int `json:"Amount"`
Remark string `json:"Remark"`
TransType string `json:"TransType"`
}
// checkFile ensures filename exists, creating an empty file when it does
// not. Stat errors other than "not exist" are deliberately ignored; the
// caller surfaces them when it reads the file.
func checkFile(filename string) error {
	_, err := os.Stat(filename)
	if os.IsNotExist(err) {
		f, err := os.Create(filename)
		if err != nil {
			return err
		}
		// Close the handle immediately — the original leaked the *os.File
		// returned by os.Create.
		return f.Close()
	}
	return nil
}
// main is a small multi-tool CLI: currency conversion, constant-expression
// calculator, contact manager and budget tracker, selected via flags.
func main() {
	var curr Currency
	fromPtr := flag.String("from", "", "From what currency?")
	toPtr := flag.String("to", "", "To what currency?")
	calcPtr := flag.String("calc", "", "Simple Calculator")
	contPtr := flag.String("cont", "", "Simple Contact Manager")
	seecontPtr := flag.String("seecont", "", "see contents of Simple Contact Manager")
	finPtr := flag.String("fin", "", "Budget Manager")
	finresPtr := flag.String("finres", "n", "Returns amount of money left and budget report")
	flag.Parse()
	if *calcPtr != "" {
		// Evaluate the expression with the go/types constant evaluator.
		fs := token.NewFileSet()
		tv, _ := types.Eval(fs, nil, token.NoPos, *calcPtr)
		fmt.Println(tv.Value)
	} else if *fromPtr != "" {
		fmt.Println(*toPtr)
		base_url := "https://api.exchangeratesapi.io/latest?base="
		resp, err := http.Get(base_url + *fromPtr)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		scanner := bufio.NewScanner(resp.Body)
		currJson := ""
		for scanner.Scan() {
			currJson = currJson + scanner.Text()
		}
		// Bug fix: the original checked scanner.Err() BEFORE the Scan loop,
		// where it is always nil; read errors must be checked after scanning.
		if err := scanner.Err(); err != nil {
			panic(err)
		}
		json.Unmarshal([]byte(currJson), &curr)
		if *toPtr == "all" {
			fmt.Println(curr.Rates)
		} else {
			fmt.Println(curr.Rates[*toPtr])
		}
	} else if *contPtr != "" {
		// Append a contact parsed from "name,number,relation" to Contacts.json.
		result := strings.Split(*contPtr, ",")
		filename := "Contacts.json"
		err := checkFile(filename)
		if err != nil {
			fmt.Println(err)
		}
		file, err := ioutil.ReadFile(filename)
		if err != nil {
			fmt.Println(err)
		}
		data := []Contactt{}
		json.Unmarshal(file, &data)
		newStruct := &Contactt{
			Name:     result[0],
			Number:   result[1],
			Relation: result[2],
		}
		data = append(data, *newStruct)
		dataBytes, err := json.Marshal(data)
		if err != nil {
			fmt.Println(err)
		}
		err = ioutil.WriteFile(filename, dataBytes, 0644)
		if err != nil {
			fmt.Println(err)
		}
	} else if *seecontPtr == "y" {
		dat, _ := ioutil.ReadFile("Contacts.json")
		fmt.Print(string(dat))
	} else if *finPtr != "" {
		// Append a transaction parsed from "amount,remark,+|-" to Finance.json.
		resultt := strings.Split(*finPtr, ",")
		filenamee := "Finance.json"
		err := checkFile(filenamee)
		if err != nil {
			fmt.Println(err)
		}
		file, err := ioutil.ReadFile(filenamee)
		if err != nil {
			fmt.Println(err)
		}
		dataa := []Finance{}
		json.Unmarshal(file, &dataa)
		amt, _ := strconv.Atoi(resultt[0])
		newStruct := &Finance{
			Amount:    amt,
			Remark:    resultt[1],
			TransType: resultt[2],
		}
		dataa = append(dataa, *newStruct)
		dataBytes, err := json.Marshal(dataa)
		if err != nil {
			fmt.Println(err)
		}
		err = ioutil.WriteFile(filenamee, dataBytes, 0644)
		if err != nil {
			fmt.Println(err)
		}
	} else if *finresPtr == "y" {
		// Sum all transactions: "+" adds to the balance, anything else subtracts.
		var arr []Finance
		finname := "Finance.json"
		datt, _ := ioutil.ReadFile(finname)
		json.Unmarshal([]byte(datt), &arr)
		amt := 0
		for index, element := range arr {
			fmt.Println(index, "=>", element)
			if element.TransType == "+" {
				amt = amt + element.Amount
			} else {
				amt = amt - element.Amount
			}
		}
		fmt.Println("Amount of money Left is : " + strconv.Itoa(amt))
	}
}
|
package main
import (
"github.com/riita10069/check_interface"
"golang.org/x/tools/go/analysis/unitchecker"
)
// main runs the check_interface analyzer as a unitchecker binary, suitable
// for use with "go vet -vettool".
func main() { unitchecker.Main(check_interface.Analyzer) }
|
package repository
import "github.com/lazhari/web-jwt/models"
// CreatePost persists p and returns it, or the database error on failure.
func (pr postgresRepository) CreatePost(p *models.Post) (*models.Post, error) {
	if result := pr.db.Create(p); result.Error != nil {
		return nil, result.Error
	}
	return p, nil
}
// GetAllPosts loads every post from the database.
func (pr postgresRepository) GetAllPosts() ([]models.Post, error) {
	var all []models.Post
	if res := pr.db.Find(&all); res.Error != nil {
		return nil, res.Error
	}
	return all, nil
}
|
package main
// main prints a greeting via the builtin println (which writes to stderr).
func main() {
println("hello world2")
}
|
package main
import (
"github.com/stretchr/testify/assert"
"testing"
)
// Test pairs an input string with the expected result, shared by the
// part-one and part-two solver tests below.
type Test struct {
input string
expected int
}
// TestSolvePartOne checks solvePartOne against the documented examples.
func TestSolvePartOne(t *testing.T) {
	assert := assert.New(t)
	cases := []Test{
		{input: "(())", expected: 0},
		{input: "()()", expected: 0},
		{input: "(((", expected: 3},
		{input: "(()(()(", expected: 3},
		{input: "))(((((", expected: 3},
		{input: "())", expected: -1},
		{input: "))(", expected: -1},
		{input: ")))", expected: -3},
		{input: ")())())", expected: -3},
	}
	for _, c := range cases {
		assert.Equal(c.expected, solvePartOne(c.input))
	}
}
// TestSolvePartTwo checks solvePartTwo against the documented examples.
func TestSolvePartTwo(t *testing.T) {
	assert := assert.New(t)
	for _, c := range []Test{
		{input: ")", expected: 1},
		{input: "()())", expected: 5},
	} {
		assert.Equal(c.expected, solvePartTwo(c.input))
	}
}
|
package util
import (
"testing"
)
// TestIsStringEmpty verifies that whitespace-only strings count as empty
// and strings with content do not.
func TestIsStringEmpty(t *testing.T) {
	if empty := IsStringEmpty(" "); !empty {
		t.Error("Empty string was not considered empty")
	}
	if empty := IsStringEmpty(" stuff "); empty {
		t.Error("Non-empty string considered empty")
	}
}
// TestFileProcessing feeds ./test_file.txt through ProcessNonEmptyFileLines
// and verifies that exactly the lines "line1" and "line2" are delivered.
// NOTE(review): any return value of ProcessNonEmptyFileLines is discarded
// here, so a missing fixture file would only surface via the found-flags
// check at the end — confirm whether the call returns an error to check.
func TestFileProcessing(t *testing.T) {
var foundLine1, foundLine2 bool
ProcessNonEmptyFileLines("./test_file.txt",'\n', func(line string) error {
if line == "line1" {
foundLine1 = true
} else if line == "line2" {
foundLine2 = true
} else {
t.Errorf("Unexpected line in test file: %v",line)
}
return nil
})
if !(foundLine1 && foundLine2) {
t.Errorf("Did not receive line1 or line2 in test")
}
}
func TestDoesFileExist(t *testing.T) {
if !DoesFileExist("./util_test.go") {
t.Error("util_test.go does not exist in directory")
}
} |
package main
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRenderServer spins up the render server on a local listener and posts
// diagram sources to it, asserting on the JSON envelope: HTTP status,
// success flag, and either the rendered SVG or the error message.
func TestRenderServer(t *testing.T) {
app := &renderServer{}
ts := httptest.NewServer(app)
defer ts.Close()
// Build absolute URLs against the test server's ephemeral address.
formatURL := func(urlPath string) string {
return "http://" + ts.Listener.Addr().String() + urlPath
}
t.Log(formatURL("/"))
cases := []struct {
name string
method string
diagramSrc string
wantStatusCode int
wantSuccess bool
wantSVGRaw string
wantErr string
}{
{name: "empty scr", method: http.MethodPost, wantSuccess: false, wantStatusCode: http.StatusBadRequest, wantErr: "invalid arguments: empty src of diagram"},
{name: "invalid sintax", method: http.MethodPost, diagramSrc: "foobar", wantSuccess: false, wantStatusCode: http.StatusOK, wantErr: "failed render: /* 1 */ foobar\n \nERROR: syntax error\n"},
{name: "ok", method: http.MethodPost, diagramSrc: `text "some title"
box "some box"
`, wantSuccess: true, wantStatusCode: http.StatusOK, wantSVGRaw: `<svg xmlns='http://www.w3.org/2000/svg' viewBox="0 0 201.254 76.32">
<text x="43" y="38" text-anchor="middle" fill="rgb(0,0,0)" dominant-baseline="central">some title</text>
<path d="M91,74L199,74L199,2L91,2Z" style="fill:none;stroke-width:2.16;stroke:rgb(0,0,0);" />
<text x="145" y="38" text-anchor="middle" fill="rgb(0,0,0)" dominant-baseline="central">some box</text>
</svg>
`},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
// Encode the request body expected by the server.
reqData := map[string]interface{}{
"diagram_src": c.diagramSrc,
}
reqBytes, err := json.Marshal(reqData)
require.NoError(t, err, "encode request")
t.Logf("request json: %q", string(reqBytes))
req, err := http.NewRequest(c.method, formatURL("/"), bytes.NewReader(reqBytes))
require.NoError(t, err)
// Exercise the handler directly via a recorder (no network round trip).
rec := httptest.NewRecorder()
app.ServeHTTP(rec, req)
assert.EqualValues(t, c.wantStatusCode, rec.Result().StatusCode)
t.Logf("response json: %q", rec.Body.Bytes())
resData := map[string]interface{}{}
err = json.Unmarshal(rec.Body.Bytes(), &resData)
require.NoError(t, err, "decode response")
require.EqualValues(t, c.wantSuccess, resData["success"])
if resData["success"].(bool) {
assert.EqualValues(t, c.wantSVGRaw, resData["svg_raw"])
} else {
assert.EqualValues(t, c.wantErr, resData["err"])
}
})
}
}
|
package model
import (
"context"
"github.com/pkg/errors"
)
// Sentinel errors returned by ThirdPartyMusicService implementations.
var (
ErrUnauthorizedAccessToken = errors.New("unauthorized access token")
ErrUserOrChannelNotFound = errors.New("user or channel not found")
ErrInvalidPlaylistID = errors.New("invalid playlist id")
ErrPlaylistNotFound = errors.New("playlist not found")
ErrPlaylistItemNumberLimitExceeded = errors.New("playlist already contains the maximum allowed number of items")
ErrEmptyPlaylistName = errors.New("playlist name required")
ErrPlaylistItemNotAccessible = errors.New("unauthorized to retrieve the specified playlist")
)
// Artist holds a music artist's name.
type Artist struct {
name string
}
// Name returns the artist's name.
func (a *Artist) Name() string {
return a.name
}
// NewArtist constructs an Artist with the given name.
func NewArtist(name string) *Artist {
return &Artist{name: name}
}
// Track is a named piece of music credited to zero or more artists.
type Track struct {
name string
artists []*Artist
}
// Name returns the track's title.
func (t *Track) Name() string {
return t.name
}
// Artists returns the artists credited on the track.
func (t *Track) Artists() []*Artist {
return t.artists
}
// NewTrack constructs a Track with the given title and artists.
func NewTrack(name string, artists []*Artist) *Track {
return &Track{name: name, artists: artists}
}
// Playlist is a named collection of tracks as exposed by a third-party
// music service. numberOfTracks is stored separately from len(tracks)
// because track details may not always be loaded.
type Playlist struct {
id string
name string
description string
numberOfTracks int
tracks []*Track
}
// ID returns the service-assigned playlist identifier.
func (l *Playlist) ID() string {
return l.id
}
// Name returns the playlist's display name.
func (l *Playlist) Name() string {
return l.name
}
// NumberOfTracks returns the reported track count.
func (l *Playlist) NumberOfTracks() int {
return l.numberOfTracks
}
// Description returns the playlist's description text.
func (l *Playlist) Description() string {
return l.description
}
// Tracks returns the loaded track details, if any.
func (l *Playlist) Tracks() []*Track {
return l.tracks
}
// NewPlaylist constructs a Playlist from its component values.
func NewPlaylist(
id string,
name string,
description string,
numberOfTracks int,
tracks []*Track,
) *Playlist {
return &Playlist{
id: id,
name: name,
description: description,
numberOfTracks: numberOfTracks,
tracks: tracks,
}
}
// Result summarizes a playlist operation: how many items succeeded and how
// many failed for the given playlist.
type Result struct {
playlistID string
success int
failure int
}
// PlaylistID returns the affected playlist's identifier.
func (r *Result) PlaylistID() string {
return r.playlistID
}
// Success returns the number of items processed successfully.
func (r *Result) Success() int {
return r.success
}
// Failure returns the number of items that failed.
func (r *Result) Failure() int {
return r.failure
}
// NewResult constructs a Result from its component values.
func NewResult(playlistID string, success int, failure int) *Result {
return &Result{playlistID: playlistID, success: success, failure: failure}
}
// ThirdPartyMusicService abstracts an external music provider, exposing
// playlist listing, lookup and creation; every call carries the caller's
// credential explicitly.
type ThirdPartyMusicService interface {
GetPlaylists(ctx context.Context, credential *Credential, id string) ([]*Playlist, error)
GetOwnedPlaylists(ctx context.Context, credential *Credential) ([]*Playlist, error)
GetPlaylistItemsByID(ctx context.Context, credential *Credential, playlistID string) (*Playlist, error)
CreatePlaylist(ctx context.Context, credential *Credential, playlist *Playlist) (*Result, error)
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package vm
import (
"strconv"
"testing"
"github.com/stretchr/testify/assert"
)
// TestZonePlacement exercises ZonePlacement over several zone/node counts.
func TestZonePlacement(t *testing.T) {
	cases := []struct {
		numZones, numNodes int
		expected           []int
	}{
		{1, 1, []int{0}},
		{1, 2, []int{0, 0}},
		{2, 4, []int{0, 0, 1, 1}},
		{2, 5, []int{0, 0, 1, 1, 0}},
		{3, 11, []int{0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 1}},
	}
	for i, c := range cases {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			assert.EqualValues(t, c.expected, ZonePlacement(c.numZones, c.numNodes))
		})
	}
}
// TestExpandZonesFlag verifies that "zone:count" specs expand to repeated
// zone names and that malformed counts yield a parse error.
func TestExpandZonesFlag(t *testing.T) {
	cases := []struct {
		input, output []string
		expErr        string
	}{
		{
			input:  []string{"us-east1-b:3", "us-west2-c:2"},
			output: []string{"us-east1-b", "us-east1-b", "us-east1-b", "us-west2-c", "us-west2-c"},
		},
		{
			input:  []string{"us-east1-b:3", "us-west2-c"},
			output: []string{"us-east1-b", "us-east1-b", "us-east1-b", "us-west2-c"},
		},
		{
			input:  []string{"us-east1-b", "us-west2-c"},
			output: []string{"us-east1-b", "us-west2-c"},
		},
		{
			input:  []string{"us-east1-b", "us-west2-c:a2"},
			expErr: "failed to parse",
		},
	}
	for i, c := range cases {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			expanded, err := ExpandZonesFlag(c.input)
			if c.expErr == "" {
				assert.EqualValues(t, c.output, expanded)
				return
			}
			if assert.Error(t, err) {
				assert.Regexp(t, c.expErr, err.Error())
			}
		})
	}
}
// TestVM_ZoneEntry checks the DNS zone-file line generated for a VM, plus
// the error cases for over-long names and missing public IPs.
func TestVM_ZoneEntry(t *testing.T) {
cases := []struct {
description string
vm VM
expected string
expErr string
}{
{
description: "Normal length",
vm: VM{Name: "just_a_test", PublicIP: "1.1.1.1"},
expected: "just_a_test 60 IN A 1.1.1.1\n",
},
{
description: "Too long name",
vm: VM{
Name: "very_very_very_very_very_very_very_very_very_very_very_very_very_very_long_name",
PublicIP: "1.1.1.1",
},
expErr: "Name too long",
},
{
description: "Missing IP",
vm: VM{Name: "just_a_test"},
expErr: "Missing IP address",
},
}
for _, c := range cases {
t.Run(c.description, func(t *testing.T) {
expanded, err := c.vm.ZoneEntry()
// Error cases assert on the message pattern; success cases on the
// exact zone-file line.
if c.expErr != "" {
if assert.Error(t, err) {
assert.Regexp(t, c.expErr, err.Error())
}
} else {
assert.EqualValues(t, c.expected, expanded)
}
})
}
}
// TestDNSSafeAccount verifies that account names are normalized to a
// DNS-safe form (lowercased, with dots, underscores and other unsafe
// characters stripped).
func TestDNSSafeAccount(t *testing.T) {
	cases := []struct {
		description, input, expected string
	}{
		{
			"regular", "username", "username",
		},
		{
			"mixed case", "UserName", "username",
		},
		{
			"dot", "user.name", "username",
		},
		{
			"underscore", "user_name", "username",
		},
		{
			"dot and underscore", "u.ser_n.a_me", "username",
		},
		{
			"Unicode and other characters", "~/❦u.ser_ऄn.a_meλ", "username",
		},
	}
	for _, c := range cases {
		t.Run(c.description, func(t *testing.T) {
			// assert.EqualValues takes (t, expected, actual); the original
			// passed them reversed, producing misleading failure output.
			assert.EqualValues(t, c.expected, DNSSafeAccount(c.input))
		})
	}
}
|
package isanagram
import "testing"
// TestIsAnagram checks isAnagram on matching and non-matching word pairs,
// including escaped characters and length mismatches.
func TestIsAnagram(t *testing.T) {
	// Failure messages fixed: the original read "Get ... Expect flase".
	if isAnagram("anagram", "nagaram") != true {
		t.Errorf("Got false, Expect true")
	}
	if isAnagram("rat", "car") != false {
		t.Errorf("Got true, Expect false")
	}
	if isAnagram("anagra\u007Am", "nagaam\u007Ar") != true {
		t.Errorf("Got false, Expect true")
	}
	if isAnagram("a", "ab") != false {
		t.Errorf("Got true, Expect false")
	}
}
|
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gardenerscheduler
import (
"context"
"fmt"
gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
flag "github.com/spf13/pflag"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/gardener/test-infra/pkg/hostscheduler"
"github.com/gardener/test-infra/pkg/hostscheduler/cleanup"
"github.com/gardener/test-infra/pkg/util/gardener"
kutil "github.com/gardener/test-infra/pkg/util/kubernetes"
)
var (
// NotMonitoringComponent is a requirement that something doesn't have the GardenRole GardenRoleMonitoring.
NotMonitoringComponent = cleanup.MustNewRequirement(v1beta1constants.GardenRole, selection.NotEquals, v1beta1constants.GardenRoleMonitoring)
// NotGardenerAddon is a requirement that something doesn't have the GardenRole GardenRoleOptionalAddon.
NotGardenerAddon = cleanup.MustNewRequirement(v1beta1constants.GardenRole, selection.NotEquals, v1beta1constants.GardenRoleOptionalAddon)
)
// Cleanup registers the --clean flag and returns a SchedulerFunc that,
// when the flag is set, resolves the target shoot cluster and removes all
// non-monitoring, non-addon resources from it.
func (s *gardenerscheduler) Cleanup(flagset *flag.FlagSet) (hostscheduler.SchedulerFunc, error) {
	clean := flagset.Bool("clean", false, "Cleanup the specified cluster")
	return func(ctx context.Context) error {
		// Bug fix: the original guard was `clean != nil || !*clean`, which is
		// always true for the non-nil pointer flagset.Bool returns, so the
		// cleanup body could never execute. Skip only when the flag is unset.
		if clean == nil || !*clean {
			return nil
		}
		var (
			err        error
			hostConfig = &client.ObjectKey{Name: s.shootName, Namespace: s.namespace}
		)
		// Bug fix: fall back to the host information file only when no shoot
		// name was given (the original condition was inverted, contradicting
		// its own error message).
		if s.shootName == "" {
			hostConfig, err = readHostInformationFromFile()
			if err != nil {
				s.log.V(3).Info(err.Error())
				return errors.New("no shoot cluster is defined. Use --name or create a config file")
			}
		}
		shoot := &gardencorev1beta1.Shoot{}
		err = s.client.Get(ctx, client.ObjectKey{Namespace: hostConfig.Namespace, Name: hostConfig.Name}, shoot)
		if err != nil {
			return fmt.Errorf("cannot get shoot %s: %s", hostConfig.Name, err.Error())
		}
		hostClient, err := kutil.NewClientFromSecret(ctx, s.client, hostConfig.Namespace, ShootKubeconfigSecretName(shoot.Name), client.Options{
			Scheme: gardener.ShootScheme,
		})
		if err != nil {
			return fmt.Errorf("cannot build shoot client: %s", err.Error())
		}
		shoot, err = WaitUntilShootIsReconciled(ctx, s.log.WithValues("shoot", shoot.Name, "namespace", shoot.Namespace), s.client, shoot)
		if err != nil {
			return fmt.Errorf("cannot reconcile shoot %s: %s", shoot.Name, err.Error())
		}
		// A hibernated shoot is already free; nothing to clean.
		if shoot.Spec.Hibernation != nil && shoot.Spec.Hibernation.Enabled != nil && *shoot.Spec.Hibernation.Enabled {
			s.log.WithValues("shoot", shoot.Name, "namespace", shoot.Namespace).Info("cluster is already free. No need to cleanup.")
			return nil
		}
		if err := cleanup.CleanResources(ctx, s.log, hostClient, labels.Requirements{NotMonitoringComponent, NotGardenerAddon}); err != nil {
			return err
		}
		return nil
	}, nil
}
|
package packet
import (
"bytes"
"fmt"
"github.com/lunixbochs/struc"
"io/ioutil"
)
// ServerPacket is the wire representation of a server message as packed by
// struc; Size is kept in sync with len(Buffer) via the sizeof tag.
type ServerPacket struct {
Size uint32 `struc:"uint32,little,sizeof=Buffer"` //uint32 size
Precode [1]byte `struc:"[1]pad"` //this is an odd padding issue
OpCode uint16 `struc:"uint16,little"` //uint16 opcode
Buffer []byte
Wpos uint32 `struc:"uint32,little"` //uint32 _wpos
Rpos uint32 `struc:"uint32,little"` //uint32 _rpos
Compressed bool `struc:"bool,little"` //bool compressed
InflatedSize int `struc:"uint32,little"` //uint32 InflatedSize
Destination int `struc:"uint32,little"` //uint32 destination*/
}
// Sanitize is intended to clamp packet fields to safe ranges before
// encoding. All clamp calls are currently commented out, so it is a no-op
// placeholder kept for the Encode pipeline.
func (s *ServerPacket) Sanitize() {
/*s.DeliverTo = StringClamp(s.DeliverTo, 64)
s.To = StringClamp(s.To, 64)
s.From = StringClamp(s.From, 64)
s.FromAdmin = Clamp(s.FromAdmin, 0, 256)
s.ChanNum = Clamp(s.ChanNum, 0, 36000)
s.GuildDBId = Clamp(s.GuildDBId, 0, 36000)
s.Language = Clamp(s.Language, 0, 36000)
s.Queued = Clamp(s.Queued, 0, 256)
s.Message = StringClamp(s.Message, 511)*/
}
// Encode sanitizes the packet and serializes it with struc, returning the
// raw bytes.
func (s *ServerPacket) Encode() (packet []byte, err error) {
	s.Sanitize()
	//s.Size = len(s.Buffer)
	fmt.Println(len(s.Buffer))
	// Bug fix: the original passed make([]byte, len+8) to bytes.NewBuffer,
	// which pre-fills the buffer with len+8 zero bytes that struc.Pack then
	// appends after — corrupting the packet with leading garbage. Allocate
	// capacity only.
	buf := bytes.NewBuffer(make([]byte, 0, len(s.Buffer)+8))
	err = struc.Pack(buf, s)
	if err != nil {
		err = fmt.Errorf("Error packing payload: %s", err.Error())
		return
	}
	packet, err = ioutil.ReadAll(buf)
	if err != nil {
		err = fmt.Errorf("error reading buffer: %s", err.Error())
		return
	}
	return
}
// Decode fills s from the raw packet bytes using struc.
func (s *ServerPacket) Decode(packet []byte) (err error) {
	reader := bytes.NewBuffer(packet)
	return struc.Unpack(reader, s)
}
|
package eventstore
import (
"context"
"github.com/caos/logging"
auth_view "github.com/caos/zitadel/internal/auth/repository/eventsourcing/view"
org_model "github.com/caos/zitadel/internal/org/model"
org_es "github.com/caos/zitadel/internal/org/repository/eventsourcing"
"github.com/caos/zitadel/internal/org/repository/view/model"
)
// OrgRepository serves org search queries from the auth view, capping the
// requested page size at SearchLimit.
type OrgRepository struct {
SearchLimit uint64
*org_es.OrgEventstore
View *auth_view.View
}
// SearchOrgs runs an org search against the view, applying the repository's
// limit, and annotates the result with the latest processed sequence when
// that sequence could be read.
func (repo *OrgRepository) SearchOrgs(ctx context.Context, request *org_model.OrgSearchRequest) (*org_model.OrgSearchResult, error) {
	request.EnsureLimit(repo.SearchLimit)
	// Bug fix: keep the sequence error in its own variable. The original
	// reused `err` for the search call below, so the later `err == nil`
	// guard always passed — dereferencing a nil `sequence` whenever
	// GetLatestOrgSequence had failed.
	sequence, seqErr := repo.View.GetLatestOrgSequence()
	logging.Log("EVENT-7Udhz").OnError(seqErr).Warn("could not read latest org sequence")
	members, count, err := repo.View.SearchOrgs(request)
	if err != nil {
		return nil, err
	}
	result := &org_model.OrgSearchResult{
		Offset:      request.Offset,
		Limit:       request.Limit,
		TotalResult: uint64(count),
		Result:      model.OrgsToModel(members),
	}
	if seqErr == nil {
		result.Sequence = sequence.CurrentSequence
		result.Timestamp = sequence.CurrentTimestamp
	}
	return result, nil
}
|
// Package datasheet provides the operations about datasheet
package datasheet
import (
"fmt"
"github.com/apitable/apitable-sdks/apitable.go/lib/common"
athttp "github.com/apitable/apitable-sdks/apitable.go/lib/common/http"
"github.com/apitable/apitable-sdks/apitable.go/lib/common/profile"
"math"
)
// maxPageSize is the largest page size accepted by the fusion records API.
const maxPageSize = 1000
// recordPath is the records endpoint, parameterized by datasheet ID.
const recordPath = "/fusion/v1/datasheets/%s/records"
// attachPath is the attachments endpoint, parameterized by datasheet ID.
const attachPath = "/fusion/v1/datasheets/%s/attachments"
// Datasheet is an API client bound to a single datasheet.
type Datasheet struct {
common.Client
DatasheetId string
}
// NewDatasheet init datasheet instance
// NewDatasheet builds a datasheet client bound to datasheetId, initialized
// with the given credential and client profile. err is always nil; it is
// kept in the signature for API compatibility.
func NewDatasheet(credential *common.Credential, datasheetId string, clientProfile *profile.ClientProfile) (datasheet *Datasheet, err error) {
	datasheet = &Datasheet{DatasheetId: datasheetId}
	datasheet.Init().WithCredential(credential).WithProfile(clientProfile)
	return
}
// NewDescribeRecordRequest init datasheet record request instance
func NewDescribeRecordRequest() (request *DescribeRecordRequest) {
request = &DescribeRecordRequest{
BaseRequest: &athttp.BaseRequest{},
}
return
}
// NewCreateRecordsRequest init record creation request instance
func NewCreateRecordsRequest() (request *CreateRecordsRequest) {
request = &CreateRecordsRequest{
BaseRequest: &athttp.BaseRequest{},
}
return
}
// NewModifyRecordsRequest init record modification request instance
func NewModifyRecordsRequest() (request *ModifyRecordsRequest) {
request = &ModifyRecordsRequest{
BaseRequest: &athttp.BaseRequest{},
}
return
}
// NewDeleteRecordsRequest init record deletion request instance
func NewDeleteRecordsRequest() (request *DeleteRecordsRequest) {
request = &DeleteRecordsRequest{
BaseRequest: &athttp.BaseRequest{},
}
return
}
// NewUploadRequest init attachment upload request instance
func NewUploadRequest() (request *UploadRequest) {
request = &UploadRequest{
BaseRequest: &athttp.BaseRequest{},
}
return
}
// NewDescribeRecordResponse init record response container
func NewDescribeRecordResponse() (response *DescribeRecordResponse) {
response = &DescribeRecordResponse{
BaseResponse: &athttp.BaseResponse{},
}
return
}
// NewUploadResponse init attachment upload response container
func NewUploadResponse() (response *UploadResponse) {
response = &UploadResponse{
BaseResponse: &athttp.BaseResponse{},
}
return
}
// DescribeAllRecords use to query the details of all records.
//
// * according to `ViewId`, column name, `FieldId` or other information to query the record detailed information.
// * For details of filtering information please see `RecordRequest`。
// * If the parameter is empty, all records in the current datasheet are returned.
func (c *Datasheet) DescribeAllRecords(request *DescribeRecordRequest) (records []*Record, err error) {
	if request == nil {
		request = NewDescribeRecordRequest()
	}
	request.Init().SetPath(fmt.Sprintf(recordPath, c.DatasheetId))
	request.SetHttpMethod(athttp.GET)
	// Fetch the first page at the maximum allowed page size.
	request.PageSize = common.Int64Ptr(maxPageSize)
	request.PageNum = common.Int64Ptr(1)
	response := NewDescribeRecordResponse()
	err = c.Send(request, response)
	if err != nil {
		return nil, err
	}
	total := response.Data.Total
	if *total > maxPageSize {
		// Total number of pages. The original computed
		// math.Ceil(float64(*total / maxPageSize)) — integer division
		// happens first, so the ceiling had no effect and the loop bounds
		// fetched one extra, empty page whenever total was a multiple of
		// the page size.
		pages := int(math.Ceil(float64(*total) / float64(maxPageSize)))
		for page := 2; page <= pages; page++ {
			request.PageNum = common.Int64Ptr(int64(page))
			tmp := NewDescribeRecordResponse()
			err = c.Send(request, tmp)
			if err != nil {
				// if one error, all error.
				return nil, err
			}
			response.Data.Records = append(response.Data.Records, tmp.Data.Records...)
		}
	}
	response.Data.PageNum = common.Int64Ptr(0)
	return response.Data.Records, nil
}
// DescribeRecords queries a single page of record details. Filtering
// follows RecordRequest; when no paging options are set, the API default
// of 100 records per page applies.
func (c *Datasheet) DescribeRecords(request *DescribeRecordRequest) (pagination *RecordPagination, err error) {
	if request == nil {
		request = NewDescribeRecordRequest()
	}
	request.Init().SetPath(fmt.Sprintf(recordPath, c.DatasheetId))
	request.SetHttpMethod(athttp.GET)
	response := NewDescribeRecordResponse()
	if err = c.Send(request, response); err != nil {
		return nil, err
	}
	return response.Data, nil
}
// DescribeRecord queries records with the given filters (see
// RecordRequest) and returns the first match, or nil when nothing matched.
func (c *Datasheet) DescribeRecord(request *DescribeRecordRequest) (record *Record, err error) {
	if request == nil {
		request = NewDescribeRecordRequest()
	}
	request.Init().SetPath(fmt.Sprintf(recordPath, c.DatasheetId))
	request.SetHttpMethod(athttp.GET)
	response := NewDescribeRecordResponse()
	if err = c.Send(request, response); err != nil {
		return nil, err
	}
	if found := response.Data.Records; len(found) > 0 {
		return found[0], nil
	}
	return nil, nil
}
// CreateRecords used to create multiple records
func (c *Datasheet) CreateRecords(request *CreateRecordsRequest) (records []*Record, err error) {
	if request == nil {
		request = NewCreateRecordsRequest()
	}
	request.Init().SetPath(fmt.Sprintf(recordPath, c.DatasheetId))
	// Consistency: every other mutating call sets its verb explicitly
	// (PATCH for modify, DELETE for delete, POST for upload); create
	// previously relied on the request default.
	request.SetHttpMethod(athttp.POST)
	request.SetContentType(athttp.JsonContent)
	response := NewDescribeRecordResponse()
	err = c.Send(request, response)
	if err != nil {
		return nil, err
	}
	return response.Data.Records, nil
}
// ModifyRecords sends a PATCH updating multiple records and returns the
// records as stored by the server.
func (c *Datasheet) ModifyRecords(request *ModifyRecordsRequest) (records []*Record, err error) {
	if request == nil {
		request = NewModifyRecordsRequest()
	}
	request.Init().SetPath(fmt.Sprintf(recordPath, c.DatasheetId))
	request.SetContentType(athttp.JsonContent)
	request.SetHttpMethod(athttp.PATCH)
	response := NewDescribeRecordResponse()
	if err = c.Send(request, response); err != nil {
		return nil, err
	}
	return response.Data.Records, nil
}
// DeleteRecords sends a DELETE removing multiple records; only the
// transport error is reported.
func (c *Datasheet) DeleteRecords(request *DeleteRecordsRequest) (err error) {
	if request == nil {
		request = NewDeleteRecordsRequest()
	}
	request.Init().SetPath(fmt.Sprintf(recordPath, c.DatasheetId))
	request.SetHttpMethod(athttp.DELETE)
	return c.Send(request, NewDescribeRecordResponse())
}
// UploadFile used to upload attachments
// It reads the file at request.FilePath into a multipart body and POSTs it
// to the datasheet's attachments endpoint, returning the stored attachment
// metadata.
func (c *Datasheet) UploadFile(request *UploadRequest) (attachment *Attachment, err error) {
if request == nil {
request = NewUploadRequest()
}
// Build the multipart body and matching Content-Type from the local file.
body, contentType, err := common.FileBuffer(request.FilePath)
if err != nil {
return
}
request.Init()
request.SetPath(fmt.Sprintf(attachPath, c.DatasheetId))
request.SetHttpMethod(athttp.POST)
request.SetFile(body)
request.SetContentType(contentType)
response := NewUploadResponse()
err = c.Send(request, response)
if err != nil {
return nil, err
}
return response.Data, nil
}
|
package main
const (
// templateUsecaseUOW renders the usecase unit-of-work file that bundles
// every module's usecase behind one singleton. (Template text below is a
// string literal and must not be edited as Go code.)
templateUsecaseUOW = `// {{.Header}}
package usecase
import (
"sync"
{{- range $module := .Modules}}
{{clean $module.ModuleName}}usecase "{{$.GoModName}}/internal/modules/{{cleanPathModule $module.ModuleName}}/usecase"
{{- end }}
"{{.PackageName}}/codebase/factory/dependency"
)
type (
// Usecase unit of work for all usecase in modules
Usecase interface {
{{- range $module := .Modules}}
{{clean (upper $module.ModuleName)}}() {{clean $module.ModuleName}}usecase.{{clean (upper $module.ModuleName)}}Usecase
{{- end }}
}
usecaseUow struct {
{{- range $module := .Modules}}
{{clean $module.ModuleName}} {{clean $module.ModuleName}}usecase.{{clean (upper $module.ModuleName)}}Usecase
{{- end }}
}
)
var usecaseInst *usecaseUow
var once sync.Once
// SetSharedUsecase set singleton usecase unit of work instance
func SetSharedUsecase(deps dependency.Dependency) {
once.Do(func() {
usecaseInst = &usecaseUow{
{{- range $module := .Modules}}
{{clean $module.ModuleName}}: {{clean $module.ModuleName}}usecase.New{{clean (upper $module.ModuleName)}}Usecase(deps),
{{- end }}
}
})
}
// GetSharedUsecase get usecase unit of work instance
func GetSharedUsecase() Usecase {
return usecaseInst
}
{{- range $module := .Modules}}
func (uc *usecaseUow) {{clean (upper $module.ModuleName)}}() {{clean $module.ModuleName}}usecase.{{clean (upper $module.ModuleName)}}Usecase {
return uc.{{clean $module.ModuleName}}
}
{{- end }}
`
// templateUsecaseAbstraction renders a module's usecase interface stub.
templateUsecaseAbstraction = `// {{.Header}}
package usecase
import (
"context"
)
// {{clean (upper .ModuleName)}}Usecase abstraction
type {{clean (upper .ModuleName)}}Usecase interface {
// add method
Hello(ctx context.Context) string
}
`
// templateUsecaseImpl renders a module's usecase implementation with
// cache/SQL/Mongo dependencies toggled by the generator flags.
templateUsecaseImpl = `// {{.Header}}
package usecase
import (
"context"
{{ if not (or .SQLDeps .MongoDeps) }}// {{end}}"{{.GoModName}}/pkg/shared/repository"
"{{.PackageName}}/codebase/factory/dependency"
"{{.PackageName}}/codebase/interfaces"
"{{.PackageName}}/tracer"
)
type {{clean .ModuleName}}UsecaseImpl struct {
cache interfaces.Cache
{{if .SQLDeps}}repoSQL *repository.RepoSQL{{end}}
{{if .MongoDeps}}repoMongo *repository.RepoMongo{{end}}
}
// New{{clean (upper .ModuleName)}}Usecase usecase impl constructor
func New{{clean (upper .ModuleName)}}Usecase(deps dependency.Dependency) {{clean (upper .ModuleName)}}Usecase {
return &{{clean .ModuleName}}UsecaseImpl{
{{if .RedisDeps}}cache: deps.GetRedisPool().Cache(),{{end}}
{{if .SQLDeps}}repoSQL: repository.GetSharedRepoSQL(),{{end}}
{{if .MongoDeps}}repoMongo: repository.GetSharedRepoMongo(),{{end}}
}
}
func (uc *{{clean .ModuleName}}UsecaseImpl) Hello(ctx context.Context) (msg string) {
trace := tracer.StartTrace(ctx, "{{clean (upper .ModuleName)}}Usecase:Hello")
defer trace.Finish()
ctx = trace.Context()
{{if .SQLDeps}}msg, _ = uc.repoSQL.{{clean (upper .ModuleName)}}Repo.FindHello(ctx){{end}}
{{if .MongoDeps}}msg, _ = uc.repoMongo.{{clean (upper .ModuleName)}}Repo.FindHello(ctx){{end}}
return
}
`
)
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package predicates
import (
"yunion.io/x/jsonutils"
computeapi "yunion.io/x/onecloud/pkg/apis/compute"
computemodels "yunion.io/x/onecloud/pkg/compute/models"
"yunion.io/x/onecloud/pkg/scheduler/api"
"yunion.io/x/onecloud/pkg/scheduler/core"
)
// HostSchedtagPredicate filters candidate hosts by host-level schedtag
// requirements; the generic matching logic lives in the embedded
// ServerBaseSchedtagPredicate.
type HostSchedtagPredicate struct {
*ServerBaseSchedtagPredicate
}
// NewHostSchedtagPredicate constructs the predicate and registers itself as
// the concrete implementation for the embedded base predicate.
func NewHostSchedtagPredicate() core.FitPredicate {
p := new(HostSchedtagPredicate)
p.ServerBaseSchedtagPredicate = NewServerBaseSchedtagPredicate(p)
return p
}
// Name returns the identifier the scheduler uses for this predicate.
func (p *HostSchedtagPredicate) Name() string {
return "host_schedtag"
}
// Clone returns a fresh, independent instance of this predicate.
func (p *HostSchedtagPredicate) Clone() core.FitPredicate {
return NewHostSchedtagPredicate()
}
// hostSchedtagInputW adapts scheduling request data to the
// ISchedtagCustomer interface for host schedtag matching.
type hostSchedtagInputW struct {
schedData *api.SchedInfo
host string
backupHost string
schedtags []*computeapi.SchedtagConfig
}
// GetInputs wraps the unit's scheduling data, preferred host/backup host and
// host-typed schedtags into a single-customer slice.
func (p *HostSchedtagPredicate) GetInputs(u *core.Unit) []ISchedtagCustomer {
data := u.SchedData()
tags := data.Schedtags
return []ISchedtagCustomer{
&hostSchedtagInputW{
schedData: data,
host: data.PreferHost,
backupHost: data.PreferBackupHost,
schedtags: GetInputSchedtagByType(tags, "", computemodels.HostManager.KeywordPlural()),
}}
}
// Keyword identifies the customer side of the match ("server").
func (w *hostSchedtagInputW) Keyword() string {
return "server"
}
// ResourceKeyword identifies the resource side of the match ("host").
func (w *hostSchedtagInputW) ResourceKeyword() string {
return "host"
}
// GetDynamicConditionInput exposes the request as a JSON dict for dynamic
// schedtag condition evaluation.
func (w *hostSchedtagInputW) GetDynamicConditionInput() *jsonutils.JSONDict {
return w.schedData.ToConditionInput()
}
// IsSpecifyResource reports whether the request pins a concrete host (or
// backup host), in which case schedtag filtering is bypassed by callers.
func (w *hostSchedtagInputW) IsSpecifyResource() bool {
return w.host != "" || w.backupHost != ""
}
// GetSchedtags returns the host-typed schedtag requirements of the request.
func (w *hostSchedtagInputW) GetSchedtags() []*computeapi.SchedtagConfig {
return w.schedtags
}
// hostSchedtagResW adapts a scheduler candidate to the
// ISchedtagCandidateResource interface, delegating to the candidate getter.
type hostSchedtagResW struct {
core.Candidater
}
// GetName returns the candidate host's name.
func (r hostSchedtagResW) GetName() string {
return r.Candidater.Getter().Name()
}
// GetId returns the candidate host's id.
func (r hostSchedtagResW) GetId() string {
return r.Candidater.Getter().Id()
}
// Keyword returns the host model keyword of the candidate.
func (r hostSchedtagResW) Keyword() string {
return r.Candidater.Getter().Host().Keyword()
}
// GetSchedtags returns the schedtags bound to the candidate host.
func (r hostSchedtagResW) GetSchedtags() []computemodels.SSchedtag {
return r.Candidater.Getter().HostSchedtags()
}
// GetSchedtagJointManager returns the joint manager linking hosts to
// schedtags.
func (r hostSchedtagResW) GetSchedtagJointManager() computemodels.ISchedtagJointManager {
return r.Candidater.Getter().Host().GetSchedtagJointManager()
}
// GetDynamicConditionInput exposes the candidate description for dynamic
// schedtag condition evaluation.
func (r hostSchedtagResW) GetDynamicConditionInput() *jsonutils.JSONDict {
return r.GetSchedDesc()
}
// GetCandidateResource wraps a candidate for host schedtag matching.
func (p *HostSchedtagPredicate) GetCandidateResource(c core.Candidater) ISchedtagCandidateResource {
return hostSchedtagResW{c}
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package quotapool
import "sync"
// bufferSize is the size of the ringBuf buf served from a notifyQueueNodePool.
//
// Each node is 32+8*bufferSize bytes so at 28 a node is 256 bytes which
// feels like a nice number.
const bufferSize = 28
// notifyQueue provides an allocation efficient FIFO queue for chan struct{}.
//
// notifyQueue is not safe for concurrent use.
type notifyQueue struct {
// len counts notifyees across all nodes in the ring.
len int64
// head points at the node holding the queue front; nodes form a circular
// doubly-linked list, so head.prev is the tail. nil when empty.
head *node
// pool supplies and recycles node buffers; non-nil marks "initialized".
pool *notifyQueueNodePool
}
// initializeNotifyQueue initializes a notifyQueue.
// notifyQueue values should not be used until they have been initialized.
// It is illegal to initialize a notifyQueue more than once and this function
// will panic if called with an already initialized notifyQueue.
func initializeNotifyQueue(q *notifyQueue) {
if q.pool != nil {
panic("cannot re-initialize a notifyQueue")
}
defaultNotifyQueueNodePool.initialize(q)
}
var defaultNotifyQueueNodePool = newNotifyQueueNodePool()
// enqueue adds c to the end of the queue and returns the address of the added
// notifyee.
func (q *notifyQueue) enqueue(c chan struct{}) (n *notifyee) {
if q.head == nil {
// Empty queue: grab a node from the pool and link it to itself to form
// a one-element ring.
q.head = q.pool.pool.Get().(*node)
q.head.prev = q.head
q.head.next = q.head
}
// Append into the tail node's ring buffer; a nil result means that buffer
// is full and a new tail node must be spliced in before the head.
tail := q.head.prev
if n = tail.enqueue(c); n == nil {
newTail := q.pool.pool.Get().(*node)
tail.next = newTail
q.head.prev = newTail
newTail.prev = tail
newTail.next = q.head
if n = newTail.enqueue(c); n == nil {
panic("failed to enqueue into a fresh buffer")
}
}
q.len++
return n
}
// dequeue removes the current head of the queue which can be accessed with
// peek().
func (q *notifyQueue) dequeue() {
if q.head == nil {
return
}
q.head.dequeue()
if q.head.len == 0 {
// The head node's buffer drained: unlink it from the ring (or empty the
// queue if it was the only node), zero it, and return it to the pool.
oldHead := q.head
if oldHead.next == oldHead {
q.head = nil
} else {
q.head = oldHead.next
q.head.prev = oldHead.prev
q.head.prev.next = q.head
}
*oldHead = node{}
q.pool.pool.Put(oldHead)
}
q.len--
}
// peek returns the current head of the queue or nil if the queue is empty.
// It does not modify the queue. It is illegal to use the returned pointer after
// the next call to dequeue.
func (q *notifyQueue) peek() *notifyee {
if q.head == nil {
return nil
}
// Front element lives at the head node's ring-buffer head index.
return &q.head.buf[q.head.head]
}
// notifyQueueNodePool constructs notifyQueue objects which internally pool
// their buffers.
type notifyQueueNodePool struct {
pool sync.Pool
}
// newNotifyQueueNodePool returns a new notifyQueueNodePool which can be used
// to construct notifyQueues which internally pool their buffers.
func newNotifyQueueNodePool() *notifyQueueNodePool {
return &notifyQueueNodePool{
pool: sync.Pool{
New: func() interface{} { return &node{} },
},
}
}
// initialize initializes a which will share a sync.Pool of nodes with the
// other notifyQueue instances initialized with this pool.
func (p *notifyQueueNodePool) initialize(q *notifyQueue) {
*q = notifyQueue{pool: p}
}
// node is one link of the queue's circular doubly-linked list; it embeds a
// fixed-size ring buffer of notifyees.
type node struct {
ringBuf
prev, next *node
}
// notifyee wraps the channel a waiter is notified on.
type notifyee struct {
c chan struct{}
}
// ringBuf is a fixed-capacity FIFO of notifyees indexed modulo bufferSize.
type ringBuf struct {
buf [bufferSize]notifyee
// head is the index of the oldest element; len is the element count.
head int64
len int64
}
// enqueue appends c to the ring buffer and returns the stored notifyee's
// address, or nil when the buffer is already full.
func (rb *ringBuf) enqueue(c chan struct{}) *notifyee {
if rb.len == bufferSize {
return nil
}
i := (rb.head + rb.len) % bufferSize
rb.buf[i] = notifyee{c: c}
rb.len++
return &rb.buf[i]
}
// dequeue drops the oldest element, zeroing its slot so the channel is not
// retained. Callers must never invoke it on an empty buffer.
func (rb *ringBuf) dequeue() {
// NB: the notifyQueue never contains an empty ringBuf.
if rb.len == 0 {
panic("cannot dequeue from an empty buffer")
}
rb.buf[rb.head] = notifyee{}
rb.head++
rb.head %= bufferSize
rb.len--
}
|
package fimp
import (
"errors"
"fmt"
"github.com/futurehomeno/fimpgo"
"github.com/futurehomeno/fimpgo/fimptype/primefimp"
"github.com/mitchellh/mapstructure"
"github.com/thingsplex/tpflow/model"
"github.com/thingsplex/tpflow/node/base"
"time"
)
// VincTriggerNode is a flow start node that listens for vinculum (home hub)
// MQTT events — mode changes or shortcut activations — and triggers the flow.
type VincTriggerNode struct {
base.BaseNode
ctx *model.Context
transport *fimpgo.MqttTransport
// msgInStream receives filtered vinculum messages from the transport.
msgInStream fimpgo.MessageCh
// msgInStreamName is the unique channel registration key (flowId + nodeId).
msgInStreamName string
config VincTriggerConfig
}
// VincTriggerConfig is the user-editable configuration of the trigger node.
type VincTriggerConfig struct {
Timeout int64 // in seconds
// ValueFilter, when enabled, restricts triggering to one event value.
ValueFilter string
InputVariableType string
IsValueFilterEnabled bool
EventType string // mode/shortcut
}
// NewVincTriggerNode constructs the node, marks it as a reactor start node
// and derives the unique MQTT channel name from the flow and node ids.
func NewVincTriggerNode(flowOpCtx *model.FlowOperationalContext, meta model.MetaNode, ctx *model.Context) model.Node {
node := VincTriggerNode{ctx: ctx}
node.SetStartNode(true)
node.SetMsgReactorNode(true)
node.SetFlowOpCtx(flowOpCtx)
node.SetMeta(meta)
node.config = VincTriggerConfig{}
node.msgInStreamName = node.FlowOpCtx().FlowId + "_" + string(node.GetMetaNode().Id)
node.SetupBaseNode()
return &node
}
// Init subscribes the node to vinculum topics; it never fails.
func (node *VincTriggerNode) Init() error {
node.initSubscriptions()
return nil
}
// Cleanup detaches the node's message channel from the transport.
// NOTE(review): assumes LoadNodeConfig ran and set node.transport — confirm,
// otherwise this dereferences a nil transport.
func (node *VincTriggerNode) Cleanup() error {
node.transport.UnregisterChannel(node.msgInStreamName)
return nil
}
// initSubscriptions subscribes to vinculum command and event topics and
// registers a buffered channel that only receives vinculum-service messages.
func (node *VincTriggerNode) initSubscriptions() {
node.GetLog().Info("TriggerNode is listening for events . Name = ", node.Meta().Label)
node.transport.Subscribe("pt:j1/mt:cmd/rt:app/rn:vinculum/ad:1")
node.transport.Subscribe("pt:j1/mt:evt/rt:app/rn:vinculum/ad:1")
node.msgInStream = make(fimpgo.MessageCh, 10)
node.transport.RegisterChannelWithFilter(node.msgInStreamName, node.msgInStream,fimpgo.FimpFilter{
Topic: "pt:j1/+/rt:app/rn:vinculum/ad:1",
Service: "vinculum",
Interface: "*",
})
}
// LoadNodeConfig decodes the node's meta configuration and resolves the
// shared fimp MQTT transport from the connector registry.
// Note: a config decode error is logged but not fatal by itself; it is,
// however, returned to the caller at the end.
func (node *VincTriggerNode) LoadNodeConfig() error {
err := mapstructure.Decode(node.Meta().Config, &node.config)
if err != nil {
node.GetLog().Error("Error while decoding node configs.Err:", err)
}
var ok bool
fimpTransportInstance := node.ConnectorRegistry().GetInstance("fimpmqtt")
if fimpTransportInstance != nil {
node.transport, ok = fimpTransportInstance.Connection.GetConnection().(*fimpgo.MqttTransport)
if !ok {
node.GetLog().Error("can't cast connection to mqttfimpgo ")
return errors.New("can't cast connection to mqttfimpgo ")
}
} else {
node.GetLog().Error("Connector registry doesn't have fimp instance")
return errors.New("can't find fimp connector")
}
return err
}
// WaitForEvent is started during flow initialization or from another flow .
// Method acts as event listener and creates flow on new event .
// It loops forever, reacting to (a) filtered vinculum messages, (b) an
// optional inactivity timeout, and (c) control signals; it returns only on
// SIGNAL_STOP.
func (node *VincTriggerNode) WaitForEvent(nodeEventStream chan model.ReactorEvent) {
node.SetReactorRunning(true)
timeout := time.Second * time.Duration(node.config.Timeout)
var timer *time.Timer
if timeout == 0 {
// No timeout configured: create a far-future timer and stop it so the
// timer case effectively never fires.
timer = time.NewTimer(time.Hour * 24)
timer.Stop()
}else {
timer = time.NewTimer(timeout)
}
defer func() {
node.SetReactorRunning(false)
node.GetLog().Debug("Msg processed by the node ")
timer.Stop()
}()
for {
// Re-arm the inactivity timer on every loop iteration.
if timeout > 0 {
timer.Reset(timeout)
}
select {
case newMsg := <-node.msgInStream:
// eventValue stays "" unless the message matches the configured
// EventType (shortcut command or hub mode change).
var eventValue string
if newMsg.Payload.Type == "cmd.pd7.request" {
request := primefimp.Request{}
err := newMsg.Payload.GetObjectValue(&request)
if err != nil {
continue
}
if request.Component == "shortcut" && request.Cmd == "set" {
node.GetLog().Info("shortcut")
if node.config.EventType == "shortcut" {
// NOTE(review): request.Id is formatted with %.0f — presumably it
// decodes as a float; confirm against primefimp.Request.
eventValue = fmt.Sprintf("%.0f",request.Id)
}
}
}else if newMsg.Payload.Type == "evt.pd7.notify" {
notify := primefimp.Notify{}
err := newMsg.Payload.GetObjectValue(&notify)
if err != nil {
continue
}
if notify.Component == "hub" && notify.Cmd == "set" {
if node.config.EventType == "mode" {
hub := notify.GetModeChange()
if hub != nil {
eventValue = hub.Current
}else {
node.GetLog().Info("ERROR 2")
}
}
}
}
if eventValue != "" {
node.GetLog().Infof("Home event = %s",eventValue)
// Trigger either unconditionally or when the value filter matches.
if !node.config.IsValueFilterEnabled || ((eventValue == node.config.ValueFilter) && node.config.IsValueFilterEnabled) {
node.GetLog().Debug("Starting flow")
rMsg := model.Message{Payload: fimpgo.FimpMessage{Value: eventValue, ValueType: fimpgo.VTypeString}}
newEvent := model.ReactorEvent{Msg: rMsg, TransitionNodeId: node.Meta().SuccessTransition}
// Flow is executed within flow runner goroutine
node.FlowRunner()(newEvent)
}
}
case <-timer.C:
// Inactivity timeout: run the flow along the timeout transition.
node.GetLog().Debug("Timeout ")
newEvent := model.ReactorEvent{TransitionNodeId: node.Meta().TimeoutTransition}
node.GetLog().Debug("Starting new flow (timeout)")
node.FlowRunner()(newEvent)
node.GetLog().Debug("Flow started (timeout) ")
case signal := <-node.FlowOpCtx().TriggerControlSignalChannel:
node.GetLog().Debug("Control signal ")
if signal == model.SIGNAL_STOP {
node.GetLog().Info("VincTrigger stopped by SIGNAL_STOP ")
return
}else {
time.Sleep(50*time.Millisecond)
}
}
}
}
// OnInput is a no-op: this is a start/trigger node and never receives input
// from upstream nodes.
func (node *VincTriggerNode) OnInput(msg *model.Message) ([]model.NodeID, error) {
return nil, nil
}
/*
pt:j1/mt:cmd/rt:app/rn:vinculum/ad:1
{
"corid": "",
"ctime": "2020-03-04T17:05:13.836283",
"props": null,
"serv": "vinculum",
"tags": null,
"type": "cmd.pd7.request",
"uid": "eea412c0-5e31-11ea-f2e6-9547cfed7ea1",
"val_t": "object",
"ver": "1",
"val": {
"cmd": "set",
"component": "shortcut",
"id": 9,
"client": null,
"param": null,
"requestId": null
},
"resp_to": "pt:j1/mt:rsp/rt:cloud/rn:remote-client/ad:smarthome-app",
"src": "app"
}
*/
/*
{
"corid": "",
"ctime": "2020-03-04T17:05:48+0100",
"props": {},
"serv": "vinculum",
"tags": [],
"type": "evt.pd7.notify",
"uid": "8a59b1d2-5425-45c0-b177-aa30fc5d8299",
"val": {
"cmd": "set",
"component": "hub",
"id": "mode",
"param": {
"current": "sleep",
"prev": "away"
}
},
"val_t": "object",
"ver": "1"
}
pt:j1/mt:evt/rt:app/rn:vinculum/ad:1
{
"corid": "",
"ctime": "2020-03-04T17:05:48+0100",
"props": {},
"serv": "vinculum",
"tags": [],
"type": "evt.pd7.notify",
"uid": "815d394e-7e25-4a9f-af18-20fca36a08f0",
"val": {
"cmd": "set",
"component": "house",
"id": null,
"param": {
"fimp": true,
"learning": null,
"mode": "sleep",
"time": "2020-03-04T16:05:48Z",
"uptime": 718587649
}
},
"val_t": "object",
"ver": "1"
}
*/
|
package web
import (
"bytes"
"encoding/json"
"fmt"
"github.com/altwebplatform/core/storage"
"github.com/facebookgo/ensure"
"io"
"log"
"net/http"
"net/http/httptest"
"strconv"
"testing"
)
// EnsureSuccess fails the test unless the recorded response carries HTTP
// status 200, printing the body first to aid debugging. It returns rr so
// calls can be chained.
func EnsureSuccess(t *testing.T, rr *httptest.ResponseRecorder) *httptest.ResponseRecorder {
	code := rr.Code
	if code != 200 {
		fmt.Println("ERROR ", code, ": ", rr.Body.String())
	}
	ensure.DeepEqual(t, code, 200)
	return rr
}
// request performs an in-process HTTP request against the application router
// and returns the recorded response. A nil t makes failures panic instead of
// failing the test (so the helper can be used outside tests).
func request(t *testing.T, method string, url string, body []byte, headers map[string]string) *httptest.ResponseRecorder {
var bReader io.Reader
if body != nil {
bReader = bytes.NewReader(body)
}
req, err := http.NewRequest(method, url, bReader)
if err != nil {
if t != nil {
t.Fatal(err)
} else {
panic(err)
}
}
if body != nil {
req.Header.Add("Content-Length", strconv.Itoa(len(body)))
}
for header, value := range headers {
req.Header.Add(header, value)
}
rr := httptest.NewRecorder()
// Route through a freshly built router so each request is independent.
CreateRouter().ServeHTTP(rr, req)
return rr
}
// TestServicesAPI exercises the /api/v1/services CRUD endpoints end to end:
// it clears existing services, creates one, verifies it is listed, deletes
// it, and verifies the list is empty again.
func TestServicesAPI(t *testing.T) {
var rr *httptest.ResponseRecorder
var services = make(map[string][]storage.Service)
// first clean up using the API
rr = request(t, "GET", "/api/v1/services", nil, nil)
EnsureSuccess(t, rr)
MustUnmarshall(&services, rr.Body.Bytes())
for _, service := range services["services"] {
rr = request(t, "DELETE", "/api/v1/services/"+strconv.FormatUint(service.ID, 10), nil, nil)
EnsureSuccess(t, rr)
}
// Create one service and verify it is the only one listed.
rr = request(t, "POST", "/api/v1/services", MustMarshall(storage.Service{Name: "inserted"}), nil)
EnsureSuccess(t, rr)
rr = request(t, "GET", "/api/v1/services", nil, nil)
EnsureSuccess(t, rr)
MustUnmarshall(&services, rr.Body.Bytes())
ensure.True(t, len(services["services"]) == 1)
ensure.DeepEqual(t, services["services"][0].Name, "inserted")
// Delete it again and verify the collection is empty.
for _, service := range services["services"] {
rr = request(t, "DELETE", "/api/v1/services/"+strconv.FormatUint(service.ID, 10), nil, nil)
EnsureSuccess(t, rr)
}
rr = request(t, "GET", "/api/v1/services", nil, nil)
EnsureSuccess(t, rr)
MustUnmarshall(&services, rr.Body.Bytes())
ensure.True(t, len(services["services"]) == 0)
}
// MustMarshall JSON-encodes obj, terminating the process on failure.
func MustMarshall(obj interface{}) []byte {
	data, err := json.Marshal(obj)
	if err != nil {
		log.Fatal(err)
	}
	return data
}
// MustUnmarshall JSON-decodes data into obj, terminating the process on
// failure.
func MustUnmarshall(obj interface{}, data []byte) {
	if err := json.Unmarshal(data, obj); err != nil {
		log.Fatal(err)
	}
}
|
package utils
import (
"bytes"
"errors"
"io"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
)
const (
// Version is the release tag of this utils package.
Version = "v0.2.2"
)
// CallPath returns "file:line" of the stack frame s levels above the caller
// of CallPath (s == 0 means the direct caller).
func CallPath(s int) string {
	_, file, line, _ := runtime.Caller(s + 1)
	return file + ":" + strconv.Itoa(line)
}
// PathJoin joins any number of path elements with the OS-specific separator;
// thin wrapper over filepath.Join kept for API stability.
func PathJoin(paths ...string) string {
return filepath.Join(paths...)
}
// FileExists reports whether the named file or directory exists.
// Stat errors other than "does not exist" (e.g. permission denied) are
// treated as existing, preserving the original behavior.
func FileExists(name string) bool {
	_, err := os.Stat(name)
	return err == nil || !os.IsNotExist(err)
}
// PathExists reports whether path exists. The error is non-nil only when
// Stat fails for a reason other than non-existence.
func PathExists(path string) (bool, error) {
	_, err := os.Stat(path)
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// Abs expands a leading '~' in path into the current user's home directory
// and returns the absolute result; any other path (including "") is returned
// unchanged with a nil error.
func Abs(path string) (string, error) {
	if !strings.HasPrefix(path, "~") {
		return path, nil
	}
	usr, err := user.Current()
	if err != nil {
		return "", err
	}
	return filepath.Abs(filepath.Join(usr.HomeDir, path[1:]))
}
// MkFile creates (or truncates) the file at dest in overwrite mode,
// creating any missing parent directories first. A leading '~' in dest is
// expanded to the user's home directory when possible.
func MkFile(dest string) (*os.File, error) {
	if temp, err := Abs(dest); err == nil {
		dest = temp
	}
	// Replaces the original hand-rolled, error-swallowing directory walk with
	// os.MkdirAll, and reports directory-creation failures to the caller.
	if dir := filepath.Dir(dest); dir != "" {
		if err := os.MkdirAll(dir, 0755); err != nil {
			return nil, err
		}
	}
	// Overwrite mode: read/write, create if absent, truncate existing content.
	return os.OpenFile(dest, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
}
// CopyFile copies src to dest, creating dest's parent directories as needed
// (via MkFile), and returns the number of bytes copied.
func CopyFile(src, dest string) (w int64, err error) {
	srcFile, err := os.Open(src)
	if err != nil {
		return
	}
	defer srcFile.Close()
	dstFile, err := MkFile(dest)
	if err != nil {
		return
	}
	defer dstFile.Close()
	return io.Copy(dstFile, srcFile)
}
// GetRunnerPath returns the absolute path of the directory containing the
// running executable, falling back to os.Args[0] verbatim if it cannot be
// resolved.
func GetRunnerPath() string {
	abs, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		return os.Args[0]
	}
	return abs
}
// IsWindows reports whether the program is running on Windows.
// Simplified from the redundant `if cond { return true } return false`
// form (staticcheck S1008) to a direct boolean return.
func IsWindows() bool {
	return runtime.GOOS == "windows"
}
// ChMod applies mode to name on non-Windows systems and is a no-op on
// Windows. The Chmod error is deliberately ignored (best-effort).
func ChMod(name string, mode os.FileMode) {
if !IsWindows() {
os.Chmod(name, mode)
}
}
// Exec runs acts[0] with the remaining elements as its arguments and returns
// the combined stdout+stderr output. Calling it with no arguments is a no-op
// that returns ("", nil). Arguments must be pre-split (no shell parsing).
func Exec(acts ...string) (string, error) {
	if len(acts) == 0 {
		return "", nil
	}
	out, err := exec.Command(acts[0], acts[1:]...).CombinedOutput()
	return string(out), err
}
// Home returns the current user's home directory. When user.Current is
// unavailable (e.g. cross-compiled binaries without cgo) it falls back to
// OS-specific environment lookups.
func Home() (string, error) {
	// Renamed the local from `user` to `usr` so it no longer shadows the
	// os/user package identifier.
	if usr, err := user.Current(); err == nil {
		return usr.HomeDir, nil
	}
	if runtime.GOOS == "windows" {
		return homeWindows()
	}
	// Anything else is assumed to be Unix-like.
	return homeUnix()
}
func homeUnix() (string, error) {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); home != "" {
return home, nil
}
// If that fails, try the shell
var stdout bytes.Buffer
cmd := exec.Command("sh", "-c", "eval echo ~$USER")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
result := strings.TrimSpace(stdout.String())
if result == "" {
return "", errors.New("blank output when reading home directory")
}
return result, nil
}
func homeWindows() (string, error) {
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home := drive + path
if drive == "" || path == "" {
home = os.Getenv("USERPROFILE")
}
if home == "" {
return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
}
return home, nil
}
// InList reports whether str is an element of list.
func InList(str string, list []string) bool {
	for i := range list {
		if list[i] == str {
			return true
		}
	}
	return false
}
// RemoveRep removes duplicate strings from slc, keeping the order of first
// occurrence, by tracking already-seen keys in a map.
func RemoveRep(slc []string) []string {
	seen := map[string]byte{}
	result := []string{}
	for _, s := range slc {
		if _, dup := seen[s]; dup {
			continue
		}
		seen[s] = 0
		result = append(result, s)
	}
	return result
}
|
package models
import(
"encoding/json"
)
/**
 * Type definition for AuthenticationStatusEnum enum
 */
type AuthenticationStatusEnum int
/**
 * Value collection for AuthenticationStatusEnum enum
 */
// Values start at 1 so the zero value is distinguishable from any defined
// status; string forms are mapped in the *ToValue/*FromValue converters.
const (
AuthenticationStatus_KPENDING AuthenticationStatusEnum = 1 + iota
AuthenticationStatus_KSCHEDULED
AuthenticationStatus_KFINISHED
AuthenticationStatus_KREFRESHINPROGRESS
)
// MarshalJSON encodes the enum as its string API value (e.g. "kPending").
func (r AuthenticationStatusEnum) MarshalJSON() ([]byte, error) {
	return json.Marshal(AuthenticationStatusEnumToValue(r))
}
// UnmarshalJSON decodes a JSON string into the enum. Unknown string values
// map to AuthenticationStatus_KPENDING (the converter's default).
// Bug fix: the json.Unmarshal error was previously ignored, silently turning
// malformed input into kPending; it is now returned to the caller.
func (r *AuthenticationStatusEnum) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*r = AuthenticationStatusEnumFromValue(s)
	return nil
}
// AuthenticationStatusEnumToValue converts the enum to its string API value;
// unrecognized values fall back to "kPending".
func AuthenticationStatusEnumToValue(authenticationStatusEnum AuthenticationStatusEnum) string {
	names := map[AuthenticationStatusEnum]string{
		AuthenticationStatus_KPENDING:           "kPending",
		AuthenticationStatus_KSCHEDULED:         "kScheduled",
		AuthenticationStatus_KFINISHED:          "kFinished",
		AuthenticationStatus_KREFRESHINPROGRESS: "kRefreshInProgress",
	}
	if name, ok := names[authenticationStatusEnum]; ok {
		return name
	}
	return "kPending"
}
// AuthenticationStatusEnumArrayToValue maps each enum in the slice to its
// string representation, preserving order.
func AuthenticationStatusEnumArrayToValue(authenticationStatusEnum []AuthenticationStatusEnum) []string {
	converted := make([]string, 0, len(authenticationStatusEnum))
	for _, v := range authenticationStatusEnum {
		converted = append(converted, AuthenticationStatusEnumToValue(v))
	}
	return converted
}
// AuthenticationStatusEnumFromValue parses a string API value into the enum;
// unknown strings default to AuthenticationStatus_KPENDING.
func AuthenticationStatusEnumFromValue(value string) AuthenticationStatusEnum {
	values := map[string]AuthenticationStatusEnum{
		"kPending":           AuthenticationStatus_KPENDING,
		"kScheduled":         AuthenticationStatus_KSCHEDULED,
		"kFinished":          AuthenticationStatus_KFINISHED,
		"kRefreshInProgress": AuthenticationStatus_KREFRESHINPROGRESS,
	}
	if v, ok := values[value]; ok {
		return v
	}
	return AuthenticationStatus_KPENDING
}
|
package solutions
// multiply returns the product of two non-negative decimal integers given as
// strings, using grade-school long multiplication so inputs may exceed the
// range of built-in integer types.
func multiply(num1 string, num2 string) string {
	if num1 == "0" || num2 == "0" {
		return "0"
	}
	// digits holds raw digit values (not ASCII) most-significant first; the
	// product of an m-digit and an n-digit number has at most m+n digits.
	digits := make([]byte, len(num1)+len(num2))
	for i := len(num1) - 1; i >= 0; i-- {
		for j := len(num2) - 1; j >= 0; j-- {
			prod := (num1[i]-'0')*(num2[j]-'0') + digits[i+j+1]
			digits[i+j+1] = prod % 10
			digits[i+j] += prod / 10
		}
	}
	// Skip leading zeros, keeping at least one digit.
	start := 0
	for start < len(digits)-1 && digits[start] == 0 {
		start++
	}
	out := digits[start:]
	for k := range out {
		out[k] += '0'
	}
	return string(out)
}
|
// Copyright 2020 David Norminton. All rights reserved.
// Use of this source code is governed by a MIT License
// license that can be found in the LICENSE file.
// Package episodate uses the api provided by https://www.episodate.com to
// retrieve TV Show data. The user can view show data, add shows to a list,
// and view the created list
package episodate
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
)
// ApiShowUrl is the link to the show-details page of episodate minus the name of the show
const ApiShowUrl = "https://www.episodate.com/api/show-details?q="
// ShowDetails starts the routine to extract the data of the TV Show:
// it fetches the raw JSON for the named show and prints a summary to stdout.
func ShowDetails(show string) error {
data, err := GetShowData(GetApiShowUrl(show))
if err != nil {
return fmt.Errorf("Error: There was a problem getting show data! %v", err)
}
outputShowData(data)
return nil
}
// GetApiShowUrl generates the full API url for the chosen show by slugifying
// its name (spaces become dashes).
func GetApiShowUrl(show string) string {
	slug := strings.ReplaceAll(show, " ", "-")
	return ApiShowUrl + slug
}
// Show is the structure of the api at the show-details page; the payload
// nests all data under the "TvShow" key.
type Show struct {
TvShow ShowData `json:"TvShow"`
}
// ShowData is the structure of the show-details show data returned by the
// episodate API.
type ShowData struct {
	Id          int    `json:"id"`
	Name        string `json:"name"`
	// Bug fix: the tag was misspelled "paermalink", so Permalink was never
	// populated from the API response.
	Permalink   string         `json:"permalink"`
	Url         string         `json:"url"`
	Description string         `json:"description"`
	StartDate   string         `json:"start_date"`
	Country     string         `json:"country"`
	Status      string         `json:"status"`
	Network     string         `json:"network"`
	Rating      string         `json:"rating"`
	Episodes    []EpisodesList `json:"episodes"`
}
// EpisodesList is the structure of the per-episode data needed for the
// calendar (season/episode numbers, title and air date).
type EpisodesList struct {
Season int `json:"season"`
Episode int `json:"episode"`
Name string `json:"name"`
AirDate string `json:"air_date"`
}
// GetShowData gets the TV show's raw JSON payload from the API url.
// Bug fix: on failure it now returns a nil slice instead of the fake payload
// []byte("Error"), so callers cannot mistake the marker for real data.
func GetShowData(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return body, nil
}
// outputShowData prints a human-readable summary of the TV show, followed by
// its episode list, to the terminal.
func outputShowData(html []byte) {
	var data Show
	// Bug fix: the parse error was ignored (printing empty fields on bad
	// input); also dropped the redundant []byte(html) conversion.
	if err := json.Unmarshal(html, &data); err != nil {
		fmt.Printf("Error: could not parse show data: %v\n", err)
		return
	}
	show := data.TvShow
	fmt.Printf("Name: %s\n", show.Name)
	fmt.Printf("Permalink: %s\n", show.Permalink)
	fmt.Printf("Url: %s\n", show.Url)
	fmt.Printf("Description: %s\n", show.Description)
	fmt.Printf("StartDate: %s\n", show.StartDate)
	fmt.Printf("Country: %s\n", show.Country)
	fmt.Printf("Status: %s\n", show.Status)
	fmt.Printf("Network: %s\n", show.Network)
	fmt.Printf("Rating: %s\n", show.Rating)
	fmt.Println("------------------------------------------------")
	for _, ep := range show.Episodes {
		fmt.Printf("S%d E%d\n", ep.Season, ep.Episode)
		fmt.Printf("%s \n", ep.Name)
		fmt.Printf("Air Date %s\n", ep.AirDate)
		fmt.Println("------------------------------------------------")
	}
}
|
package main
import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)
// main reads "file.txt" line by line, treats each line as space-separated
// integers, and prints the sum of each line.
func main() {
	// Bug fix: the os.Open error was ignored; a failed open left file nil and
	// the scanner would panic on the first read.
	file, err := os.Open("file.txt")
	if err != nil {
		fmt.Println("open file.txt:", err)
		return
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		sum := 0
		for _, field := range strings.Split(scanner.Text(), " ") {
			// As before, non-numeric or empty fields contribute 0.
			value, _ := strconv.Atoi(field)
			sum += value
		}
		fmt.Println(sum)
	}
	// Surface read errors instead of silently truncating the input.
	if err := scanner.Err(); err != nil {
		fmt.Println("read file.txt:", err)
	}
}
|
package handle
import (
"github.com/valyala/fasthttp"
"mygo/service"
"strconv"
)
// GetGoodsById handles a fasthttp route: it parses the "id" path parameter
// as int64, looks the goods record up via the service layer, and writes a
// common success/error response.
// NOTE(review): `resp` is not declared in this file — presumably a
// package-level response value defined elsewhere in package handle; confirm
// it is safe to mutate across concurrent requests.
func GetGoodsById(ctx *fasthttp.RequestCtx) {
id := ctx.UserValue("id")
gid, err := strconv.ParseInt(id.(string), 10, 64)
if err != nil {
resp.Msg = "id输入错误,请确认"
CommonWriteError(ctx, resp)
return
}
resp.Data = service.GetGoodsById(gid)
CommonWriteSuccess(ctx, resp)
}
|
/*
* @lc app=leetcode.cn id=9 lang=golang
*
* [9] 回文数
*/
// @lc code=start
// isPalindrome reports whether the decimal representation of x reads the
// same forwards and backwards. Negative numbers are never palindromes, nor
// are non-zero multiples of 10 (they would need a leading zero).
// Bug fix: the original body was empty and did not compile (missing return).
func isPalindrome(x int) bool {
	if x < 0 || (x%10 == 0 && x != 0) {
		return false
	}
	// Reverse only the lower half of the digits and compare with the rest;
	// this avoids any overflow from reversing the full number.
	rev := 0
	for x > rev {
		rev = rev*10 + x%10
		x /= 10
	}
	// Even digit count: halves match exactly. Odd: drop the middle digit.
	return x == rev || x == rev/10
}
// @lc code=end
|
package controllers
import (
"christopher/helpers"
"christopher/models"
"encoding/json"
"github.com/gin-gonic/gin"
// "log"
)
// BalancePointForm is the bound form for balance-point requests.
type BalancePointForm struct {
Id string
User_uid string
Blance_point string `form:"blance_point"`
}
// Mygpoint is the JSON response payload carrying a single point value.
type Mygpoint struct {
G_Point float64 `json:"g_point"`
}
// MygpointDistance is the JSON response payload carrying the point balance
// together with the total recorded distance.
type MygpointDistance struct {
G_Point float64 `json:"g_point"`
Total_Distance float64 `json:"total_distance"`
}
// WorkOutAll is the JSON response payload for the aggregate workout value.
type WorkOutAll struct {
Workout float64 `json:"workout"`
}
// UserConvertPointForm is the bound form for distance-to-point conversion.
type UserConvertPointForm struct {
Id string
Distance string `form:"distance"`
Activity_type string `form:"activity_type"`
}
// GetMyPoint returns the caller's current point balance together with their
// total recorded distance for the given service.
func GetMyPoint(c *gin.Context) {
	SERVICE_NAME := c.Params.ByName("service_name")
	user_uid := c.Params.ByName("user_uid")
	var form BalancePointForm
	c.Bind(&form)
	myBPoint := &models.MyBPoint{
		User_uid: user_uid,
	}
	result, msg, err := myBPoint.GetMyCurrentPoint(SERVICE_NAME)
	resultDistance, msg2, err2 := myBPoint.GetTotalDistance(SERVICE_NAME)
	if msg2 == "err" {
		c.JSON(200, gin.H{
			"status": 500,
			"error":  err2,
		})
		// Bug fix: without this return the handler fell through and wrote a
		// second JSON body onto the same response.
		return
	}
	// Round-trip the values through JSON to populate the typed response
	// struct (kept from the original implementation).
	mapD := map[string]float64{"g_point": result, "total_distance": resultDistance}
	mapB, _ := json.Marshal(mapD)
	res := &MygpointDistance{}
	json.Unmarshal(mapB, &res)
	if msg == "err" {
		c.JSON(200, gin.H{
			"status": 500,
			"error":  err,
		})
	} else {
		c.JSON(200, gin.H{
			"status":  200,
			"data":    res,
			"message": "Success!",
		})
	}
}
// GetWorkOut returns the aggregate workout value for the given service,
// wrapped in a WorkOutAll payload.
func GetWorkOut(c *gin.Context) {
SERVICE_NAME := c.Params.ByName("service_name")
result, msg, err := models.GetWorkOut(SERVICE_NAME)
// Round-trip through JSON to populate the typed response struct.
mapD := map[string]float64{"workout": result}
mapB, _ := json.Marshal(mapD)
res := &WorkOutAll{}
json.Unmarshal(mapB, &res)
if msg == "err" {
c.JSON(200, gin.H{
"status": 500,
"error": err,
})
} else {
c.JSON(200, gin.H{
"status": 200,
"data": res,
"message": "Success!",
})
}
}
// GetPoints returns the total point value for the given service, wrapped in
// a Mygpoint payload.
func GetPoints(c *gin.Context) {
SERVICE_NAME := c.Params.ByName("service_name")
result, msg, err := models.GetPoints(SERVICE_NAME)
// Round-trip through JSON to populate the typed response struct.
mapD := map[string]float64{"g_point": result}
mapB, _ := json.Marshal(mapD)
res := &Mygpoint{}
json.Unmarshal(mapB, &res)
if msg == "err" {
c.JSON(200, gin.H{
"status": 500,
"error": err,
})
} else {
c.JSON(200, gin.H{
"status": 200,
"data": res,
"message": "Success!",
})
}
}
// ConvertPoint converts a submitted distance into points using the
// service/activity-specific constant and returns the result.
func ConvertPoint(c *gin.Context) {
SERVICE_NAME := c.Params.ByName("service_name")
var form UserConvertPointForm
c.Bind(&form)
mydistance := helpers.Convert_string_to_float(form.Distance)
constant_point := models.GetConstantPoint(SERVICE_NAME, form.Activity_type)
result := helpers.ConvertPoint(mydistance, constant_point)
// Round-trip through JSON to populate the typed response struct.
mapD := map[string]float64{"g_point": result}
mapB, _ := json.Marshal(mapD)
res := &Mygpoint{}
json.Unmarshal(mapB, &res)
c.JSON(200, gin.H{
"status": 200,
"data": res,
"message": "Success!",
})
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"net/http"
"net/http/httptest"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the WebUSBBlockedForUrls test with Ash and Lacros browser
// variants; the served test page is declared as a data dependency.
func init() {
testing.AddTest(&testing.Test{
Func: WebUSBBlockedForUrls,
LacrosStatus: testing.LacrosVariantExists,
Desc: "Behavior of WebUsbBlockedForUrls policy, checking that blocked URLs don't request for access to a USB device",
Contacts: []string{
"adikov@google.com", // Test author
},
SoftwareDeps: []string{"chrome"},
Attr: []string{"group:mainline", "informational"},
Params: []testing.Param{{
Name: "lacros",
ExtraSoftwareDeps: []string{"lacros"},
Fixture: fixture.LacrosPolicyLoggedIn,
Val: browser.TypeLacros,
}, {
Fixture: fixture.ChromePolicyLoggedIn,
Val: browser.TypeAsh,
}},
Data: []string{"web_usb_blocked.html"},
})
}
// WebUSBBlockedForUrls tests the WebUsbBlockedForUrls policy: for each
// policy configuration it loads a page that requests WebUSB access and
// verifies whether the permission prompt appears.
func WebUSBBlockedForUrls(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
	// Reserve ten seconds for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()
	// Connect to Test API to use it with the UI library.
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Error("Failed to create Test API connection: ", err)
	}
	ui := uiauto.New(tconn)
	for _, param := range []struct {
		name        string
		expectedAsk bool // expectedAsk states whether a dialog to ask for permission should appear or not.
		policy      *policy.WebUsbBlockedForUrls
	}{
		{
			name:        "include_url",
			expectedAsk: false,
			policy:      &policy.WebUsbBlockedForUrls{Val: []string{server.URL + "/web_usb_blocked.html"}},
		},
		{
			name:        "exclude_url",
			expectedAsk: true,
			policy:      &policy.WebUsbBlockedForUrls{Val: []string{"https://my_corp_site.com/conference.html"}},
		},
		{
			name:        "unset",
			expectedAsk: true,
			policy:      &policy.WebUsbBlockedForUrls{Stat: policy.StatusUnset},
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			// Perform cleanup.
			if err := policyutil.ResetChrome(ctx, fdms, cr); err != nil {
				s.Error("Failed to clean up: ", err)
			}
			// Update policies.
			if err := policyutil.ServeAndVerify(ctx, fdms, cr, []policy.Policy{param.policy}); err != nil {
				s.Error("Failed to update policies: ", err)
			}
			// Setup browser based on the chrome type.
			br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
			if err != nil {
				s.Error("Failed to open the browser: ", err)
			}
			defer closeBrowser(cleanupCtx)
			defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)
			// Open the test website.
			conn, err := br.NewConn(ctx, server.URL+"/web_usb_blocked.html")
			if err != nil {
				s.Error("Failed to open website: ", err)
			}
			defer conn.Close()
			// Click the page button that triggers the WebUSB permission request.
			if err := ui.LeftClick(nodewith.ClassName("btn"))(ctx); err != nil {
				// Bug fix: the message said "right click" although the action
				// performed is a left click.
				s.Fatal("Failed to left click the button: ", err)
			}
			cancelButton := nodewith.Name("Cancel").Role(role.Button)
			if param.expectedAsk {
				if err := ui.WithTimeout(10 * time.Second).WaitUntilExists(cancelButton)(ctx); err != nil {
					s.Error("Failed to find the USB prompt dialog: ", err)
				}
			} else {
				if err := ui.EnsureGoneFor(cancelButton, 10*time.Second)(ctx); err != nil {
					s.Error("Failed to make sure no USB prompt dialog shows: ", err)
				}
			}
		})
	}
}
|
package main
import (
"fmt"
"stress"
)
func main() {
err := stress.StressGo()
if err != nil {
fmt.Printf("stress go error:%s", err.Error())
}
} |
package db
import (
"fmt"
"testing"
)
// TestDb smoke-tests the shared DB() connection: it runs "select now()" into
// a map, then reads up to five News rows and prints their titles. Failures
// are only printed, not reported through t — this is a manual check.
func TestDb(t *testing.T) {
	result := make(map[string]interface{})
	if err := DB().Raw("select now() as a").Find(&result).Error; err != nil {
		fmt.Println("查询出错.")
	} else {
		fmt.Println("查询结果.", result["a"])
	}
	// Also exercise a model-based query against the news table.
	var confs []News
	if err := DB().Model(&News{}).Limit(5).Find(&confs).Error; err == nil {
		for _, conf := range confs {
			fmt.Println(conf.Title)
		}
	}
}
// News is a minimal model of the news table (primary key and title only).
type News struct {
	Id    int64  // primary key
	Title string // headline text
}
|
// minimumFromFour reads four integers from standard input and returns the
// smallest one.
func minimumFromFour() int {
	var smallest int
	for i := 0; i < 4; i++ {
		var n int
		fmt.Scan(&n)
		// The first value seeds the minimum; later values replace it when lower.
		if i == 0 || n < smallest {
			smallest = n
		}
	}
	return smallest
}
// Напишите функцию, находящую наименьшее из четырех введённых в этой же функции чисел.
|
package main
import (
"html/template"
"os"
"github.com/gin-contrib/static"
"github.com/gin-gonic/gin"
"github.com/yuriizinets/go-ssc"
)
// funcmap returns the template helper functions required by go-ssc so that
// component templates can be parsed and rendered.
func funcmap() template.FuncMap {
	return ssc.Funcs()
}
// main starts a gin HTTP server that renders the index page with go-ssc and
// dispatches its server-side component actions (SSA).
func main() {
	g := gin.Default()
	// Render the index page.
	g.GET("/", func(c *gin.Context) {
		ssc.RenderPage(c.Writer, &PageIndex{})
	})
	// Serve static assets from ./static under /static/.
	// NOTE(review): Use() is called after g.GET — confirm gin still routes
	// /static/ requests through this middleware as intended.
	g.Use(static.Serve("/static/", static.LocalFile("./static", true)))
	// SSA endpoint: runs an action on a named server-side component and
	// streams the re-rendered component back.
	g.POST("/SSA/:Component/:Action", func(c *gin.Context) {
		ssc.HandleSSA(
			c.Writer,
			// Parse all page templates with the ssc helpers for this component.
			template.Must(template.New(c.Param("Component")).Funcs(funcmap()).ParseGlob("*.html")),
			c.Param("Component"),
			c.PostForm("State"),
			c.Param("Action"),
			c.PostForm("Args"),
			// Components that may be targeted by SSA calls.
			[]ssc.Component{
				&ComponentCounter{},
				&ComponentSampleBinding{},
				&ComponentSampleParent{},
				&ComponentSampleChild{},
			},
		)
	})
	// Listen on localhost:25025 by default; PORT overrides (all interfaces).
	addr := "localhost:25025"
	if os.Getenv("PORT") != "" {
		addr = ":" + os.Getenv("PORT")
	}
	g.Run(addr)
}
|
// Copyright 2020 cloudeng llc. All rights reserved.
// Use of this source code is governed by the Apache-2.0
// license that can be found in the LICENSE file.
// Package profiling provides support for enabling profiling of
// command line tools via flags.
package profiling
import (
"fmt"
"os"
"runtime/pprof"
"strings"
"cloudeng.io/errors"
)
// ProfileSpec names a runtime/pprof profile together with the file its
// contents should be written to. CPU profiling is requested with the special
// name 'cpu' rather than the CPUProfiling API calls in runtime/pprof that
// predate the named profiles.
type ProfileSpec struct {
	Name     string
	Filename string
}

// ProfileFlag is a flag.Value/flag.Getter that accumulates arbitrary
// profile requests of the form <profile>:<filename>.
type ProfileFlag struct {
	Profiles []ProfileSpec
}

// Set implements flag.Value.
func (pf *ProfileFlag) Set(v string) error {
	fields := strings.Split(v, ":")
	// Exactly one colon is required: <profile>:<filename>.
	if len(fields) != 2 {
		return fmt.Errorf("%v not in <profile>:<filename> format", v)
	}
	pf.Profiles = append(pf.Profiles, ProfileSpec{Name: fields[0], Filename: fields[1]})
	return nil
}

// String implements flag.Value.
func (pf *ProfileFlag) String() string {
	specs := make([]string, len(pf.Profiles))
	for i, p := range pf.Profiles {
		specs[i] = p.Name + ":" + p.Filename
	}
	return strings.Join(specs, ",")
}

// Get implements flag.Getter.
func (pf *ProfileFlag) Get() interface{} {
	return pf.Profiles
}
// enableCPUProfiling turns on the runtime CPU profiler, writing to filename,
// and returns a function that stops profiling and closes the output file.
// An empty filename is a no-op: profiling is not started and the returned
// function does nothing. On setup failure the error is returned both
// directly and from the returned function, so deferred callers see it too.
func enableCPUProfiling(filename string) (func() error, error) {
	if filename == "" {
		return func() error { return nil }, nil
	}
	out, err := os.Create(filename)
	if err != nil {
		werr := fmt.Errorf("could not create CPU profile: %v: %v", filename, err)
		return func() error { return werr }, werr
	}
	if err := pprof.StartCPUProfile(out); err != nil {
		werr := fmt.Errorf("could not start CPU profile: %v", err)
		return func() error { return werr }, werr
	}
	stop := func() error {
		pprof.StopCPUProfile()
		return out.Close()
	}
	return stop, nil
}
// Start enables the named profile and returns a function that
// can be used to save its contents to the specified file.
// Typical usage is as follows:
//
//	save, err := profiling.Start("cpu", "cpu.out")
//	if err != nil {
//		panic(err)
//	}
//	defer save()
//
// For a heap profile simply use Start("heap", "heap.out"). Note that the
// returned save function cannot be used more than once and that Start must
// be called multiple times to create multiple heap output files for example.
// All of the predefined named profiles from runtime/pprof are supported. If
// a new, custom profile is requested, then the caller must obtain a reference
// to it via pprof.Lookup and the create profiling records appropriately.
func Start(name, filename string) (func() error, error) {
	// Both arguments are mandatory; the error is also returned from the save
	// function so a deferred save() surfaces it.
	if len(name) == 0 || len(filename) == 0 {
		err := fmt.Errorf("missing profile or filename: %q:%q", name, filename)
		return func() error { return err }, err
	}
	// CPU profiling uses a different runtime/pprof API than named profiles.
	if name == "cpu" {
		save, err := enableCPUProfiling(filename)
		return save, err
	}
	// NOTE(review): 0760 is an unusual mode (group-writable, no world access)
	// — confirm it is intentional.
	output, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0760)
	if err != nil {
		return func() error { return err }, err
	}
	p := pprof.Lookup(name)
	if p == nil {
		// Unknown name: create a custom profile; the caller must add records.
		p = pprof.NewProfile(name)
	}
	return func() error {
		// Collect both the write and close errors.
		errs := errors.M{}
		errs.Append(p.WriteTo(output, 0))
		errs.Append(output.Close())
		return errs.Err()
	}, nil
}
|
package global
import (
"github.com/ulricqin/goutils/filetool"
log "github.com/ulricqin/goutils/logtool"
"os"
"time"
)
// MaxCpustatHistory caps how many CPU stat samples are retained.
const MaxCpustatHistory = 60

// CollBaseInfoInterval is how often base host info is collected (default 1s).
var CollBaseInfoInterval time.Duration

// HttpPort is the HTTP listen port from http::port (default "1988").
var HttpPort string

// Version is the agent version read from the VERSION file.
var Version string

// initCfg populates all configuration-derived globals; call once at startup.
func initCfg() {
	initHttpConfig()
	initCollectBaseInfoInterval()
	initVersion()
}
// initVersion loads Version from the VERSION file and exits on failure.
func initVersion() {
	var err error
	Version, err = filetool.ReadFileToStringNoLn("VERSION")
	if err != nil {
		// NOTE(review): "Fetal" looks like a typo for "Fatal" — confirm the
		// logtool package actually exports this name.
		log.Fetal("read VERSION file fail")
		os.Exit(1)
	}
}
// initHttpConfig reads http::port into HttpPort, defaulting to "1988" when
// the config value is blank.
func initHttpConfig() {
	HttpPort = Config.String("http::port")
	if HttpPort == "" {
		log.Warn("http::port is blank. use default 1988")
		HttpPort = "1988"
	}
}
// initCollectBaseInfoInterval reads collector::base_interval_in_seconds into
// CollBaseInfoInterval, defaulting to 1s when unset or when system-config
// mode is active.
func initCollectBaseInfoInterval() {
	if UseSystemConfig {
		CollBaseInfoInterval = 1 * time.Second
		return
	}
	str := Config.String("collector::base_interval_in_seconds")
	if str == "" {
		log.Warn("collector::base_interval_in_seconds is blank. use default 1s")
		CollBaseInfoInterval = 1 * time.Second
		return
	}
	// The same key is read again, this time parsed as an integer.
	v, err := Config.Int64("collector::base_interval_in_seconds")
	if err != nil {
		// NOTE(review): logs at Warn level yet exits — confirm a fatal-level
		// log isn't more appropriate here.
		log.Warn("collector::base_interval_in_seconds config error")
		os.Exit(1)
	}
	CollBaseInfoInterval = time.Duration(v) * time.Second
}
|
package main
// execSelect will evaluate a SELECT statement against the store. It is a
// stub for now: it always succeeds and returns a fixed placeholder message.
func execSelect(input string) (string, error) {
	const notImplemented = "TODO: Implement select executor."
	return notImplemented, nil
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
"time"
)
// mapConvert maps boarding-pass letters to binary digits: F/L pick the lower
// half of a partition (0) and B/R the upper half (1).
var mapConvert = map[string]string{
	"F": "0",
	"B": "1",
	"R": "1",
	"L": "0",
}
// main times the puzzle solution over input.txt and prints the result.
func main() {
	began := time.Now()
	answer := run()
	fmt.Printf("Result is %v \n", answer)
	log.Printf("Code took %s", time.Since(began))
}
// run scans input.txt line by line and returns the highest seat ID found.
func run() int64 {
	file, err := os.Open("input.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	var best int64
	sc := bufio.NewScanner(file)
	for sc.Scan() {
		if id := getSeatID(sc.Text()); id > best {
			best = id
		}
	}
	// Surface any read error the scanner swallowed during iteration.
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
	return best
}
// getSeatID decodes a boarding pass such as "FBFBBFFRLR" into its numeric
// seat ID (row*8 + column): F/L are binary 0, B/R are binary 1; the first
// seven characters encode the row and the last three the column.
func getSeatID(seatCode string) int64 {
	bin := strings.NewReplacer("F", "0", "B", "1", "R", "1", "L", "0").Replace(seatCode)
	row, _ := strconv.ParseInt(bin[0:7], 2, 64)
	col, _ := strconv.ParseInt(bin[7:], 2, 64)
	return row*8 + col
}
|
package main
import "fmt"
// main demonstrates Go type assertions on interface{} values.
func main() {
	nombre := interface{}("fernando")
	numero := interface{}(12)
	// With interfaces we cannot use conversions, e.g. string(numero); we must
	// use a type assertion, written value.(Type).
	// fmt.Println(12 + numero) --> adding an int and an interface{} does not
	// compile; an assertion is required.
	// fmt.Println(12 + int(numero)) --> an interface cannot be converted.
	fmt.Println(12 + numero.(int))
	// The two-value form returns the asserted string and a boolean telling
	// whether the assertion succeeded (no panic on failure).
	name, ok := nombre.(string)
	if ok {
		fmt.Println(name)
	} else {
		fmt.Println("no es una string")
	}
	// This assertion fails (numero holds an int), so ok is false and numero
	// is set to the zero value produced by the failed assertion.
	numero, ok = numero.(string)
	if ok {
		fmt.Println(numero)
	} else {
		fmt.Println("no es una string")
	}
}
|
package main
import (
"encoding/hex"
"fmt"
"hash/fnv"
"log"
// "math"
"reflect"
"strings"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pcap"
)
// info accumulates, for one packet, the identity fields (id) that are hashed
// into a stable flow identifier and the numeric feature values (vals) that
// are emitted for analysis.
type info struct {
	id   []string
	vals []string
}
// Stringify renders the packet as "<id-hash> <v1 v2 ...>": a 32-bit hash of
// the underscore-joined identity fields followed by the space-separated
// feature values.
func (in *info) Stringify() string {
	return fmt.Sprintf("%v %v", hash32(strings.Join(in.id, "_")), strings.Join(in.vals, " "))
}
// hash8 folds s through 32-bit FNV-1a and narrows the digest to one byte.
func hash8(s string) uint8 {
	hasher := fnv.New32a()
	hasher.Write([]byte(s))
	return uint8(hasher.Sum32() % 256)
}
// hash32 returns the 32-bit FNV-1a digest of s.
func hash32(s string) uint32 {
	hasher := fnv.New32a()
	hasher.Write([]byte(s))
	return hasher.Sum32()
}
// total and totalN hold running sums and counts per field name; they are
// only referenced by the currently disabled statistics code in addVal.
var (
	total  = map[string]float64{}
	totalN = map[string]float64{}
)
// addVal appends val to the feature vector, formatted as a float32 with six
// decimal places. name is currently unused: a distance-from-running-mean
// feature (backed by the package-level total/totalN maps) was prototyped
// here and is disabled for now.
func addVal(to []string, name string, val int64) []string {
	formatted := fmt.Sprintf("%f", float32(val))
	return append(to, formatted)
}
// Add extracts features from one decoded packet layer: identity fields are
// appended to in.id, and every integer/boolean struct field of the layer
// (discovered via reflection, in declaration order) is serialized into
// in.vals. Layer types other than ICMPv4/IPv4/TCP/UDP/ARP are skipped.
func (in *info) Add(value interface{}) {
	var valToAnalyze interface{}
	switch l := value.(type) {
	case (*layers.ICMPv4):
		in.id = append(in.id, fmt.Sprintf("icmp_id=%v;seq=%v", l.Id, l.Seq))
		valToAnalyze = l
	case (*layers.IPv4):
		in.id = append(in.id, fmt.Sprintf("ip=%v:%v", l.SrcIP.String(), l.DstIP.String()))
		// IP addresses are additionally emitted as 8-bit hashes so they can
		// participate as numeric features.
		in.vals = append(in.vals, fmt.Sprintf("%v", hash8(l.SrcIP.String())))
		in.vals = append(in.vals, fmt.Sprintf("%v", hash8(l.DstIP.String())))
		valToAnalyze = l
	case (*layers.TCP):
		in.id = append(in.id, fmt.Sprintf("seq=%v;tcp=%v:%v", l.Seq, l.SrcPort, l.DstPort))
		valToAnalyze = l
	case (*layers.UDP):
		in.id = append(in.id, fmt.Sprintf("udp=%v:%v", l.SrcPort, l.DstPort))
		valToAnalyze = l
	case (*layers.ARP):
		in.id = append(in.id, fmt.Sprintf("arp=%v:%v", hex.EncodeToString(l.SourceHwAddress), hex.EncodeToString(l.DstHwAddress)))
		valToAnalyze = l
	default:
		// skip any other network layer for now to save space
		return
	}
	if valToAnalyze == nil {
		return
	}
	// Walk the layer struct and serialize every numeric/boolean field.
	reflected := reflect.ValueOf(valToAnalyze).Elem()
	if reflected.Type().Kind() != reflect.Struct {
		// log.Println("Unsupported type", reflected.Type().Kind())
		return
	}
	tp := reflected.Type()
	for i := 0; i < reflected.NumField(); i++ {
		elem := reflected.Field(i)
		elemName := tp.Field(i).Name
		switch elem.Type().Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			in.vals = addVal(in.vals, elemName, elem.Int())
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			in.vals = addVal(in.vals, elemName, int64(elem.Uint()))
		case reflect.Bool:
			// Booleans are encoded as 0/1.
			if true { // tooooo many vals; tooo slow
				var out uint8
				if elem.Bool() {
					out = 1
				}
				in.vals = append(in.vals, fmt.Sprintf("%d", out))
			}
		default:
			// Other kinds (strings, slices, nested structs) are ignored.
			//log.Println("Unknown field type -- %v", elem.Type().Kind())
		}
	}
}
// main reads a pcap capture from stdin and prints one feature line per
// packet: the elapsed time since the first packet followed by the per-layer
// features collected by info.Add.
func main() {
	handle, err := pcap.OpenOffline("/dev/stdin")
	if err != nil {
		log.Fatal(err)
	}
	defer handle.Close()
	// first remembers the timestamp of the first packet seen.
	var first *time.Time
	packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
	for packet := range packetSource.Packets() {
		if first == nil {
			first = &packet.Metadata().Timestamp
		}
		pkt := &info{}
		// first.Sub(ts) is <= 0 for later packets, so the negation yields the
		// non-negative elapsed seconds since the first packet.
		pkt.vals = []string{fmt.Sprintf("%v", -first.Sub(packet.Metadata().Timestamp).Seconds())}
		for _, l := range packet.Layers() {
			pkt.Add(l)
		}
		fmt.Println(pkt.Stringify())
	}
}
|
// Package lago provides a simple way to setup logging.
package lago
// Logger is the minimal leveled, printf-style logging contract consumed by
// this package; any implementation providing these methods can be plugged in.
type Logger interface {
	Errorf(format string, args ...interface{})
	Infof(format string, args ...interface{})
	Warnf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
}
|
package tiered_cacher
// TieredCacher is a placeholder for a cache that chains multiple layers
// behind a single facade.
type TieredCacher struct {
}

// NewTieredCacher builds an empty TieredCacher. The storage argument is
// accepted for API compatibility but is not used yet.
func NewTieredCacher(storage interface{}) *TieredCacher {
	return &TieredCacher{}
}
|
package mongodb
import (
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"golang.org/x/net/context"
)
// Commodity is a product document stored in MongoDB.
type Commodity struct {
	ID            primitive.ObjectID  `bson:"_id,omitempty" json:"id"`
	UserID        primitive.ObjectID  `bson:"user_id" json:"user_id"`             // owning user ID
	ShopID        primitive.ObjectID  `bson:"shop_id" json:"shop_id"`             // shop ID
	CategoryID    primitive.ObjectID  `bson:"category_id" json:"category_id"`     // category ID
	Name          string              `bson:"name" json:"name"`                   // display name
	Tags          []string            `bson:"tags" json:"tags"`                   // tags
	Props         map[string]string   `bson:"props" json:"props"`                 // attributes
	Stock         int                 `bson:"stock" json:"stock"`                 // stock on hand
	Price         int                 `bson:"price" json:"price"`                 // price (fen/cents)
	OldPrice      int                 `bson:"old_price" json:"old_price"`         // original price (fen/cents)
	Weight        int                 `bson:"weight" json:"weight"`               // weight (grams)
	Options       map[string][]string `bson:"options" json:"options"`             // selectable options
	OptionsPrice  map[string]int      `bson:"options_price" json:"options_price"` // per-option price (fen/cents)
	OptionsStock  map[string]int      `bson:"options_stock" json:"options_stock"` // per-option stock
	Summary       string              `bson:"summary" json:"summary"`             // short description
	Content       string              `bson:"content" json:"content"`             // detail content
	Template      string              `bson:"template" json:"template"`           // render template
	ThumbImage    string              `bson:"thumb_image" json:"thumb_image"`     // thumbnail image
	Images        []string            `bson:"images" json:"images"`               // carousel images
	ViewCount     int                 `bson:"view_count" json:"view_count"`       // view count
	CommentCount  int                 `bson:"comment_count" json:"comment_count"` // comment count
	FavoriteCount int                 `bson:"favorite_count" json:"favorite_count"` // favorite count
	Priority      int                 `bson:"priority" json:"priority"`           // sort priority
	CreateTime    int64               `bson:"create_time" json:"create_time"`     // created (ns since epoch, see Save)
	UpdateTime    int64               `bson:"update_time" json:"update_time"`     // last updated (ns since epoch)
	DeleteTime    int64               `bson:"delete_time" json:"delete_time"`     // soft-delete stamp, 0 = live
}
// table returns the MongoDB collection name backing this model.
func (m *Commodity) table() string {
	return "Commodity"
}
// Save inserts the document, stamping CreateTime first, and stores the newly
// assigned ObjectID back into m.ID. It reports whether the insert succeeded.
func (m *Commodity) Save() bool {
	m.CreateTime = time.Now().UnixNano()
	result, err := InsertOne(m.table(), m)
	if err != nil {
		// Check the error before touching result: the previous version read
		// result.InsertedID unconditionally and panicked on a failed insert.
		return false
	}
	m.ID = result.InsertedID.(primitive.ObjectID)
	return true
}
// Modify updates the stored document by ID, refreshing UpdateTime first.
func (m *Commodity) Modify() bool {
	m.UpdateTime = time.Now().UnixNano()
	err := UpdateOneByID(m.table(), m.ID, *m, m)
	return err == nil
}

// Trash soft-deletes the document by stamping DeleteTime.
func (m *Commodity) Trash() bool {
	m.DeleteTime = time.Now().UnixNano()
	err := UpdateOneByID(m.table(), m.ID, *m, m)
	return err == nil
}

// Restore clears DeleteTime, undoing a Trash.
func (m *Commodity) Restore() bool {
	m.DeleteTime = 0
	err := UpdateOneByID(m.table(), m.ID, *m, m)
	return err == nil
}

// Remove permanently deletes the document identified by m.ID.
func (m *Commodity) Remove() bool {
	err := DeleteOneByID(m.table(), m.ID, m)
	return err == nil
}

// Get loads the document identified by m.ID into m.
func (m *Commodity) Get() bool {
	err := FindOneByID(m.table(), m.ID, m)
	return err == nil
}
// GetByID parses a hex ObjectID and loads the matching document into m.
// It returns false immediately on a malformed hex string instead of querying
// with the zero ObjectID as the previous version did.
func (m *Commodity) GetByID(hex string) bool {
	id, err := primitive.ObjectIDFromHex(hex)
	if err != nil {
		return false
	}
	err = FindOneByID(m.table(), id, m)
	return err == nil
}
// FindOne loads the first document matching filter into m.
func (m *Commodity) FindOne(filter interface{}) bool {
	err := FindOne(m.table(), filter, m)
	return err == nil
}

// InsertMany inserts a batch of documents and returns their newly assigned
// IDs, or nil when the insert failed.
func (m *Commodity) InsertMany(list []interface{}) []interface{} {
	if result, err := InsertMany(m.table(), list); err == nil {
		return result.InsertedIDs
	}
	return nil
}

// FindMany returns up to limit documents matching filter in the given sort
// order, or nil when the query failed.
// NOTE(review): a Decode failure panics rather than returning an error.
func (m *Commodity) FindMany(filter interface{}, limit int64, sort interface{}) *[]Commodity {
	if cursor, err := FindMany(m.table(), filter, limit, sort); err == nil {
		var list []Commodity
		for cursor.Next(context.Background()) {
			var item Commodity
			if err := cursor.Decode(&item); err != nil {
				panic(err)
			}
			list = append(list, item)
		}
		_ = cursor.Close(context.Background())
		return &list
	}
	return nil
}

// FindManySkip is FindMany with a skip offset for pagination.
func (m *Commodity) FindManySkip(filter interface{}, skip int64, limit int64, sort interface{}) *[]Commodity {
	if cursor, err := FindManySkip(m.table(), filter, skip, limit, sort); err == nil {
		var list []Commodity
		for cursor.Next(context.Background()) {
			var item Commodity
			if err := cursor.Decode(&item); err != nil {
				panic(err)
			}
			list = append(list, item)
		}
		_ = cursor.Close(context.Background())
		return &list
	}
	return nil
}
// UpdateOne applies update to the first document matching filter, stamping
// update_time, and returns the number of modified documents (0 on error).
func (m *Commodity) UpdateOne(filter interface{}, update bson.M) int64 {
	update["update_time"] = time.Now().UnixNano()
	if result, err := UpdateOne(m.table(), filter, update); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// UpdateMany applies update to all documents matching filter, stamping
// update_time, and returns the number of modified documents (0 on error).
func (m *Commodity) UpdateMany(filter interface{}, update bson.M) int64 {
	update["update_time"] = time.Now().UnixNano()
	if result, err := UpdateMany(m.table(), filter, update); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// TrashOne soft-deletes the first document matching filter.
func (m *Commodity) TrashOne(filter interface{}) int64 {
	if result, err := UpdateOne(m.table(), filter, bson.M{"delete_time": time.Now().UnixNano()}); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// RestoreOne clears the soft-delete stamp on the first matching document.
func (m *Commodity) RestoreOne(filter interface{}) int64 {
	if result, err := UpdateOne(m.table(), filter, bson.M{"delete_time": 0}); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// TrashMany soft-deletes all documents matching filter.
func (m *Commodity) TrashMany(filter interface{}) int64 {
	if result, err := UpdateMany(m.table(), filter, bson.M{"delete_time": time.Now().UnixNano()}); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// RestoreMany clears the soft-delete stamp on all matching documents.
func (m *Commodity) RestoreMany(filter interface{}) int64 {
	if result, err := UpdateMany(m.table(), filter, bson.M{"delete_time": 0}); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// DeleteOne permanently deletes the first document matching filter and
// returns the number of deleted documents (0 on error).
func (m *Commodity) DeleteOne(filter interface{}) int64 {
	if result, err := DeleteOne(m.table(), filter); err == nil {
		return result.DeletedCount
	}
	return 0
}

// DeleteMany permanently deletes all documents matching filter and returns
// the number of deleted documents (0 on error).
func (m *Commodity) DeleteMany(filter interface{}) int64 {
	if result, err := DeleteMany(m.table(), filter); err == nil {
		return result.DeletedCount
	}
	return 0
}

// Count returns the number of documents matching filter (0 on error).
func (m *Commodity) Count(filter interface{}) int64 {
	if result, err := Count(m.table(), filter); err == nil {
		return result
	}
	return 0
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"regexp"
)
// main interactively looks up a bilibili user's profile and follower counts:
// it loops reading a UID from stdin, queries the account-info and
// relation-stat APIs, extracts fields with regexps, prints them, then asks
// whether to repeat.
func main() {
	//我的uid = 344485144
	fmt.Printf("\n\n欢迎来到粉丝数获取界面 \n\n")
	var mid string
	var i bool
	for {
		fmt.Printf("请输入要获取的UP主UID:(例如我的uid =344485144)\n")
		fmt.Scanln(&mid)
		fmt.Printf("\n稍等片刻...")
		// First request: account info (name / sex / avatar).
		herf2 := "https://api.bilibili.com/x/space/acc/info?mid=" + mid + "&jsonp=jsonp"
		client2 := http.Client{}
		requst2, err := http.NewRequest("GET", herf2, nil)
		if err != nil {
			// Check the error BEFORE touching requst2: the previous version
			// set headers first and would panic on a nil request.
			fmt.Printf("请求错误,请检查网络")
			log.Fatal(err)
			return
		}
		requst2.Header.Add("Accept", "application/json, text/plain, */*")
		requst2.Header.Add("Referer", "https://space.bilibili.com/"+mid+"?spm_id_from=333.788.b_765f7570696e666f.1")
		requst2.Header.Add("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36")
		response2, err := client2.Do(requst2)
		if err != nil {
			log.Fatal(err)
			return
		}
		html2, err := ioutil.ReadAll(response2.Body)
		// Close immediately: a defer inside this endless loop would only run
		// at program exit and leak one connection per iteration.
		response2.Body.Close()
		if err != nil {
			log.Fatal(err)
			return
		}
		htmlbyte2 := string(html2)
		str2 := regexp.MustCompile(`name":"(.*?)"`)
		name := str2.FindAllStringSubmatch(htmlbyte2, 1)
		str3 := regexp.MustCompile(`"sex":"(.*?)"`)
		sex := str3.FindAllStringSubmatch(htmlbyte2, 1)
		str4 := regexp.MustCompile(`"face":"(.*?)"`)
		picture := str4.FindAllStringSubmatch(htmlbyte2, 1)
		// Second request: follower / following counts.
		herf := "https://api.bilibili.com/x/relation/stat?vmid=" + mid + "&jsonp=jsonp&callbac"
		client := http.Client{}
		requst, err := http.NewRequest("GET", herf, nil)
		if err != nil {
			fmt.Printf("请求错误,请检查网络")
			log.Fatal(err)
			return
		}
		requst.Header.Add("Accept", "application/json, text/plain, */*")
		requst.Header.Add("Referer", "https://space.bilibili.com/"+mid+"/fans/follow")
		requst.Header.Add("User-Agent", "ozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36")
		response, err := client.Do(requst)
		if err != nil {
			fmt.Printf("回应错误")
			log.Fatal(err)
			return
		}
		htmlbyte, err := ioutil.ReadAll(response.Body)
		// Same as above: close now rather than deferring inside the loop.
		response.Body.Close()
		if err != nil {
			fmt.Printf("字符解析错误")
			log.Fatal(err)
			return
		}
		html := string(htmlbyte)
		str := regexp.MustCompile(`follower":([\d]+)`)
		date := str.FindAllStringSubmatch(html, 1)
		str = regexp.MustCompile(`following":([\d]+)`)
		date2 := str.FindAllStringSubmatch(html, 1)
		if date2 == nil || sex == nil {
			fmt.Println()
			// Dump both raw payloads for debugging. The previous version used
			// html itself as the Printf format string, which corrupts output
			// whenever the response contains a '%'.
			fmt.Printf("%s%s", html, htmlbyte2)
			fmt.Printf("\n请求过程中发生意料之外的事...稍后请重试")
			fmt.Printf("\n获取失败,true回车->再来一次或输入false回车结束程序\n")
			fmt.Scanln(&i)
			if i == false {
				break
			}
			continue
		}
		sex2 := sex[0][1]
		name2 := name[0][1]
		picture2 := picture[0][1]
		fmt.Printf("\n name:%s sex:%s 头像:%s", name2, sex2, picture2)
		numb := date[0][1]
		numb2 := date2[0][1]
		// Counts with more than four digits are shown in units of 万 (10k).
		if len(numb) > 4 {
			fmt.Printf(" 关注数为: %s个 粉丝数为 %s万", numb2, numb[:len(numb)-4])
		} else {
			fmt.Printf(" 关注数为: %s个 粉丝数为 %s", numb2, numb)
		}
		fmt.Printf("\n获取成功,true回车->再来一次或输入false回车结束程序\n")
		fmt.Scanln(&i)
		if i == false {
			break
		}
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package graphics contains graphics-related utility functions for local tests.
package graphics
import (
"context"
"io"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/fsutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/lacros/lacrosfixt"
"chromiumos/tast/local/cpu"
"chromiumos/tast/local/crash"
"chromiumos/tast/local/syslog"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// init registers the graphics test fixtures: GPU hang watching, GPU crash
// watching, Chrome sessions (ash and lacros), a no-Chrome environment, an
// IGT environment, and idle-test Chrome sessions.
func init() {
	testing.AddFixture(&testing.Fixture{
		Name:            "gpuWatchHangs",
		Desc:            "Check if there any gpu related hangs during a test",
		Contacts:        []string{"ddmail@google.com", "chromeos-gfx@google.com"},
		Impl:            &gpuWatchHangsFixture{},
		PreTestTimeout:  2 * time.Minute,
		PostTestTimeout: 2 * time.Minute,
	})
	testing.AddFixture(&testing.Fixture{
		Name:            "gpuWatchDog",
		Desc:            "Check if there any gpu related problems(hangs+crashes) observed during a test",
		Contacts:        []string{"ddmail@google.com", "chromeos-gfx@google.com"},
		Parent:          "gpuWatchHangs",
		Impl:            &gpuWatchDogFixture{},
		PreTestTimeout:  5 * time.Second,
		PostTestTimeout: 5 * time.Second,
	})
	testing.AddFixture(&testing.Fixture{
		Name:     "chromeGraphics",
		Desc:     "Logged into a user session for graphics testing",
		Contacts: []string{"ddmail@google.com", "chromeos-gfx@google.com"},
		Parent:   "gpuWatchDog",
		Impl: chrome.NewLoggedInFixture(func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
			return nil, nil
		}),
		SetUpTimeout:    chrome.LoginTimeout,
		ResetTimeout:    chrome.ResetTimeout,
		TearDownTimeout: chrome.ResetTimeout,
	})
	testing.AddFixture(&testing.Fixture{
		Name:     "chromeGraphicsLacros",
		Desc:     "Logged into a user session for graphics testing (lacros)",
		Contacts: []string{"lacros-team@google.com"},
		Parent:   "gpuWatchDog",
		Impl: chrome.NewLoggedInFixture(func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
			return lacrosfixt.NewConfig().Opts()
		}),
		// Extra time: lacros may need to be downloaded/provisioned at login.
		SetUpTimeout:    chrome.LoginTimeout + 7*time.Minute,
		ResetTimeout:    chrome.ResetTimeout,
		TearDownTimeout: chrome.ResetTimeout,
	})
	testing.AddFixture(&testing.Fixture{
		Name:            "graphicsNoChrome",
		Desc:            "Stop UI before tests, start UI after",
		Contacts:        []string{"chromeos-gfx@google.com"},
		Impl:            &graphicsNoChromeFixture{},
		Parent:          "gpuWatchHangs",
		SetUpTimeout:    upstart.UIRestartTimeout,
		TearDownTimeout: upstart.UIRestartTimeout,
	})
	testing.AddFixture(&testing.Fixture{
		Name: "chromeGraphicsIgt",
		Desc: "Stop and later restart services for IGT",
		// Fixed: the two addresses were previously packed into one string,
		// which breaks tooling that emails each contact.
		Contacts:        []string{"markyacoub@google.com", "chromeos-gfx-display@google.com"},
		Parent:          "graphicsNoChrome",
		Impl:            &graphicsIgtFixture{},
		SetUpTimeout:    upstart.UIRestartTimeout,
		TearDownTimeout: upstart.UIRestartTimeout,
	})
	testing.AddFixture(&testing.Fixture{
		Name:            "chromeGraphicsIdle",
		Desc:            "Logged into a user session for graphics Idle testing. This fixture starts a chrome dedicated for graphics.Idle tests",
		Contacts:        []string{"ddmail@google.com", "chromeos-gfx@google.com"},
		Parent:          "gpuWatchDog",
		Impl:            &graphicsIdleFixture{fOpt: []chrome.Option{}},
		SetUpTimeout:    chrome.LoginTimeout,
		ResetTimeout:    chrome.ResetTimeout,
		TearDownTimeout: chrome.ResetTimeout,
	})
	testing.AddFixture(&testing.Fixture{
		Name:            "chromeGraphicsIdleArc",
		Desc:            "Logged into a user session for graphics Idle testing. This fixture starts an arc enabled chrome dedicated for graphics.Idle.*arc tests",
		Contacts:        []string{"ddmail@google.com", "chromeos-gfx@google.com"},
		Parent:          "gpuWatchDog",
		Impl:            &graphicsIdleFixture{fOpt: []chrome.Option{chrome.ARCEnabled()}},
		SetUpTimeout:    chrome.LoginTimeout,
		ResetTimeout:    chrome.ResetTimeout,
		TearDownTimeout: chrome.ResetTimeout,
	})
}
// graphicsNoChromeFixture stops the Chrome UI for the duration of the tests
// and restarts it afterwards; it keeps no state of its own.
type graphicsNoChromeFixture struct {
}

// Reset is a no-op; there is no per-test state to restore.
func (f *graphicsNoChromeFixture) Reset(ctx context.Context) error {
	return nil
}

// PreTest is a no-op.
func (f *graphicsNoChromeFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
}

// PostTest is a no-op.
func (f *graphicsNoChromeFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
}
// SetUp stops the ui upstart job so tests run without Chrome owning the GPU.
func (f *graphicsNoChromeFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	s.Log("Setup: Stop Chrome UI")
	if err := upstart.StopJob(ctx, "ui"); err != nil {
		s.Fatal("Failed to stop ui job: ", err)
	}
	return nil
}
// TearDown restarts the Chrome UI that SetUp stopped.
func (f *graphicsNoChromeFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	// Log label fixed: this runs at teardown, not setup.
	s.Log("TearDown: Start Chrome UI")
	// Report the failure instead of silently discarding the error; later
	// tests depend on the ui job being back up.
	if err := upstart.EnsureJobRunning(ctx, "ui"); err != nil {
		s.Error("Failed to start ui job: ", err)
	}
}
// gpuWatchHangsFixture scans syslog during each test for driver hang
// messages.
type gpuWatchHangsFixture struct {
	regexp       *regexp.Regexp              // matches known GPU-hang syslog lines
	postFunc     []func(ctx context.Context) error // checks run after each test, in reverse order
	tearDownFunc []func(ctx context.Context) error // checks run at fixture teardown, in reverse order
}
// SetUp compiles the regexp used to spot GPU hangs in syslog and, when the
// kernel exposes a hangcheck timer, registers a teardown check that restores
// the original timer value if a test changed it.
func (f *gpuWatchHangsFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	// TODO: This needs to be kept in sync for new drivers, especially ARM.
	hangRegexStrs := []string{
		`drm:i915_hangcheck_elapsed`,
		`drm:i915_hangcheck_hung`,
		`Hangcheck timer elapsed...`,
		`drm/i915: Resetting chip after gpu hang`,
		`GPU HANG:.+\b[H|h]ang on (rcs0|vcs0|vecs0)`,
		`hangcheck recover!`, // Freedreno
		`mtk-mdp.*: cmdq timeout`, // Mediatek
		`amdgpu: GPU reset begin!`,
		`scp ipi .* ack time out !`, // Mediatek
		`mtk-iommu .*: fault`, // Mediatek (at least MT8183)
		`qcom-venus .*video-codec: SFR message from FW:`, // Qualcomm
	}
	// TODO(pwang): add regex for memory faults.
	f.regexp = regexp.MustCompile(strings.Join(hangRegexStrs, "|"))
	s.Log("Setup regex to detect GPU hang: ", f.regexp)
	if hangCheckTimer, err := GetHangCheckTimer(); err != nil {
		testing.ContextLog(ctx, "Warning: failed to get hangcheck timer. This is normal for kernels older than 5.4: ", err)
	} else {
		testing.ContextLog(ctx, "Hangcheck timer: ", hangCheckTimer)
		// Only tries to check the hangcheck timer if we successfully get the timer.
		f.tearDownFunc = append(f.tearDownFunc, func(ctx context.Context) error {
			tTimer, err := GetHangCheckTimer()
			if err != nil {
				return errors.Wrap(err, "failed to get hangcheck timer")
			}
			testing.ContextLogf(ctx, "Original hangcheck timer: %v, current hangcheck timer: %v", hangCheckTimer, tTimer)
			if tTimer == hangCheckTimer {
				return nil
			}
			testing.ContextLog(ctx, "The hangcheck timer is not the same. Tries to set it back to ", hangCheckTimer)
			if err := SetHangCheckTimer(ctx, hangCheckTimer); err != nil {
				return errors.Wrap(err, "failed to set hangcheck timer back")
			}
			return nil
		})
	}
	return nil
}
// TearDown runs the registered teardown checks in reverse registration
// order, reporting each failure individually.
func (f *gpuWatchHangsFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	for i := len(f.tearDownFunc) - 1; i >= 0; i-- {
		if err := f.tearDownFunc[i](ctx); err != nil {
			s.Error("TearDown failed: ", err)
		}
	}
}

// Reset is a no-op; hang detection is re-armed per test in PreTest.
func (f *gpuWatchHangsFixture) Reset(ctx context.Context) error {
	return nil
}
// checkHangs checks gpu hangs from the reader. It returns an error if the
// syslog cannot be read or a line matching a known GPU hang pattern is found.
func (f *gpuWatchHangsFixture) checkHangs(ctx context.Context, reader *syslog.Reader) error {
	for {
		e, err := reader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			return errors.Wrap(err, "failed to read syslog")
		}
		// MatchString is sufficient here: only the fact of a match matters,
		// so avoid allocating submatch slices for every log line.
		if f.regexp.MatchString(e.Line) {
			return errors.Errorf("GPU hang: %s", e.Line)
		}
	}
	return nil
}
// PreTest arms per-test checks: it speeds up log flushing so hangs land in
// syslog promptly, and starts a syslog reader whose contents are scanned for
// hang patterns in PostTest.
func (f *gpuWatchHangsFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	f.postFunc = nil
	// Attempt flushing system logs every second instead of every 10 minutes.
	dirtyWritebackDuration, err := GetDirtyWritebackDuration()
	if err != nil {
		s.Log("Failed to get initial dirty writeback duration: ", err)
	} else {
		SetDirtyWritebackDuration(ctx, 1*time.Second)
		// Restore the initial dirty writeback duration even if setting it to
		// 1 second failed. Note this implicitly calls sync.
		f.postFunc = append(f.postFunc, func(ctx context.Context) error {
			return SetDirtyWritebackDuration(ctx, dirtyWritebackDuration)
		})
	}
	// syslog.NewReader reports syslog message written after it is started for GPU hang detection.
	sysLogReader, err := syslog.NewReader(ctx)
	if err != nil {
		s.Log("Failed to get syslog reader: ", err)
	} else {
		f.postFunc = append(f.postFunc, func(ctx context.Context) error {
			defer sysLogReader.Close()
			return f.checkHangs(ctx, sysLogReader)
		})
	}
}
// PostTest runs the checks armed in PreTest (in reverse order), folding all
// failures into a single reported error.
func (f *gpuWatchHangsFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
	var postErr error
	for i := len(f.postFunc) - 1; i >= 0; i-- {
		if err := f.postFunc[i](ctx); err != nil {
			// NOTE(review): relies on errors.Wrap accepting a nil error for
			// the first failure — confirm the tast errors package semantics.
			postErr = errors.Wrap(postErr, err.Error())
		}
	}
	if postErr != nil {
		s.Error("PostTest failed: ", postErr)
	}
}
// gpuWatchDogFixture detects GPU crash files generated while a test runs.
type gpuWatchDogFixture struct {
	postFunc []func(ctx context.Context) error // checks run after each test, in reverse order
}

// SetUp is a no-op; crash baselines are captured per test in PreTest.
func (f *gpuWatchDogFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	return nil
}

// TearDown is a no-op.
func (f *gpuWatchDogFixture) TearDown(ctx context.Context, s *testing.FixtState) {}

// Reset is a no-op.
func (f *gpuWatchDogFixture) Reset(ctx context.Context) error {
	return nil
}
// getGPUCrash returns gpu related crash files found in system.
func (f *gpuWatchDogFixture) getGPUCrash() ([]string, error) {
	crashFiles, err := crash.GetCrashes(crash.DefaultDirs()...)
	if err != nil {
		return nil, err
	}
	// Filter to GPU crashes, identified by the GPU state file extension.
	var crashes []string
	for _, file := range crashFiles {
		if strings.HasSuffix(file, crash.GPUStateExt) {
			crashes = append(crashes, file)
		}
	}
	return crashes, nil
}
// checkNewCrashes compares the current GPU crash files against oldCrashes.
// Any crash file that appeared since the baseline is copied to outDir (best
// effort) and reported via the returned error; it also returns an error if
// the current crash list cannot be retrieved.
func (f *gpuWatchDogFixture) checkNewCrashes(ctx context.Context, oldCrashes []string, outDir string) error {
	crashes, err := f.getGPUCrash()
	if err != nil {
		return err
	}
	// Index the baseline so the comparison is linear rather than the previous
	// nested-loop scan. (Loop variables are also renamed: the old code's
	// `crash` shadowed the imported crash package.)
	seen := make(map[string]struct{}, len(oldCrashes))
	for _, c := range oldCrashes {
		seen[c] = struct{}{}
	}
	// Collect crash files generated during the test.
	var newCrashes []string
	for _, c := range crashes {
		if _, ok := seen[c]; !ok {
			newCrashes = append(newCrashes, c)
		}
	}
	if len(newCrashes) == 0 {
		return nil
	}
	sort.Strings(newCrashes)
	resultErr := errors.Errorf("found gpu crash file: %v", newCrashes)
	// Copy each new crash next to the test results; copy failures are folded
	// into the reported error rather than aborting the loop.
	for _, c := range newCrashes {
		destPath := filepath.Join(outDir, filepath.Base(c))
		if err := fsutil.CopyFile(c, destPath); err != nil {
			resultErr = errors.Wrapf(resultErr, "failed to copy crash file %v: %v", c, err.Error())
		}
	}
	return resultErr
}
// PreTest records the current set of GPU crash files and arms a PostTest
// check that flags any new ones.
func (f *gpuWatchDogFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	f.postFunc = nil
	// Record PreTest crashes.
	crashes, err := f.getGPUCrash()
	if err != nil {
		s.Log("Failed to get gpu crashes: ", err)
	} else {
		f.postFunc = append(f.postFunc, func(ctx context.Context) error {
			return f.checkNewCrashes(ctx, crashes, s.OutDir())
		})
	}
}

// PostTest runs the checks armed in PreTest (in reverse order), folding all
// failures into a single reported error.
func (f *gpuWatchDogFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
	var postErr error
	for i := len(f.postFunc) - 1; i >= 0; i-- {
		if err := f.postFunc[i](ctx); err != nil {
			// NOTE(review): relies on errors.Wrap accepting a nil error for
			// the first failure — confirm the tast errors package semantics.
			postErr = errors.Wrap(postErr, err.Error())
		}
	}
	if postErr != nil {
		s.Error("PostTest failed: ", postErr)
	}
}
// graphicsIgtFixture prepares the device for running IGT graphics tests
// (see SetUp/TearDown); it is stateless.
type graphicsIgtFixture struct {
}
// Reset is a no-op; nothing needs restoring between tests.
func (f *graphicsIgtFixture) Reset(ctx context.Context) error {
	return nil
}
// PreTest is a no-op.
func (f *graphicsIgtFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
}
// PostTest is a no-op.
func (f *graphicsIgtFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
}
// SetUp stops services that would interfere with IGT tests.
func (f *graphicsIgtFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	// Tests such as kms_flip requires Suspend and Wake-up which are achieved using the RTC wake-up alarm.
	// tlsdated is holding /dev/rtc so IGT fails to take the lock and set a wake up alarm. Hence, it
	// is required to stop the tlsdated before running the IGT test.
	s.Log("SetUp: Stop tlsdated")
	if err := upstart.StopJob(ctx, "tlsdated"); err != nil {
		s.Fatal("Failed to stop tlsdated job: ", err)
	}
	return nil
}
// TearDown restarts the tlsdated job that SetUp stopped.
func (f *graphicsIgtFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	s.Log("TearDown: Start tlsdated")
	// The returned error was previously dropped on the floor; surface it so
	// a device left without tlsdated is visible in the logs.
	if err := upstart.EnsureJobRunning(ctx, "tlsdated"); err != nil {
		s.Error("Failed to start tlsdated job: ", err)
	}
}
// graphicsIdleFixture starts chrome, waits till the machine is cooled down then proceed.
type graphicsIdleFixture struct {
	cr *chrome.Chrome // active Chrome session handed to tests via SetUp's return value
	fOpt []chrome.Option // options passed to chrome.New when launching Chrome
}
// Reset checks that the Chrome session started in SetUp is still usable;
// a failure here makes the framework recreate the fixture.
func (f *graphicsIdleFixture) Reset(ctx context.Context) error {
	if err := f.cr.Responded(ctx); err != nil {
		return errors.Wrap(err, "existing Chrome connection is unusable")
	}
	return nil
}
// PreTest is a no-op.
func (f *graphicsIdleFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
}
// PostTest is a no-op.
func (f *graphicsIdleFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
}
// SetUp launches Chrome and waits for the CPU to settle so tests start
// from a stable thermal/load state. The Chrome handle is returned as the
// fixture value.
func (f *graphicsIdleFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	cr, err := chrome.New(ctx, f.fOpt...)
	if err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	// Cool-down failure is logged, not fatal: tests still run, just with
	// potentially noisier measurements.
	if err := cpu.WaitUntilStabilized(ctx, cpu.CoolDownConfig{
		PollTimeout: 2 * time.Minute,
		PollInterval: 2 * time.Second,
		TemperatureThresholdMode: cpu.TemperatureThresholdPerModel,
		TemperatureThreshold: 55000, // presumably milli-degrees C (55C) — confirm against the cpu package
		CoolDownMode: cpu.CoolDownPreserveUI,
	}); err != nil {
		s.Log("Failed to get stable CPU before running tests: ", err)
	}
	// Lock after a successful start so tests cannot restart Chrome out
	// from under the fixture.
	chrome.Lock()
	f.cr = cr
	return cr
}
// TearDown releases the Chrome lock taken in SetUp and closes the session.
func (f *graphicsIdleFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	chrome.Unlock()
	if err := f.cr.Close(ctx); err != nil {
		s.Log("Failed to close Chrome connection: ", err)
	}
	f.cr = nil
}
|
package config
import (
"fmt"
"strings"
)
// defaultSlotName is used when no replication slot name is configured.
const defaultSlotName = "gulstream"

// postgres holds the PostgreSQL source settings read from configuration.
type postgres struct {
	ConnectionURI string `mapstructure:"connectionURI"`
	SlotName string `mapstructure:"slotName"`
}

// Validate checks that a connection URI is present and that it requests a
// replication-mode connection.
func (p postgres) Validate() error {
	if p.ConnectionURI == "" {
		return fmt.Errorf("config: postgres connection URI is empty")
	}
	if !strings.Contains(p.ConnectionURI, "?replication=database") {
		return fmt.Errorf("config: postgres replication mode is disabled. turn on [?replication=database]")
	}
	return nil
}

// GetSlotName returns the configured slot name, falling back to the default.
func (p postgres) GetSlotName() string {
	if p.SlotName == "" {
		return defaultSlotName
	}
	return p.SlotName
}
|
package database
import (
"errors"
"github.com/ChristophBe/weather-data-server/data/models"
"github.com/neo4j/neo4j-go-driver/neo4j"
)
type measuringNodeRepositoryImpl struct{}
// parseMeasuringNodeFromRecord extracts the node aliased "m" from a query
// record and converts it to a models.MeasuringNode. Missing properties
// fall back to the defaults passed to the parse helpers below.
func (measuringNodeRepositoryImpl) parseMeasuringNodeFromRecord(record neo4j.Record) (interface{}, error) {
	nodeData, ok := record.Get("m")
	if !ok {
		err := errors.New("can not parse measuring form record")
		return nil, err
	}
	node := nodeData.(neo4j.Node)
	props := node.Props()
	measuringNode := models.MeasuringNode{
		Id: node.Id(),
		Name: parseStringProp(props["name"], ""),
		Lat: parseFloatProp(props["lat"], 0.0),
		Lng: parseFloatProp(props["lng"], 0.0),
		IsPublic: parseBoolProp(props["isPublic"], false),
		IsOutdoors: parseBoolProp(props["isOutdoors"], true),
	}
	return measuringNode, nil
}
// castListOfMeasuringNodes converts a []interface{} of MeasuringNode
// values (as produced by the generic transaction helpers) into a typed
// slice.
func (measuringNodeRepositoryImpl) castListOfMeasuringNodes(input interface{}) (nodes []models.MeasuringNode) {
	raw := input.([]interface{})
	nodes = make([]models.MeasuringNode, len(raw))
	for i := range raw {
		nodes[i] = raw[i].(models.MeasuringNode)
	}
	return nodes
}
// FetchMeasuringNodeById loads a single measuring node by its internal
// graph id.
func (m measuringNodeRepositoryImpl) FetchMeasuringNodeById(nodeId int64) (measuringNode models.MeasuringNode, err error) {
	stmt := "MATCH (m:MeasuringNode) WHERE id(m) = $nodeId RETURN m"
	var result interface{}
	result, err = doReadTransaction(stmt, map[string]interface{}{"nodeId": nodeId}, parseSingleItemFromResult(m.parseMeasuringNodeFromRecord))
	if err == nil {
		measuringNode = result.(models.MeasuringNode)
	}
	return
}
// FetchAllMeasuringNodeUserRelations returns the relationship types that
// connect the given user to the given measuring node.
func (measuringNodeRepositoryImpl) FetchAllMeasuringNodeUserRelations(nodeId int64, userId int64) ([]string, error) {
	stmt := "MATCH (u:User)-[r]->(n:MeasuringNode) WHERE id(u) = $userId and id(n) = $nodeId return type(r)"
	params := map[string]interface{}{
		"userId": userId,
		"nodeId": nodeId,
	}
	// Collect the relation type names from the result stream.
	collect := func(result neo4j.Result) (interface{}, error) {
		var relations []string
		if err := result.Err(); err != nil {
			return relations, err
		}
		for result.Next() {
			relations = append(relations, result.Record().GetByIndex(0).(string))
		}
		return relations, nil
	}
	result, err := doReadTransaction(stmt, params, collect)
	if err != nil {
		return make([]string, 0), err
	}
	return result.([]string), nil
}
// FetchAllMeasuringNodes returns every measuring node in the database.
func (m measuringNodeRepositoryImpl) FetchAllMeasuringNodes() ([]models.MeasuringNode, error) {
	params := map[string]interface{}{}
	stmt := "MATCH (m:MeasuringNode) RETURN m"
	results, err := doReadTransaction(stmt, params, parseListFromResult(m.parseMeasuringNodeFromRecord))
	if err != nil {
		return []models.MeasuringNode{}, err
	}
	// Convert through castListOfMeasuringNodes like every other list query
	// in this file: the sibling methods treat parseListFromResult's value
	// as []interface{}, so the previous direct assertion
	// results.([]models.MeasuringNode) was inconsistent and would panic at
	// runtime if that is the actual element type.
	return m.castListOfMeasuringNodes(results), nil
}
// CreateMeasuringNode stores a new measuring node owned by the given user
// and returns the persisted node.
func (m measuringNodeRepositoryImpl) CreateMeasuringNode(node models.MeasuringNode, userId int64) (models.MeasuringNode, error) {
	stmt := "MATCH (o:User) WHERE id(o) = $ownerId CREATE (o)-[:OWNER]->(m:MeasuringNode {name: $name, lat: $lat, lng: $lng, isPublic: $isPublic, isOutdoors: $isOutdoors}) RETURN m"
	params := map[string]interface{}{
		"name": node.Name,
		"lat": node.Lat,
		"lng": node.Lng,
		"isPublic": node.IsPublic,
		"isOutdoors": node.IsOutdoors,
		"ownerId": userId,
	}
	created, err := doWriteTransaction(stmt, params, parseSingleItemFromResult(m.parseMeasuringNodeFromRecord))
	if err != nil {
		return models.MeasuringNode{}, err
	}
	return created.(models.MeasuringNode), nil
}
// FetchNodesOwnedByUserId returns all measuring nodes owned by the user.
func (m measuringNodeRepositoryImpl) FetchNodesOwnedByUserId(userId int64) ([]models.MeasuringNode, error) {
	stmt := "MATCH (m:MeasuringNode)<-[:OWNER]-(u:User) WITH m, u WHERE id(u) = $userId RETURN m"
	results, err := doReadTransaction(stmt, map[string]interface{}{"userId": userId}, parseListFromResult(m.parseMeasuringNodeFromRecord))
	if err != nil {
		return []models.MeasuringNode{}, err
	}
	return m.castListOfMeasuringNodes(results), nil
}
// FetchAllPublicNodes returns all measuring nodes flagged as public.
func (m measuringNodeRepositoryImpl) FetchAllPublicNodes() ([]models.MeasuringNode, error) {
	stmt := "MATCH (m:MeasuringNode) WHERE m.isPublic = true RETURN m"
	results, err := doReadTransaction(stmt, map[string]interface{}{}, parseListFromResult(m.parseMeasuringNodeFromRecord))
	if err != nil {
		return []models.MeasuringNode{}, err
	}
	return m.castListOfMeasuringNodes(results), nil
}
// FetchAllVisibleNodesByUserId returns every node the user may see:
// public nodes plus nodes the user is related to.
func (m measuringNodeRepositoryImpl) FetchAllVisibleNodesByUserId(userId int64) ([]models.MeasuringNode, error) {
	stmt := "MATCH (m:MeasuringNode) WITH m OPTIONAL MATCH (m)<-[]-(u:User) WITH m, u WHERE m.isPublic OR id(u) = $userId RETURN m"
	results, err := doReadTransaction(stmt, map[string]interface{}{"userId": userId}, parseListFromResult(m.parseMeasuringNodeFromRecord))
	if err != nil {
		return []models.MeasuringNode{}, err
	}
	return m.castListOfMeasuringNodes(results), nil
}
// FetchAllNodesByInvitationId returns the nodes an invitation grants
// access to.
func (m measuringNodeRepositoryImpl) FetchAllNodesByInvitationId(invitationId int64) ([]models.MeasuringNode, error) {
	stmt := "MATCH (m:MeasuringNode)<-[:INVITATION_FOR]-(i:Invitation) WHERE id(i) = $invitationId RETURN m"
	results, err := doReadTransaction(stmt, map[string]interface{}{"invitationId": invitationId}, parseListFromResult(m.parseMeasuringNodeFromRecord))
	if err != nil {
		return []models.MeasuringNode{}, err
	}
	return m.castListOfMeasuringNodes(results), nil
}
// CreateAuthorisationRelation grants the user access to the node by
// creating an IS_AUTHORIZED relationship between them.
func (m measuringNodeRepositoryImpl) CreateAuthorisationRelation(node models.MeasuringNode, user models.User) (err error) {
	stmt := "MATCH (u:User), (n:MeasuringNode) WHERE id(u) = $userId and id(n) = $nodeId CREATE (u)-[r:IS_AUTHORIZED]->(n) RETURN r"
	_, err = doWriteTransaction(stmt, map[string]interface{}{
		"nodeId": node.Id,
		"userId": user.Id,
	}, func(result neo4j.Result) (interface{}, error) {
		// Only the transaction error matters; the created relation is discarded.
		return nil, result.Err()
	})
	return err
}
|
package solutions
import (
"bytes"
"fmt"
"strconv"
"strings"
)
// Codec serializes binary trees to strings and back.
type Codec struct{}

// Constructor returns a ready-to-use Codec.
func Constructor() Codec {
	var c Codec
	return c
}
// serialize encodes the tree in level order, terminating each value with
// '!' and writing "#" for nil children. Trailing nils after the last real
// node are omitted because the loop exits once size hits zero.
func (this *Codec) serialize(root *TreeNode) string {
	if root == nil {
		return ""
	}
	var buffer bytes.Buffer
	// size counts the real (non-nil) nodes still waiting in the queue.
	queue, size := []*TreeNode{root}, 1
	for size != 0 {
		pop := queue[0]
		queue = queue[1:]
		if pop == nil {
			// Placeholder for a missing child; does not change size.
			buffer.WriteString("#!")
			continue
		}
		size--
		buffer.WriteString(fmt.Sprintf("%d!", pop.Val))
		if pop.Left != nil {
			size++
		}
		if pop.Right != nil {
			size++
		}
		// Both children (possibly nil) are enqueued so placeholders appear
		// at the correct positions in the output.
		queue = append(queue, pop.Left, pop.Right)
	}
	return buffer.String()
}
// deserialize rebuilds a tree from the level-order encoding produced by
// serialize. Splitting "1!2!#!" by '!' yields a trailing empty element,
// hence the len(nodes)-1 loop bound; a lone element means empty input.
func (this *Codec) deserialize(data string) *TreeNode {
	nodes := strings.Split(data, "!")
	if len(nodes) == 1 {
		return nil
	}
	value, _ := strconv.Atoi(nodes[0])
	root := &TreeNode{Val: value}
	queue := []*TreeNode{root}
	// flags[i] is 0 while queue[i] still needs its left child, 1 after.
	flags := []int{0}
	// index points at the parent currently receiving children.
	index := 0
	for i := 1; i < len(nodes)-1; i++ {
		var node *TreeNode
		// A non-numeric token ("#") leaves node nil and enqueues nothing.
		if value, errors := strconv.Atoi(nodes[i]); errors == nil {
			node = &TreeNode{Val: value}
			queue = append(queue, node)
			flags = append(flags, 0)
		}
		indexNode := queue[index]
		if flags[index] == 0 {
			indexNode.Left = node
			flags[index] = 1
		} else {
			// Right child filled; advance to the next parent.
			indexNode.Right = node
			index++
		}
	}
	return root
}
|
package main
import (
"time"
mgrpc "github.com/asim/go-micro/plugins/client/grpc/v3"
mhttp "github.com/asim/go-micro/plugins/server/http/v3"
"github.com/asim/go-micro/v3"
"github.com/asim/go-micro/v3/logger"
"github.com/gin-gonic/gin"
pb "github.com/xpunch/go-micro-example/v3/event/proto"
pbh "github.com/xpunch/go-micro-example/v3/helloworld/proto"
)
// main runs an HTTP gateway service ("web") on port 80 that forwards
// requests to the helloworld and statistics services over gRPC and
// publishes an access-log event for every request.
func main() {
	// HTTP server inbound, gRPC client outbound.
	srv := micro.NewService(
		micro.Server(mhttp.NewServer()),
		micro.Client(mgrpc.NewClient()),
		micro.Name("web"),
		micro.Address(":80"),
	)
	srv.Init()
	// Publisher for the "accesslogs" topic, used by the middleware below.
	accessEvent := micro.NewEvent("accesslogs", srv.Client())
	router := gin.New()
	router.Use(gin.Recovery(), gin.Logger(), AccessLogMiddleware(accessEvent))
	helloworldService := pbh.NewHelloworldService("helloworld", srv.Client())
	statisticsService := pb.NewStatisticsService("statistics", srv.Client())
	router.POST("/helloworld", func(ctx *gin.Context) {
		resp, err := helloworldService.Call(ctx, &pbh.Request{Name: ctx.Query("user")})
		if err != nil {
			ctx.AbortWithStatusJSON(500, err)
			return
		}
		ctx.JSON(200, resp)
	})
	router.GET("/statistics", func(ctx *gin.Context) {
		method := ctx.Query("method")
		resp, err := statisticsService.Statistics(ctx, &pb.StatisticsRequest{Method: &method})
		if err != nil {
			ctx.AbortWithStatusJSON(500, err)
			return
		}
		ctx.JSON(200, resp)
	})
	// Mount the gin router as the micro service's HTTP handler.
	if err := micro.RegisterHandler(srv.Server(), router); err != nil {
		logger.Fatal(err)
	}
	if err := srv.Run(); err != nil {
		logger.Error(err)
	}
}
// AccessLogMiddleware returns a gin middleware that publishes an access
// event (status, method, path, client IP, latency, timestamp) for every
// request after the handler chain completes.
func AccessLogMiddleware(event micro.Event) gin.HandlerFunc {
	return func(ctx *gin.Context) {
		begin := time.Now()
		// Capture the path before handlers run.
		urlPath := ctx.Request.URL.EscapedPath()

		ctx.Next()

		evt := &pb.AccessEvent{
			Status: uint32(ctx.Writer.Status()),
			Method: ctx.Request.Method,
			Path: urlPath,
			Ip: ctx.ClientIP(),
			Latency: int64(time.Since(begin)),
			Timestamp: begin.Unix(),
		}
		if err := event.Publish(ctx, evt); err != nil {
			logger.Warn(err)
		}
	}
}
|
package schema
import (
// "fmt"
// "img_tag/pkg/variable"
"github.com/jinzhu/gorm"
)
// User is the user model.
type User struct {
	gorm.Model
	UserName string `gorm:"column:user_name;size:64;index;default:'';not null;"` // login name
	RealName string `gorm:"column:real_name;size:64;index;default:'';not null;"` // real name
	Password string `gorm:"column:password;size:40;default:'';not null;"` // password, stored as sha1(md5(plaintext))
	Email *string `gorm:"column:email;size:255;index;"` // email address
	Phone *string `gorm:"column:phone;size:20;index;"` // phone number
	Status int `gorm:"column:status;index;default:0;not null;"` // status (1: enabled, 2: disabled)
	Creator string `gorm:"column:creator;size:36;"` // creator
}
// func Init() {
// variable.Db.AutoMigrate(&User{})
// var user User
// variable.Db.Find(&user, 1)
// fmt.Println(user, 11111)
// }
|
package main
import (
"fmt"
"log"
"os"
"runtime/trace"
)
// Execution tracer demo.
// The tracer captures execution events such as goroutine creation/blocking/unblocking,
// syscall enter/exit/block, GC-related events, heap-size changes, and processor
// start/stop, writing them in a compact form to an io.Writer.
// Most events carry nanosecond-precision timestamps and stack traces; the trace
// can be analyzed later with the "go tool trace" command.
// main records a runtime execution trace of traceTest into
// testdata/trace.out for later analysis with "go tool trace".
func main() {
	// Create the trace output file; inspect afterwards with
	// "go tool trace testdata/trace.out".
	f, err := os.Create("testdata/trace.out")
	if err != nil {
		log.Fatalf("failed to create trace output file: %v", err)
	}
	defer func() {
		if err := f.Close(); err != nil {
			log.Fatalf("failed to close trace file: %v", err)
		}
	}()
	// Start tracing; while tracing, events are buffered and written to f.
	// Start returns an error if tracing is already enabled.
	if err := trace.Start(f); err != nil {
		log.Fatalf("failed to start trace: %v", err)
	}
	// Stop ends the current trace, if any; it returns only after all
	// trace writes have completed.
	defer trace.Stop()
	// The workload to be traced goes below.
	traceTest()
}
// traceTest is a trivial workload whose execution appears in the trace.
func traceTest() {
	fmt.Printf("this function will be traced\n")
}
|
package main
import "fmt"
// main demonstrates the init clause of a for loop holding a function call
// and a plain assignment.
func main() {
	i := 1
	// function call in the init part in for loop
	for test(); i < 3; i++ {
		fmt.Println(i)
	}
	// assignment in the init part in for loop
	// (fixes the "assigment" typo in both the comment and the output)
	fmt.Println("in assignment")
	for i = 2; i < 5; i++ {
		fmt.Println(i)
	}
}
// test prints a marker showing when the for loop's init clause runs.
func test() {
	fmt.Println("In test function")
}
|
package flags
// Brush-content, surface, and spatial-query mask flags. The names and
// comments appear to mirror the Source engine's bspflags.h values —
// TODO(review): confirm the intended upstream so they stay in sync.
const CONTENTS_EMPTY = 0 // No contents
const CONTENTS_SOLID = 0x1 // an eye is never valid in a solid
const CONTENTS_WINDOW = 0x2 // translucent, but not watery (glass)
const CONTENTS_AUX = 0x4
const CONTENTS_GRATE = 0x8 // alpha-tested "grate" textures. Bullets/sight pass through, but solids don't
const CONTENTS_SLIME = 0x10
const CONTENTS_WATER = 0x20
const CONTENTS_BLOCKLOS = 0x40 // block AI line of sight
const CONTENTS_OPAQUE = 0x80 // things that cannot be seen through (may be non-solid though)
const LAST_VISIBLE_CONTENTS = 0x80
// ALL_VISIBLE_CONTENTS is every bit up to and including LAST_VISIBLE_CONTENTS.
const ALL_VISIBLE_CONTENTS = LAST_VISIBLE_CONTENTS | (LAST_VISIBLE_CONTENTS - 1)
const CONTENTS_TESTFOGVOLUME = 0x100
const CONTENTS_UNUSED = 0x200
// unused
// NOTE: If it's visible, grab from the top + update LAST_VISIBLE_CONTENTS
// if not visible, then grab from the bottom.
const CONTENTS_UNUSED6 = 0x400
const CONTENTS_TEAM1 = 0x800 // per team contents used to differentiate collisions
const CONTENTS_TEAM2 = 0x1000 // between players and objects on different teams
// ignore CONTENTS_OPAQUE on surfaces that have SURF_NODRAW
const CONTENTS_IGNORE_NODRAW_OPAQUE = 0x2000
// hits entities which are MOVETYPE_PUSH (doors, plats, etc.)
const CONTENTS_MOVEABLE = 0x4000
// remaining contents are non-visible, and don't eat brushes
const CONTENTS_AREAPORTAL = 0x8000
const CONTENTS_PLAYERCLIP = 0x10000
const CONTENTS_MONSTERCLIP = 0x20000
// currents can be added to any other contents, and may be mixed
const CONTENTS_CURRENT_0 = 0x40000
const CONTENTS_CURRENT_90 = 0x80000
const CONTENTS_CURRENT_180 = 0x100000
const CONTENTS_CURRENT_270 = 0x200000
const CONTENTS_CURRENT_UP = 0x400000
const CONTENTS_CURRENT_DOWN = 0x800000
const CONTENTS_ORIGIN = 0x1000000 // removed before bsping an entity
const CONTENTS_MONSTER = 0x2000000 // should never be on a brush, only in game
const CONTENTS_DEBRIS = 0x4000000
const CONTENTS_DETAIL = 0x8000000 // brushes to be added after vis leafs
const CONTENTS_TRANSLUCENT = 0x10000000 // auto set if any surface has trans
const CONTENTS_LADDER = 0x20000000
const CONTENTS_HITBOX = 0x40000000 // use accurate hitboxes on trace
// NOTE: These are stored in a short in the engine now. Don't use more than 16 bits
const SURF_LIGHT = 0x0001 // value will hold the light strength
const SURF_SKY2D = 0x0002 // don't draw, indicates we should skylight + draw 2d sky but not draw the 3D skybox
const SURF_SKY = 0x0004 // don't draw, but add to skybox
const SURF_WARP = 0x0008 // turbulent water warp
const SURF_TRANS = 0x0010
const SURF_NOPORTAL = 0x0020 // the surface can not have a portal placed on it
const SURF_TRIGGER = 0x0040 // FIXME: This is an xbox hack to work around elimination of trigger surfaces, which breaks occluders
const SURF_NODRAW = 0x0080 // don't bother referencing the texture
const SURF_HINT = 0x0100 // make a primary bsp splitter
const SURF_SKIP = 0x0200 // completely ignore, allowing non-closed brushes
const SURF_NOLIGHT = 0x0400 // Don't calculate light
const SURF_BUMPLIGHT = 0x0800 // calculate three lightmaps for the surface for bumpmapping
const SURF_NOSHADOWS = 0x1000 // Don't receive shadows
const SURF_NODECALS = 0x2000 // Don't receive decals
const SURF_NOCHOP = 0x4000 // Don't subdivide patches on this surface
const SURF_HITBOX = 0x8000 // surface is part of a hitbox
// -----------------------------------------------------
// spatial content masks - used for spatial queries (traceline,etc.)
// -----------------------------------------------------
const MASK_ALL = (0xFFFFFFFF)
// everything that is normally solid
const MASK_SOLID = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_WINDOW | CONTENTS_MONSTER | CONTENTS_GRATE)
// everything that blocks player movement
const MASK_PLAYERSOLID = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_PLAYERCLIP | CONTENTS_WINDOW | CONTENTS_MONSTER | CONTENTS_GRATE)
// blocks npc movement
const MASK_NPCSOLID = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_MONSTERCLIP | CONTENTS_WINDOW | CONTENTS_MONSTER | CONTENTS_GRATE)
// water physics in these contents
const MASK_WATER = (CONTENTS_WATER | CONTENTS_MOVEABLE | CONTENTS_SLIME)
// everything that blocks lighting
const MASK_OPAQUE = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_OPAQUE)
// everything that blocks lighting, but with monsters added.
const MASK_OPAQUE_AND_NPCS = (MASK_OPAQUE | CONTENTS_MONSTER)
// everything that blocks line of sight for AI
const MASK_BLOCKLOS = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_BLOCKLOS)
// everything that blocks line of sight for AI plus NPCs
const MASK_BLOCKLOS_AND_NPCS = (MASK_BLOCKLOS | CONTENTS_MONSTER)
// everything that blocks line of sight for players
const MASK_VISIBLE = (MASK_OPAQUE | CONTENTS_IGNORE_NODRAW_OPAQUE)
// everything that blocks line of sight for players, but with monsters added.
const MASK_VISIBLE_AND_NPCS = (MASK_OPAQUE_AND_NPCS | CONTENTS_IGNORE_NODRAW_OPAQUE)
// bullets see these as solid
const MASK_SHOT = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_MONSTER | CONTENTS_WINDOW | CONTENTS_DEBRIS | CONTENTS_HITBOX)
// non-raycasted weapons see this as solid (includes grates)
const MASK_SHOT_HULL = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_MONSTER | CONTENTS_WINDOW | CONTENTS_DEBRIS | CONTENTS_GRATE)
// hits solids (not grates) and passes through everything else
const MASK_SHOT_PORTAL = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_WINDOW | CONTENTS_MONSTER)
// everything normally solid, except monsters (world+brush only)
const MASK_SOLID_BRUSHONLY = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_WINDOW | CONTENTS_GRATE)
// everything normally solid for player movement, except monsters (world+brush only)
const MASK_PLAYERSOLID_BRUSHONLY = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_WINDOW | CONTENTS_PLAYERCLIP | CONTENTS_GRATE)
// everything normally solid for npc movement, except monsters (world+brush only)
const MASK_NPCSOLID_BRUSHONLY = (CONTENTS_SOLID | CONTENTS_MOVEABLE | CONTENTS_WINDOW | CONTENTS_MONSTERCLIP | CONTENTS_GRATE)
// just the world, used for route rebuilding
const MASK_NPCWORLDSTATIC = (CONTENTS_SOLID | CONTENTS_WINDOW | CONTENTS_MONSTERCLIP | CONTENTS_GRATE)
// These are things that can split areaportals
const MASK_SPLITAREAPORTAL = (CONTENTS_WATER | CONTENTS_SLIME)
// UNDONE: This is untested, any moving water
const MASK_CURRENT = (CONTENTS_CURRENT_0 | CONTENTS_CURRENT_90 | CONTENTS_CURRENT_180 | CONTENTS_CURRENT_270 | CONTENTS_CURRENT_UP | CONTENTS_CURRENT_DOWN)
// everything that blocks corpse movement
// UNDONE: Not used yet / may be deleted
const MASK_DEADSOLID = (CONTENTS_SOLID | CONTENTS_PLAYERCLIP | CONTENTS_WINDOW | CONTENTS_GRATE)
|
package uibutton
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/tilt-dev/tilt/internal/hud/server"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/internal/store/uibuttons"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
)
// Reconciler mirrors UIButton API objects into the engine store and
// pushes per-button updates to connected websockets.
type Reconciler struct {
	client ctrlclient.Client
	wsList *server.WebsocketList
	dispatcher store.Dispatcher
}

// Compile-time check that Reconciler implements reconcile.Reconciler.
var _ reconcile.Reconciler = &Reconciler{}
// NewReconciler builds a Reconciler wired to the given API client,
// websocket list, and store (used as the action dispatcher).
func NewReconciler(client ctrlclient.Client, wsList *server.WebsocketList, store store.RStore) *Reconciler {
	r := &Reconciler{}
	r.client = client
	r.wsList = wsList
	r.dispatcher = store
	return r
}
// Reconcile syncs a single UIButton: on deletion it notifies websockets
// and the store; otherwise it upserts the button, stamps a spec-hash
// annotation, and broadcasts the (possibly updated) object.
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	button := &v1alpha1.UIButton{}
	err := r.client.Get(ctx, req.NamespacedName, button)
	if err != nil && !apierrors.IsNotFound(err) {
		return ctrl.Result{}, fmt.Errorf("uibutton reconcile: %v", err)
	}
	if apierrors.IsNotFound(err) || button.ObjectMeta.DeletionTimestamp != nil {
		// Button is gone: tell websocket clients and the engine store.
		r.wsList.ForEach(func(ws *server.WebsocketSubscriber) {
			ws.SendUIButtonUpdate(ctx, req.NamespacedName, nil)
		})
		r.dispatcher.Dispatch(uibuttons.NewUIButtonDeleteAction(req.Name))
		return ctrl.Result{}, nil
	}
	// The apiserver is the source of truth, and will ensure the engine state is up to date.
	r.dispatcher.Dispatch(uibuttons.NewUIButtonUpsertAction(button))
	// Add an annotation to each button that hashes the spec,
	// so that we can determine that a button is unique.
	hash, err := hashUIButtonSpec(button.Spec)
	if err == nil && hash != button.Annotations[annotationSpecHash] {
		update := button.DeepCopy()
		if update.Annotations == nil {
			update.Annotations = make(map[string]string)
		}
		update.Annotations[annotationSpecHash] = hash
		err := r.client.Update(ctx, update)
		if err != nil {
			// Previously this returned a nil error, silently dropping the
			// failed update; return it so controller-runtime logs the
			// failure and requeues the request.
			return ctrl.Result{}, fmt.Errorf("uibutton update: %v", err)
		}
		button = update
	}
	r.wsList.ForEach(func(ws *server.WebsocketSubscriber) {
		ws.SendUIButtonUpdate(ctx, req.NamespacedName, button)
	})
	return ctrl.Result{}, nil
}
// CreateBuilder registers the UIButton type with the controller manager.
func (r *Reconciler) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
	return ctrl.NewControllerManagedBy(mgr).For(&v1alpha1.UIButton{}), nil
}
|
package handler
import (
"context"
"github.com/b2wdigital/openbox-go/examples/simple/internal/pkg/model/response"
"github.com/cloudevents/sdk-go"
)
// Test1 replies to the incoming cloud event with a fixed
// "samples.http.mod3" event carrying a greeting payload.
func Test1(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error {
	evCtx := cloudevents.EventContextV1{
		Source: *cloudevents.ParseURIRef("/mod3"),
		Type: "samples.http.mod3",
	}
	out := cloudevents.Event{
		Context: evCtx.AsV1(),
		Data: response.Default{
			Message: "Test 1!!",
		},
	}
	resp.Event = &out
	return nil
}
|
package persistent
import (
"github.com/EventStore/EventStore-Client-Go/protos/persistent"
"github.com/EventStore/EventStore-Client-Go/protos/shared"
)
// toPersistentReadRequest assembles the protobuf request used to start
// reading a stream through a persistent subscription group.
func toPersistentReadRequest(
	bufferSize int32,
	groupName string,
	streamName []byte,
) *persistent.ReadReq {
	streamOption := &persistent.ReadReq_Options_StreamIdentifier{
		StreamIdentifier: &shared.StreamIdentifier{
			StreamName: streamName,
		},
	}
	uuidOption := &persistent.ReadReq_Options_UUIDOption{
		Content: &persistent.ReadReq_Options_UUIDOption_String_{
			String_: nil,
		},
	}
	options := &persistent.ReadReq_Options{
		BufferSize: bufferSize,
		GroupName: groupName,
		StreamOption: streamOption,
		UuidOption: uuidOption,
	}
	return &persistent.ReadReq{
		Content: &persistent.ReadReq_Options_{Options: options},
	}
}
|
package rootdir
// Rootdir exposes the path of a root directory.
type Rootdir interface {
	Path() string
}

type rootdir string

// ByName wraps dir in a Rootdir implementation.
func ByName(dir string) Rootdir {
	wrapped := rootdir(dir)
	return &wrapped
}

// Path returns the wrapped directory path.
func (r *rootdir) Path() string {
	return string(*r)
}
|
// Copyright 2019 Liquidata, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package diff
import (
"context"
"errors"
"time"
"github.com/dolthub/dolt/go/store/diff"
"github.com/dolthub/dolt/go/store/types"
)
// DiffSummaryProgress is a progress report sent over the Summary channel;
// the counter fields carry increments and the size fields the map lengths.
type DiffSummaryProgress struct {
	Adds, Removes, Changes, CellChanges, NewSize, OldSize uint64
}
// Summary reports a summary of diff changes between two values, emitting
// incremental DiffSummaryProgress records on ch as diffs are consumed.
func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to types.Map) error {
	differ := NewAsyncDiffer(1024)
	differ.Start(ctx, from, to)
	defer differ.Close()

	// Seed the consumer with the overall sizes up front.
	ch <- DiffSummaryProgress{OldSize: from.Len(), NewSize: to.Len()}
	for !differ.IsDone() {
		batch, err := differ.GetDiffs(100, time.Millisecond)
		if err != nil {
			return err
		}
		for _, d := range batch {
			if err := reportChanges(d, ch); err != nil {
				return err
			}
		}
	}
	return nil
}
// reportChanges translates one diff into a DiffSummaryProgress increment
// and sends it on ch; modified rows also count their changed cells.
func reportChanges(change *diff.Difference, ch chan<- DiffSummaryProgress) error {
	switch change.ChangeType {
	case types.DiffChangeAdded:
		ch <- DiffSummaryProgress{Adds: 1}
		return nil
	case types.DiffChangeRemoved:
		ch <- DiffSummaryProgress{Removes: 1}
		return nil
	case types.DiffChangeModified:
		before := change.OldValue.(types.Tuple)
		after := change.NewValue.(types.Tuple)
		cells, err := before.CountDifferencesBetweenTupleFields(after)
		if err != nil {
			return err
		}
		ch <- DiffSummaryProgress{Changes: 1, CellChanges: cells}
		return nil
	}
	return errors.New("unknown change type")
}
|
package cmd
import (
"github.com/spf13/cobra"
)
// verbose is bound to the persistent -v/--verbose flag in init.
var verbose bool
// version is bound to the --version flag in init.
var version bool
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
	Use: "mp3tag",
	Short: "A command line utility for manipulating the metadata in mp3 files.",
	Long: `Allows viewing and manipulating the IDv3 tags in MP3 files, as well
as file renaming. For example:
`,
	// Uncomment the following line if your bare application
	// has an action associated with it:
	Run: func(cmd *cobra.Command, args []string) {
		if version {
			printVersion()
		} else {
			// NOTE(review): cmd.Help's error return is ignored here.
			cmd.Help()
		}
	},
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
// Any execution error terminates the process via cobra.CheckErr.
func Execute() {
	cobra.CheckErr(rootCmd.Execute())
}
// init registers the command-line flags on rootCmd.
func init() {
	// Here you will define your flags and configuration settings.
	// Cobra supports persistent flags, which, if defined here,
	// will be global for your application.
	rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "show some extra output")
	// Cobra also supports local flags, which will only run
	// when this action is called directly.
	rootCmd.Flags().BoolVar(&version, "version", false, "show the application version")
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build amd64 || arm64
// +build amd64 arm64
// Package atomicbitops provides extensions to the sync/atomic package.
//
// All read-modify-write operations implemented by this package have
// acquire-release memory ordering (like sync/atomic).
//
// +checkalignedignore
package atomicbitops
// The lowercase functions below are declared without bodies; their
// implementations are provided outside Go (per-architecture assembly,
// per the amd64/arm64 build constraints of this file).

// AndUint32 atomically applies bitwise AND operation to *addr with val.
func AndUint32(addr *Uint32, val uint32) {
	andUint32(&addr.value, val)
}
func andUint32(addr *uint32, val uint32)
// OrUint32 atomically applies bitwise OR operation to *addr with val.
func OrUint32(addr *Uint32, val uint32) {
	orUint32(&addr.value, val)
}
func orUint32(addr *uint32, val uint32)
// XorUint32 atomically applies bitwise XOR operation to *addr with val.
func XorUint32(addr *Uint32, val uint32) {
	xorUint32(&addr.value, val)
}
func xorUint32(addr *uint32, val uint32)
// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
// the value previously stored at addr.
func CompareAndSwapUint32(addr *Uint32, old, new uint32) uint32 {
	return compareAndSwapUint32(&addr.value, old, new)
}
func compareAndSwapUint32(addr *uint32, old, new uint32) uint32
// AndUint64 atomically applies bitwise AND operation to *addr with val.
func AndUint64(addr *Uint64, val uint64) {
	andUint64(&addr.value, val)
}
func andUint64(addr *uint64, val uint64)
// OrUint64 atomically applies bitwise OR operation to *addr with val.
func OrUint64(addr *Uint64, val uint64) {
	orUint64(&addr.value, val)
}
func orUint64(addr *uint64, val uint64)
// XorUint64 atomically applies bitwise XOR operation to *addr with val.
func XorUint64(addr *Uint64, val uint64) {
	xorUint64(&addr.value, val)
}
func xorUint64(addr *uint64, val uint64)
// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
// the value previously stored at addr.
func CompareAndSwapUint64(addr *Uint64, old, new uint64) uint64 {
	return compareAndSwapUint64(&addr.value, old, new)
}
func compareAndSwapUint64(addr *uint64, old, new uint64) uint64
|
package maccount
import (
"time"
"webserver/lib/jsonlib"
"webserver/models"
)
// Authenticate is a user authentication/verification record backed by the
// "authenticates" table (see FindAuthenticateCount).
type Authenticate struct {
	//RecordBase
	Id int
	UserId int
	Type int
	Value int
	Info string
	Image string
	Extra string // raw JSON blob, parsed lazily via GetExtra
	Status int
	CreatedAt time.Time //string
	UpdatedAt time.Time //string
	extra *jsonlib.ParsedJson // cached parse of Extra
}
// FindAuthenticateByStatus returns a page of records matching the given
// type and status, ordered by ascending id.
func FindAuthenticateByStatus(htype, status interface{}, offset, limit int) ([]Authenticate, error) {
	var records []Authenticate
	query := models.GetDb().Where("`type` = ? and status = ?", htype, status)
	err := query.Order("id asc").Offset(offset).Limit(limit).Find(&records).Error
	return records, err
}
// FindAuthByStatus returns the lowest-id record matching the given type
// and status.
func FindAuthByStatus(htype, status interface{}) (*Authenticate, error) {
	record := &Authenticate{}
	query := models.GetDb().Where("`type` = ? and status = ?", htype, status)
	err := query.Order("id asc").First(record).Error
	return record, err
}
// FindAuthenticateByUserId returns all records of the given type for a
// user, newest (highest id) first.
func FindAuthenticateByUserId(userId, htype interface{}) ([]Authenticate, error) {
	var records []Authenticate
	query := models.GetDb().Where("user_id = ? and `type` = ?", userId, htype)
	err := query.Order("id desc").Find(&records).Error
	return records, err
}
// FindAuthenticateCount counts records matching the given type and status.
func FindAuthenticateCount(htype, status interface{}) int {
	count := 0
	query := models.GetDb().Table("authenticates").Where("`type` = ? and status = ?", htype, status)
	query.Count(&count)
	return count
}
// GetExtra lazily parses the Extra JSON blob and caches the result; a
// failed parse leaves the cache nil so a later call retries.
func (a *Authenticate) GetExtra() *jsonlib.ParsedJson {
	if a.extra != nil {
		return a.extra
	}
	parsed, _ := jsonlib.NewJson(a.Extra)
	a.extra = parsed
	return a.extra
}
|
package storageoscluster
import (
"context"
"errors"
"fmt"
"sigs.k8s.io/controller-runtime/pkg/client"
storageosv1 "github.com/storageos/cluster-operator/pkg/apis/storageos/v1"
)
// ErrNoCluster is the error when there's no running StorageOS cluster found.
var ErrNoCluster = errors.New("no storageos cluster found")
// GetCurrentStorageOSCluster returns the currently running StorageOS
// cluster: a cluster in the Running phase wins; with exactly one cluster
// listed, that cluster is used regardless of phase. ErrNoCluster is
// returned when neither rule selects one.
func GetCurrentStorageOSCluster(kclient client.Client) (*storageosv1.StorageOSCluster, error) {
	clusterList := &storageosv1.StorageOSClusterList{}
	listOpts := []client.ListOption{}
	if err := kclient.List(context.Background(), clusterList, listOpts...); err != nil {
		return nil, fmt.Errorf("failed to list storageos clusters: %v", err)
	}

	var current *storageosv1.StorageOSCluster
	// A single listed cluster is the current one by default.
	if len(clusterList.Items) == 1 {
		current = &clusterList.Items[0]
	}
	// Only one cluster can be in running phase at a time; it overrides.
	for i := range clusterList.Items {
		if clusterList.Items[i].Status.Phase == storageosv1.ClusterPhaseRunning {
			current = &clusterList.Items[i]
			break
		}
	}
	if current == nil {
		return nil, ErrNoCluster
	}
	return current, nil
}
|
// Copyright (c) 2013 Mathieu Turcotte
// Licensed under the MIT license.
package main
import (
"flag"
"fmt"
bc "github.com/MathieuTurcotte/go-browserchannel/browserchannel"
"log"
"net/http"
"sync"
)
// Command-line flags configuring the demo server.
var publicDir = flag.String("public_directory", "", "path to public directory")
var closureDir = flag.String("closure_directory", "", "path to closure directory")
var port = flag.String("port", "8080", "the port to listen on")
var hostname = flag.String("hostname", "hpenvy.local", "the server hostname")
// channels tracks every open browser channel keyed by session id, guarded
// by the embedded RWMutex (writes in handleChannel, reads in broadcast).
var channels = struct {
	sync.RWMutex
	m map[bc.SessionId]*bc.Channel
}{m: make(map[bc.SessionId]*bc.Channel)}
// broadcast sends a Go-syntax dump of the map m to every registered channel.
// The registry read lock is held for the duration of the fan-out.
func broadcast(m bc.Map) {
	channels.RLock()
	defer channels.RUnlock()
	for _, ch := range channels.m {
		ch.SendArray(bc.Array{fmt.Sprintf("%#v", m)})
	}
}
// handleChannel registers a newly opened channel, echoes every map it
// receives to all connected channels, and unregisters the channel once its
// map stream is closed.
func handleChannel(channel *bc.Channel) {
	log.Printf("Handlechannel (%q)\n", channel.Sid)

	channels.Lock()
	channels.m[channel.Sid] = channel
	channels.Unlock()

	// Ranging over the channel drains it until the sender closes it,
	// which replaces the manual `m, ok := <-...` receive loop.
	for m := range channel.Maps() {
		log.Printf("%s: map: %#v\n", channel.Sid, m)
		broadcast(m)
	}

	// The map stream was closed: drop the channel from the registry.
	log.Printf("%s: returned with no data, closing\n", channel.Sid)
	channels.Lock()
	delete(channels.m, channel.Sid)
	channels.Unlock()
}
// main wires up the browser-channel handler plus static file serving for
// the closure library and the public directory, then serves forever.
func main() {
	flag.Parse()

	handler := bc.NewHandler(handleChannel)
	handler.SetCrossDomainPrefix(*hostname+":"+*port, []string{"bc0", "bc1"})

	http.Handle("/channel/", handler)
	http.Handle("/closure/", http.StripPrefix("/closure/", http.FileServer(http.Dir(*closureDir))))
	http.Handle("/", http.FileServer(http.Dir(*publicDir)))

	if err := http.ListenAndServe(":"+*port, nil); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
package main
import (
"database/sql"
"fmt"
"log"
"net/http"
"strconv"
_ "github.com/lib/pq"
)
// db is the shared Postgres connection pool, opened and verified in init.
var db *sql.DB
// init opens the shared Postgres connection pool and verifies connectivity,
// terminating the process on any failure so handlers never see a nil db.
func init() {
	var err error
	// Local postgres instance, default user, TLS disabled.
	db, err = sql.Open("postgres", "postgres://postgres@localhost?sslmode=disable")
	if err != nil {
		log.Fatal("Could not open database: ", err)
	}
	// sql.Open does not dial; Ping forces a round trip to validate the DSN.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
// main registers the book handlers and serves HTTP on port 3000.
func main() {
	http.HandleFunc("/books", booksIndex)
	http.HandleFunc("/books/show", booksShow)
	http.HandleFunc("/books/create", booksCreate)
	// ListenAndServe always returns a non-nil error. Previously the error
	// was silently discarded, hiding failures such as the port being in use.
	log.Fatal(http.ListenAndServe(":3000", nil))
}
// booksCreate handles POST /books/create. It validates the isbn, title,
// author and price form fields, inserts one row into the books table, and
// reports the number of rows affected. Non-POST requests get 405; missing
// or unparsable fields get 400; database failures get 500.
func booksCreate(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		// Named constants instead of the magic literals 405/400/500.
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return
	}

	isbn := r.FormValue("isbn")
	title := r.FormValue("title")
	author := r.FormValue("author")
	if isbn == "" || title == "" || author == "" {
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}

	// Parsed with bitSize 32 — presumably the column is a 4-byte float;
	// TODO(review): confirm against the books table schema.
	price, err := strconv.ParseFloat(r.FormValue("price"), 32)
	if err != nil {
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}

	// Parameterized query — form input never reaches the SQL text.
	result, err := db.Exec("INSERT INTO books VALUES($1, $2, $3, $4)",
		isbn, title, author, price)
	if err != nil {
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}

	rowsAffected, err := result.RowsAffected()
	if err != nil {
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}

	fmt.Fprintf(w, "Book %s created successfully (%d rows affected)\n", isbn, rowsAffected)
}
// booksIndex handles GET /books. Listing is not implemented yet; the
// handler currently writes nothing and returns 200.
func booksIndex(w http.ResponseWriter, r *http.Request) {
}
// booksShow handles GET /books/show. Lookup is not implemented yet; the
// handler currently writes nothing and returns 200.
func booksShow(w http.ResponseWriter, r *http.Request) {
}
|
package sll
// ListErr is the error type used by the singly linked list implementation.
// It wraps a plain message string and satisfies the built-in error interface.
type ListErr struct {
	s string
}

// Error returns the message carried by the error.
func (e *ListErr) Error() string {
	return e.s
}
|
package db
import (
"database/sql"
"errors"
"fmt"
"log"
"strings"
"time"
user "github.com/bketelsen/microclass/module7/userservice/proto/account"
_ "github.com/go-sql-driver/mysql"
)
var (
	// Url is the MySQL DSN; the path component (after "/") names the
	// database and is split out by Init.
	Url = "root:root@tcp(127.0.0.1:3306)/user"
	// database is the database name extracted from Url by Init.
	database string
	// db is the shared connection pool, set by Init.
	db *sql.DB
	// q is an empty query map; nothing in this file reads or writes it.
	// NOTE(review): looks removable — confirm no other file uses it.
	q = map[string]string{}
	// accountQ maps logical operation names to SQL templates for the
	// accounts table. Each %s.%s pair is filled with database and table
	// name when the statements are prepared in Init.
	accountQ = map[string]string{
		"delete": "DELETE from %s.%s where id = ?",
		"create": `INSERT into %s.%s (
id, username, email, salt, password, created, updated)
values (?, ?, ?, ?, ?, ?, ?)`,
		"update":                 "UPDATE %s.%s set username = ?, email = ?, updated = ? where id = ?",
		"updatePassword":         "UPDATE %s.%s set salt = ?, password = ?, updated = ? where id = ?",
		"read":                   "SELECT id, username, email, salt, password, created, updated from %s.%s where id = ?",
		"list":                   "SELECT id, username, email, salt, password, created, updated from %s.%s limit ? offset ?",
		"searchUsername":         "SELECT id, username, email, salt, password, created, updated from %s.%s where username = ? limit ? offset ?",
		"searchEmail":            "SELECT id, username, email, salt, password, created, updated from %s.%s where email = ? limit ? offset ?",
		"searchUsernameAndEmail": "SELECT id, username, email, salt, password, created, updated from %s.%s where username = ? and email = ? limit ? offset ?",
	}
	// sessionQ is the analogous set of SQL templates for the sessions table.
	sessionQ = map[string]string{
		"createSession": "INSERT into %s.%s (id, username, created, expires) values (?, ?, ?, ?)",
		"deleteSession": "DELETE from %s.%s where id = ?",
		"readSession":   "SELECT id, username, created, expires from %s.%s where id = ?",
	}
	// st holds the prepared statements, keyed by the names in accountQ
	// and sessionQ; populated by Init.
	st = map[string]*sql.Stmt{}
)
// Init bootstraps the MySQL backend: it creates the database named in Url
// if needed, applies the account and session schemas, and prepares every
// query from accountQ and sessionQ into st. Any failure is fatal.
func Init() {
	parts := strings.Split(Url, "/")
	if len(parts) != 2 {
		panic("Invalid database url")
	}
	if len(parts[1]) == 0 {
		panic("Invalid database name")
	}
	url := parts[0]
	database = parts[1]

	// First connect without selecting a database so it can be created.
	d, err := sql.Open("mysql", url+"/")
	if err != nil {
		log.Fatal(err)
	}
	if _, err = d.Exec("CREATE DATABASE IF NOT EXISTS " + database); err != nil {
		log.Fatal(err)
	}
	d.Close()

	// Reconnect with the database selected and apply both schemas.
	if d, err = sql.Open("mysql", Url); err != nil {
		log.Fatal(err)
	}
	if _, err = d.Exec(accountSchema); err != nil {
		log.Fatal(err)
	}
	if _, err = d.Exec(sessionSchema); err != nil {
		log.Fatal(err)
	}
	db = d

	// Prepare all statements up front; one loop covers both tables.
	groups := []struct {
		queries map[string]string
		table   string
	}{
		{accountQ, "accounts"},
		{sessionQ, "sessions"},
	}
	for _, g := range groups {
		for name, tmpl := range g.queries {
			prepared, err := db.Prepare(fmt.Sprintf(tmpl, database, g.table))
			if err != nil {
				log.Fatal(err)
			}
			st[name] = prepared
		}
	}
}
// CreateSession inserts sess into the sessions table. A zero Created is
// stamped with the current time and a zero Expires defaults to one week
// from now; both mutations are visible to the caller.
func CreateSession(sess *user.Session) error {
	if sess.Created == 0 {
		sess.Created = time.Now().Unix()
	}
	if sess.Expires == 0 {
		sess.Expires = time.Now().Add(time.Hour * 24 * 7).Unix()
	}
	_, err := st["createSession"].Exec(sess.Id, sess.Username, sess.Created, sess.Expires)
	return err
}
// DeleteSession removes the session row with the given id. Deleting a
// nonexistent id is not an error.
func DeleteSession(id string) error {
	_, execErr := st["deleteSession"].Exec(id)
	return execErr
}
// ReadSession loads the session with the given id. It returns a generic
// "not found" error when no such row exists.
func ReadSession(id string) (*user.Session, error) {
	sess := new(user.Session)
	err := st["readSession"].QueryRow(id).Scan(&sess.Id, &sess.Username, &sess.Created, &sess.Expires)
	switch {
	case err == sql.ErrNoRows:
		return nil, errors.New("not found")
	case err != nil:
		return nil, err
	}
	return sess, nil
}
// Create inserts a new account row for u, storing the provided salt and
// hashed password alongside. Created and Updated are stamped with the
// current time on u before the insert.
func Create(u *user.User, salt string, password string) error {
	u.Created = time.Now().Unix()
	u.Updated = time.Now().Unix()
	_, err := st["create"].Exec(u.Id, u.Username, u.Email, salt, password, u.Created, u.Updated)
	return err
}
// Delete removes the account row with the given id. Deleting a nonexistent
// id is not an error.
func Delete(id string) error {
	_, execErr := st["delete"].Exec(id)
	return execErr
}
// Update rewrites the username and email of the account identified by
// u.Id, stamping Updated with the current time on u before the write.
func Update(u *user.User) error {
	u.Updated = time.Now().Unix()
	_, err := st["update"].Exec(u.Username, u.Email, u.Updated, u.Id)
	return err
}
// Read loads the account with the given id. The salt and password columns
// are scanned but deliberately discarded so they never leave this layer.
// A generic "not found" error is returned when no such row exists.
func Read(id string) (*user.User, error) {
	u := new(user.User)
	var salt, pass string // fetched but intentionally not returned
	err := st["read"].QueryRow(id).Scan(&u.Id, &u.Username, &u.Email, &salt, &pass, &u.Created, &u.Updated)
	switch {
	case err == sql.ErrNoRows:
		return nil, errors.New("not found")
	case err != nil:
		return nil, err
	}
	return u, nil
}
// Search returns accounts filtered by username and/or email with the given
// limit and offset; when both filters are empty it falls back to a plain
// paginated listing. Salt and password columns are scanned but discarded.
func Search(username, email string, limit, offset int64) ([]*user.User, error) {
	var r *sql.Rows
	var err error
	switch {
	case len(username) > 0 && len(email) > 0:
		r, err = st["searchUsernameAndEmail"].Query(username, email, limit, offset)
	case len(username) > 0:
		r, err = st["searchUsername"].Query(username, limit, offset)
	case len(email) > 0:
		r, err = st["searchEmail"].Query(email, limit, offset)
	default:
		r, err = st["list"].Query(limit, offset)
	}
	if err != nil {
		return nil, err
	}
	defer r.Close()

	var users []*user.User
	for r.Next() {
		// Renamed from "user" to avoid shadowing the user package inside
		// the loop. The dead sql.ErrNoRows check was removed: Rows.Scan
		// after a successful Next never returns ErrNoRows.
		u := &user.User{}
		var salt, pass string
		if err := r.Scan(&u.Id, &u.Username, &u.Email, &salt, &pass, &u.Created, &u.Updated); err != nil {
			return nil, err
		}
		users = append(users, u)
	}
	// BUG FIX: the original returned the stale (nil) outer err here, which
	// silently swallowed row-iteration errors. Return the actual rows error.
	if err := r.Err(); err != nil {
		return nil, err
	}
	return users, nil
}
// UpdatePassword stores a new salt and hashed password for the account with
// the given id and bumps its updated timestamp to now.
func UpdatePassword(id string, salt string, password string) error {
	now := time.Now().Unix()
	_, execErr := st["updatePassword"].Exec(salt, password, now, id)
	return execErr
}
// SaltAndPassword returns the salt and hashed password of the single
// account matching username and/or email. At least one of the two must be
// non-empty; a missing account yields a "not found" error.
func SaltAndPassword(username, email string) (string, string, error) {
	var r *sql.Rows
	var err error
	switch {
	case len(username) > 0 && len(email) > 0:
		r, err = st["searchUsernameAndEmail"].Query(username, email, 1, 0)
	case len(username) > 0:
		r, err = st["searchUsername"].Query(username, 1, 0)
	case len(email) > 0:
		r, err = st["searchEmail"].Query(email, 1, 0)
	default:
		return "", "", errors.New("username and email cannot be blank")
	}
	if err != nil {
		return "", "", err
	}
	defer r.Close()

	if !r.Next() {
		// Distinguish an iteration error from a genuinely absent row.
		if err := r.Err(); err != nil {
			return "", "", err
		}
		return "", "", errors.New("not found")
	}

	var salt, pass string
	u := &user.User{} // "u" avoids shadowing the user package
	// The dead sql.ErrNoRows branch was removed: Rows.Scan after a
	// successful Next never returns ErrNoRows.
	if err := r.Scan(&u.Id, &u.Username, &u.Email, &salt, &pass, &u.Created, &u.Updated); err != nil {
		return "", "", err
	}
	// BUG FIX: the original checked r.Err() but returned the stale (nil)
	// outer err, masking iteration errors as success with empty strings.
	if err := r.Err(); err != nil {
		return "", "", err
	}
	return salt, pass, nil
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"os/exec"
)
// subjack is the entry point for the subdomain-takeover scan. It tags all
// log output with a "[subjack]" prefix and delegates to initSubjack.
// NOTE(review): args is forwarded but initSubjack does not use it.
func subjack(args []string) {
	log.SetPrefix("[subjack] ")
	initSubjack(args)
}
// initSubjack exports live hosts (ports 80/443) from the Domains table to a
// hosts file, runs the external subjack tool against them, logs any
// vulnerable subdomains found in the results file, and removes that file.
// All errors are routed through handleError. args is currently unused.
func initSubjack(args []string) {
	// Count live hosts so the scan size can be reported up front.
	var count int
	row := db.QueryRow(`SELECT COUNT(*) FROM "Domains" WHERE ports LIKE '%80%' OR ports LIKE '%443%';`)
	err := row.Scan(&count)
	handleError(err)

	query := `SELECT name FROM "Domains" WHERE ports LIKE '%80%' OR ports LIKE '%443%';`
	hostsPath := "output/subjack/hosts.txt"
	writeQueryToFile(query, hostsPath)
	log.Println(fmt.Sprintf("Beginning subjack scan on %d domains.", count))

	// Pre-create the results file so subjack can append to it.
	resultsPath := "output/subjack/results.txt"
	file, err := os.Create(resultsPath)
	handleError(err)
	file.Close()

	_, err = exec.Command("subjack", "-w", hostsPath, "-o", resultsPath, "-ssl", "-a").Output()
	handleError(err)

	// Each line of the results file is one vulnerable subdomain.
	file, err = os.Open(resultsPath)
	handleError(err)
	scanner := bufio.NewScanner(file)
	found := false
	for scanner.Scan() {
		found = true
		log.Println("Found vulnerable subdomain: " + scanner.Text())
	}
	// BUG FIX: scanner errors (e.g. over-long lines, read failures) were
	// previously ignored, which could silently truncate the results.
	handleError(scanner.Err())
	file.Close()

	if !found {
		log.Println("No vulnerable subdomains were found.")
	}
	err = os.Remove(resultsPath)
	handleError(err)
}
|
// Copyright 2017 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package importccl
import (
"bytes"
"context"
gosql "database/sql"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/http/httptest"
"net/url"
"path"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/cockroachdb/cockroach-go/crdb"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/blobs"
"github.com/cockroachdb/cockroach/pkg/ccl/backupccl"
_ "github.com/cockroachdb/cockroach/pkg/ccl/kvccl"
_ "github.com/cockroachdb/cockroach/pkg/ccl/multiregionccl"
"github.com/cockroachdb/cockroach/pkg/ccl/multiregionccl/multiregionccltestutils"
_ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/distsql"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/gcjob"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/tests"
"github.com/cockroachdb/cockroach/pkg/storage/cloud"
"github.com/cockroachdb/cockroach/pkg/storage/cloudimpl"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/jobutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/jackc/pgx"
"github.com/linkedin/goavro/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// createAvroData returns an OCF-encoded Avro blob for a record schema named
// name with the given field definitions, containing one record per element
// of rows. Any failure aborts the test via require.
func createAvroData(
	t *testing.T, name string, fields []map[string]interface{}, rows []map[string]interface{},
) string {
	// Build and serialize the record schema for the import data.
	schemaJSON, err := json.Marshal(map[string]interface{}{
		"type":   "record",
		"name":   name,
		"fields": fields,
	})
	require.NoError(t, err)
	codec, err := goavro.NewCodec(string(schemaJSON))
	require.NoError(t, err)

	// Stream each row through an OCF writer backed by an in-memory buffer.
	var buf bytes.Buffer
	ocf, err := goavro.NewOCFWriter(goavro.OCFConfig{
		W:     &buf,
		Codec: codec,
	})
	require.NoError(t, err)
	for _, r := range rows {
		require.NoError(t, ocf.Append([]interface{}{r}))
	}

	return buf.String()
}
func TestImportData(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderRace(t, "takes >1min under race")
const getTablesQuery = `
SELECT schema_name, table_name, type
FROM [SHOW TABLES]
ORDER BY table_name
`
s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
ctx := context.Background()
defer s.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
tests := []struct {
name string
create string
with string
typ string
data string
err string
rejected string
query map[string][][]string
skipIssue int
}{
{
name: "duplicate unique index key",
create: `
a int8 primary key,
i int8,
unique index idx_f (i)
`,
typ: "CSV",
data: `1,1
2,2
3,3
4,3
5,4`,
err: "duplicate key",
},
{
name: "duplicate PK",
create: `
i int8 primary key,
s string
`,
typ: "CSV",
data: `1, A
2, B
3, C
3, D
4, E`,
err: "duplicate key",
},
{
name: "duplicate collated string key",
create: `
s string collate en_u_ks_level1 primary key
`,
typ: "CSV",
data: `a
B
c
D
d
`,
err: "duplicate key",
skipIssue: 53956,
},
{
name: "duplicate PK at sst boundary",
create: `
i int8 primary key,
s string
`,
with: `WITH sstsize = '10B'`,
typ: "CSV",
data: `1,0000000000
1,0000000001`,
err: "duplicate key",
},
{
name: "verify no splits mid row",
create: `
i int8 primary key,
s string,
b int8,
c int8,
index (s),
index (i, s),
family (i, b),
family (s, c)
`,
with: `WITH sstsize = '1B'`,
typ: "CSV",
data: `5,STRING,7,9`,
query: map[string][][]string{
`SELECT count(*) from t`: {{"1"}},
},
},
{
name: "good bytes encoding",
create: `b bytes`,
typ: "CSV",
data: `\x0143
0143`,
query: map[string][][]string{
`SELECT * from t`: {{"\x01C"}, {"0143"}},
},
},
{
name: "invalid byte",
create: `b bytes`,
typ: "CSV",
data: `\x0g`,
rejected: `\x0g` + "\n",
err: "invalid byte",
},
{
name: "bad bytes length",
create: `b bytes`,
typ: "CSV",
data: `\x0`,
rejected: `\x0` + "\n",
err: "odd length hex string",
},
{
name: "oversample",
create: `i int8`,
with: `WITH oversample = '100'`,
typ: "CSV",
data: "1",
},
{
name: "new line characters",
create: `t text`,
typ: "CSV",
data: "\"hello\r\nworld\"\n\"friend\nfoe\"\n\"mr\rmrs\"",
query: map[string][][]string{
`SELECT t from t`: {{"hello\r\nworld"}, {"friend\nfoe"}, {"mr\rmrs"}},
},
},
{
name: "CR in int8, 2 cols",
create: `a int8, b int8`,
typ: "CSV",
data: "1,2\r\n3,4\n5,6",
query: map[string][][]string{
`SELECT * FROM t ORDER BY a`: {{"1", "2"}, {"3", "4"}, {"5", "6"}},
},
},
{
name: "CR in int8, 1 col",
create: `a int8`,
typ: "CSV",
data: "1\r\n3\n5",
query: map[string][][]string{
`SELECT * FROM t ORDER BY a`: {{"1"}, {"3"}, {"5"}},
},
},
{
name: "collated strings",
create: `s string collate en_u_ks_level1`,
typ: "CSV",
data: strings.Repeat("1\n", 2000),
query: map[string][][]string{
`SELECT s, count(*) FROM t GROUP BY s`: {{"1", "2000"}},
},
skipIssue: 53957,
},
{
name: "quotes are accepted in a quoted string",
create: `s string`,
typ: "CSV",
data: `"abc""de"`,
query: map[string][][]string{
`SELECT s FROM t`: {{`abc"de`}},
},
},
{
name: "bare quote in the middle of a field that is not quoted",
create: `s string`,
typ: "CSV",
data: `abc"de`,
query: map[string][][]string{`SELECT * from t`: {{`abc"de`}}},
},
{
name: "strict quotes: bare quote in the middle of a field that is not quoted",
create: `s string`,
typ: "CSV",
with: `WITH strict_quotes`,
data: `abc"de`,
err: `parse error on line 1, column 3: bare " in non-quoted-field`,
},
{
name: "no matching quote in a quoted field",
create: `s string`,
typ: "CSV",
data: `"abc"de`,
query: map[string][][]string{`SELECT * from t`: {{`abc"de`}}},
},
{
name: "strict quotes: bare quote in the middle of a quoted field is not ok",
create: `s string`,
typ: "CSV",
with: `WITH strict_quotes`,
data: `"abc"de"`,
err: `parse error on line 1, column 4: extraneous or missing " in quoted-field`,
},
{
name: "too many imported columns",
create: `i int8`,
typ: "CSV",
data: "1,2\n3\n11,22",
err: "row 1: expected 1 fields, got 2",
rejected: "1,2\n11,22\n",
query: map[string][][]string{`SELECT * from t`: {{"3"}}},
},
{
name: "parsing error",
create: `i int8, j int8`,
typ: "CSV",
data: "not_int,2\n3,4",
err: `row 1: parse "i" as INT8: could not parse "not_int" as type int`,
rejected: "not_int,2\n",
query: map[string][][]string{`SELECT * from t`: {{"3", "4"}}},
},
// MySQL OUTFILE
// If err field is non-empty, the query filed specifies what expect
// to get from the rows that are parsed correctly (see option experimental_save_rejected).
{
name: "empty file",
create: `a string`,
typ: "DELIMITED",
data: "",
query: map[string][][]string{`SELECT * from t`: {}},
},
{
name: "empty field",
create: `a string, b string`,
typ: "DELIMITED",
data: "\t",
query: map[string][][]string{`SELECT * from t`: {{"", ""}}},
},
{
name: "empty line",
create: `a string`,
typ: "DELIMITED",
data: "\n",
query: map[string][][]string{`SELECT * from t`: {{""}}},
},
{
name: "too many imported columns",
create: `i int8`,
typ: "DELIMITED",
data: "1\t2\n3",
err: "row 1: too many columns, got 2 expected 1",
rejected: "1\t2\n",
query: map[string][][]string{`SELECT * from t`: {{"3"}}},
},
{
name: "cannot parse data",
create: `i int8, j int8`,
typ: "DELIMITED",
data: "bad_int\t2\n3\t4",
err: "error parsing row 1",
rejected: "bad_int\t2\n",
query: map[string][][]string{`SELECT * from t`: {{"3", "4"}}},
},
{
name: "unexpected number of columns",
create: `a string, b string`,
typ: "DELIMITED",
data: "1,2\n3\t4",
err: "row 1: unexpected number of columns, expected 2 got 1",
rejected: "1,2\n",
query: map[string][][]string{`SELECT * from t`: {{"3", "4"}}},
},
{
name: "unexpected number of columns in 1st row",
create: `a string, b string`,
typ: "DELIMITED",
data: "1,2\n3\t4",
err: "row 1: unexpected number of columns, expected 2 got 1",
rejected: "1,2\n",
query: map[string][][]string{`SELECT * from t`: {{"3", "4"}}},
},
{
name: "field enclosure",
create: `a string, b string`,
with: `WITH fields_enclosed_by = '$'`,
typ: "DELIMITED",
data: "$foo$\tnormal",
query: map[string][][]string{
`SELECT * from t`: {{"foo", "normal"}},
},
},
{
name: "field enclosure in middle of unquoted field",
create: `a string, b string`,
with: `WITH fields_enclosed_by = '$'`,
typ: "DELIMITED",
data: "fo$o\tb$a$z",
query: map[string][][]string{
`SELECT * from t`: {{"fo$o", "b$a$z"}},
},
},
{
name: "field enclosure in middle of quoted field",
create: `a string, b string`,
with: `WITH fields_enclosed_by = '$'`,
typ: "DELIMITED",
data: "$fo$o$\t$b$a$z$",
query: map[string][][]string{
`SELECT * from t`: {{"fo$o", "b$a$z"}},
},
},
{
name: "unmatched field enclosure",
create: `a string, b string`,
with: `WITH fields_enclosed_by = '$'`,
typ: "DELIMITED",
data: "$foo\tnormal\nbaz\tbar",
err: "error parsing row 1: unmatched field enclosure at start of field",
rejected: "$foo\tnormal\nbaz\tbar\n",
query: map[string][][]string{`SELECT * from t`: {}},
},
{
name: "unmatched field enclosure at end",
create: `a string, b string`,
with: `WITH fields_enclosed_by = '$'`,
typ: "DELIMITED",
data: "foo$\tnormal\nbar\tbaz",
err: "row 1: unmatched field enclosure at end of field",
rejected: "foo$\tnormal\n",
query: map[string][][]string{`SELECT * from t`: {{"bar", "baz"}}},
},
{
name: "unmatched field enclosure 2nd field",
create: `a string, b string`,
with: `WITH fields_enclosed_by = '$'`,
typ: "DELIMITED",
data: "normal\t$foo",
err: "row 1: unmatched field enclosure at start of field",
rejected: "normal\t$foo\n",
query: map[string][][]string{`SELECT * from t`: {}},
},
{
name: "unmatched field enclosure at end 2nd field",
create: `a string, b string`,
with: `WITH fields_enclosed_by = '$'`,
typ: "DELIMITED",
data: "normal\tfoo$",
err: "row 1: unmatched field enclosure at end of field",
rejected: "normal\tfoo$\n",
query: map[string][][]string{`SELECT * from t`: {}},
},
{
name: "unmatched literal",
create: `i int8`,
with: `WITH fields_escaped_by = '\'`,
typ: "DELIMITED",
data: `\`,
err: "row 1: unmatched literal",
rejected: "\\\n",
query: map[string][][]string{`SELECT * from t`: {}},
},
{
name: "escaped field enclosure",
create: `a string, b string`,
with: `WITH fields_enclosed_by = '$', fields_escaped_by = '\',
fields_terminated_by = ','`,
typ: "DELIMITED",
data: `\$foo\$,\$baz`,
query: map[string][][]string{
`SELECT * from t`: {{"$foo$", "$baz"}},
},
},
{
name: "weird escape char",
create: `s STRING`,
with: `WITH fields_escaped_by = '@'`,
typ: "DELIMITED",
data: "@N\nN@@@\n\nNULL",
query: map[string][][]string{
`SELECT COALESCE(s, '(null)') from t`: {{"(null)"}, {"N@\n"}, {"NULL"}},
},
},
{
name: `null and \N with escape`,
create: `s STRING`,
with: `WITH fields_escaped_by = '\'`,
typ: "DELIMITED",
data: "\\N\n\\\\N\nNULL",
query: map[string][][]string{
`SELECT COALESCE(s, '(null)') from t`: {{"(null)"}, {`\N`}, {"NULL"}},
},
},
{
name: `\N with trailing char`,
create: `s STRING`,
with: `WITH fields_escaped_by = '\'`,
typ: "DELIMITED",
data: "\\N1\nfoo",
err: "row 1: unexpected data after null encoding",
rejected: "\\N1\n",
query: map[string][][]string{`SELECT * from t`: {{"foo"}}},
},
{
name: `double null`,
create: `s STRING`,
with: `WITH fields_escaped_by = '\'`,
typ: "DELIMITED",
data: `\N\N`,
err: "row 1: unexpected null encoding",
rejected: `\N\N` + "\n",
query: map[string][][]string{`SELECT * from t`: {}},
},
{
name: `null and \N without escape`,
create: `s STRING`,
typ: "DELIMITED",
data: "\\N\n\\\\N\nNULL",
query: map[string][][]string{
`SELECT COALESCE(s, '(null)') from t`: {{`\N`}, {`\\N`}, {"(null)"}},
},
},
{
name: `bytes with escape`,
create: `b BYTES`,
typ: "DELIMITED",
data: `\x`,
query: map[string][][]string{
`SELECT * from t`: {{`\x`}},
},
},
{
name: "skip 0 lines",
create: `a string, b string`,
with: `WITH fields_terminated_by = ',', skip = '0'`,
typ: "DELIMITED",
data: "foo,normal",
query: map[string][][]string{
`SELECT * from t`: {{"foo", "normal"}},
},
},
{
name: "skip 1 lines",
create: `a string, b string`,
with: `WITH fields_terminated_by = ',', skip = '1'`,
typ: "DELIMITED",
data: "a string, b string\nfoo,normal",
query: map[string][][]string{
`SELECT * from t`: {{"foo", "normal"}},
},
},
{
name: "skip 2 lines",
create: `a string, b string`,
with: `WITH fields_terminated_by = ',', skip = '2'`,
typ: "DELIMITED",
data: "a string, b string\nfoo,normal\nbar,baz",
query: map[string][][]string{
`SELECT * from t`: {{"bar", "baz"}},
},
},
{
name: "skip all lines",
create: `a string, b string`,
with: `WITH fields_terminated_by = ',', skip = '3'`,
typ: "DELIMITED",
data: "a string, b string\nfoo,normal\nbar,baz",
query: map[string][][]string{
`SELECT * from t`: {},
},
},
{
name: "skip > all lines",
create: `a string, b string`,
with: `WITH fields_terminated_by = ',', skip = '4'`,
typ: "DELIMITED",
data: "a string, b string\nfoo,normal\nbar,baz",
query: map[string][][]string{`SELECT * from t`: {}},
},
{
name: "skip -1 lines",
create: `a string, b string`,
with: `WITH fields_terminated_by = ',', skip = '-1'`,
typ: "DELIMITED",
data: "a string, b string\nfoo,normal",
err: "pq: skip must be >= 0",
},
{
name: "nullif empty string",
create: `a string, b string`,
with: `WITH fields_terminated_by = ',', nullif = ''`,
typ: "DELIMITED",
data: ",normal",
query: map[string][][]string{
`SELECT * from t`: {{"NULL", "normal"}},
},
},
{
name: "nullif empty string plus escape",
create: `a INT8, b INT8`,
with: `WITH fields_terminated_by = ',', fields_escaped_by = '\', nullif = ''`,
typ: "DELIMITED",
data: ",4",
query: map[string][][]string{
`SELECT * from t`: {{"NULL", "4"}},
},
},
{
name: "nullif single char string",
create: `a string, b string`,
with: `WITH fields_terminated_by = ',', nullif = 'f'`,
typ: "DELIMITED",
data: "f,normal",
query: map[string][][]string{
`SELECT * from t`: {{"NULL", "normal"}},
},
},
{
name: "nullif multiple char string",
create: `a string, b string`,
with: `WITH fields_terminated_by = ',', nullif = 'foo'`,
typ: "DELIMITED",
data: "foo,foop",
query: map[string][][]string{
`SELECT * from t`: {{"NULL", "foop"}},
},
},
// PG COPY
{
name: "unexpected escape x",
create: `b bytes`,
typ: "PGCOPY",
data: `\x`,
err: `unsupported escape sequence: \\x`,
},
{
name: "unexpected escape 3",
create: `b bytes`,
typ: "PGCOPY",
data: `\3`,
err: `unsupported escape sequence: \\3`,
},
{
name: "escapes",
create: `b bytes`,
typ: "PGCOPY",
data: `\x43\122`,
query: map[string][][]string{
`SELECT * from t`: {{"CR"}},
},
},
{
name: "normal",
create: `i int8, s string`,
typ: "PGCOPY",
data: "1\tSTR\n2\t\\N\n\\N\t\\t",
query: map[string][][]string{
`SELECT * from t`: {{"1", "STR"}, {"2", "NULL"}, {"NULL", "\t"}},
},
},
{
name: "comma delim",
create: `i int8, s string`,
typ: "PGCOPY",
with: `WITH delimiter = ','`,
data: "1,STR\n2,\\N\n\\N,\\,",
query: map[string][][]string{
`SELECT * from t`: {{"1", "STR"}, {"2", "NULL"}, {"NULL", ","}},
},
},
{
name: "size out of range",
create: `i int8`,
typ: "PGCOPY",
with: `WITH max_row_size = '10GB'`,
err: "out of range: 10000000000",
},
{
name: "line too long",
create: `i int8`,
typ: "PGCOPY",
data: "123456",
with: `WITH max_row_size = '5B'`,
err: "line too long",
},
{
name: "not enough values",
typ: "PGCOPY",
create: "a INT8, b INT8",
data: `1`,
err: "expected 2 values, got 1",
},
{
name: "too many values",
typ: "PGCOPY",
create: "a INT8, b INT8",
data: "1\t2\t3",
err: "expected 2 values, got 3",
},
// Postgres DUMP
{
name: "mismatch cols",
typ: "PGDUMP",
data: `
CREATE TABLE t (i int8);
COPY t (s) FROM stdin;
0
\.
`,
err: `targeted column "s" not found`,
},
{
name: "missing COPY done",
typ: "PGDUMP",
data: `
CREATE TABLE t (i int8);
COPY t (i) FROM stdin;
0
`,
err: `unexpected EOF`,
},
{
name: "semicolons and comments",
typ: "PGDUMP",
data: `
CREATE TABLE t (i int8);
;;;
-- nothing ;
;
-- blah
`,
query: map[string][][]string{
`SELECT * from t`: {},
},
},
{
name: "size out of range",
typ: "PGDUMP",
with: `WITH max_row_size = '10GB'`,
err: "out of range: 10000000000",
},
{
name: "line too long",
typ: "PGDUMP",
data: "CREATE TABLE t (i INT8);",
with: `WITH max_row_size = '5B'`,
err: "line too long",
},
{
name: "not enough values",
typ: "PGDUMP",
data: `
CREATE TABLE t (a INT8, b INT8);
COPY t (a, b) FROM stdin;
1
\.
`,
err: "expected 2 values, got 1",
},
{
name: "too many values",
typ: "PGDUMP",
data: `
CREATE TABLE t (a INT8, b INT8);
COPY t (a, b) FROM stdin;
1 2 3
\.
`,
err: "expected 2 values, got 3",
},
{
name: "too many cols",
typ: "PGDUMP",
data: `
CREATE TABLE t (a INT8, b INT8);
COPY t (a, b, c) FROM stdin;
1 2 3
\.
`,
err: `targeted column "c" not found`,
},
{
name: "out-of-order and omitted COPY columns",
typ: "PGDUMP",
data: `
CREATE TABLE "public"."tbl" ("a" int primary key, "B" string, "c" int, d int DEFAULT 6);
COPY "public"."tbl" (c, "a", "B") FROM STDIN;
5 1 carrot
9 3 mango
\.
END;
`,
query: map[string][][]string{
`SELECT a, "B", c, d FROM tbl`: {
{"1", "carrot", "5", "6"},
{"3", "mango", "9", "6"},
},
},
},
{
name: "fk",
typ: "PGDUMP",
data: testPgdumpFk,
with: "WITH ignore_unsupported_statements",
query: map[string][][]string{
getTablesQuery: {
{"public", "cities", "table"},
{"public", "weather", "table"},
},
`SELECT city FROM cities`: {{"Berkeley"}},
`SELECT city FROM weather`: {{"Berkeley"}},
`SELECT dependson_name
FROM crdb_internal.backward_dependencies
`: {{"weather_city_fkey"}},
`SELECT create_statement
FROM crdb_internal.create_statements
WHERE descriptor_name in ('cities', 'weather')
ORDER BY descriptor_name
`: {{testPgdumpCreateCities}, {testPgdumpCreateWeather}},
// Verify the constraint is unvalidated.
`SHOW CONSTRAINTS FROM weather
`: {{"weather", "weather_city_fkey", "FOREIGN KEY", "FOREIGN KEY (city) REFERENCES cities(city) NOT VALID", "false"}},
},
},
{
name: "fk-circular",
typ: "PGDUMP",
data: testPgdumpFkCircular,
query: map[string][][]string{
getTablesQuery: {
{"public", "a", "table"},
{"public", "b", "table"},
},
`SELECT i, k FROM a`: {{"2", "2"}},
`SELECT j FROM b`: {{"2"}},
`SELECT dependson_name
FROM crdb_internal.backward_dependencies ORDER BY dependson_name`: {
{"a_i_fkey"},
{"a_k_fkey"},
{"b_j_fkey"},
},
`SELECT create_statement
FROM crdb_internal.create_statements
WHERE descriptor_name in ('a', 'b')
ORDER BY descriptor_name
`: {{
`CREATE TABLE public.a (
i INT8 NOT NULL,
k INT8 NULL,
CONSTRAINT a_pkey PRIMARY KEY (i ASC),
CONSTRAINT a_i_fkey FOREIGN KEY (i) REFERENCES public.b(j) NOT VALID,
CONSTRAINT a_k_fkey FOREIGN KEY (k) REFERENCES public.a(i) NOT VALID,
FAMILY "primary" (i, k)
)`}, {
`CREATE TABLE public.b (
j INT8 NOT NULL,
CONSTRAINT b_pkey PRIMARY KEY (j ASC),
CONSTRAINT b_j_fkey FOREIGN KEY (j) REFERENCES public.a(i) NOT VALID,
FAMILY "primary" (j)
)`,
}},
`SHOW CONSTRAINTS FROM a`: {
{"a", "a_i_fkey", "FOREIGN KEY", "FOREIGN KEY (i) REFERENCES b(j) NOT VALID", "false"},
{"a", "a_k_fkey", "FOREIGN KEY", "FOREIGN KEY (k) REFERENCES a(i) NOT VALID", "false"},
{"a", "a_pkey", "PRIMARY KEY", "PRIMARY KEY (i ASC)", "true"},
},
`SHOW CONSTRAINTS FROM b`: {
{"b", "b_j_fkey", "FOREIGN KEY", "FOREIGN KEY (j) REFERENCES a(i) NOT VALID", "false"},
{"b", "b_pkey", "PRIMARY KEY", "PRIMARY KEY (j ASC)", "true"},
},
},
},
{
name: "fk-skip",
typ: "PGDUMP",
data: testPgdumpFk,
with: `WITH skip_foreign_keys, ignore_unsupported_statements`,
query: map[string][][]string{
getTablesQuery: {
{"public", "cities", "table"},
{"public", "weather", "table"},
},
// Verify the constraint is skipped.
`SELECT dependson_name FROM crdb_internal.backward_dependencies`: {},
`SHOW CONSTRAINTS FROM weather`: {},
},
},
{
name: "fk unreferenced",
typ: "TABLE weather FROM PGDUMP",
data: testPgdumpFk,
with: "WITH ignore_unsupported_statements",
err: `table "public.cities" not found`,
},
{
name: "fk unreferenced skipped",
typ: "TABLE weather FROM PGDUMP",
data: testPgdumpFk,
with: `WITH skip_foreign_keys, ignore_unsupported_statements`,
query: map[string][][]string{
getTablesQuery: {{"public", "weather", "table"}},
},
},
{
name: "case sensitive table names",
typ: "PGDUMP",
data: `
CREATE TABLE t ("sPoNgE" int8);
INSERT INTO t ("sPoNgE") VALUES (1337);
`,
query: map[string][][]string{
`SELECT * from t`: {{"1337"}},
},
},
{
name: "sequence",
typ: "PGDUMP",
with: "WITH ignore_unsupported_statements",
data: `
CREATE TABLE t (a INT8);
CREATE SEQUENCE public.i_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE public.i_seq OWNED BY public.i.id;
ALTER TABLE ONLY t ALTER COLUMN a SET DEFAULT nextval('public.i_seq'::regclass);
SELECT pg_catalog.setval('public.i_seq', 10, true);
`,
query: map[string][][]string{
`SELECT nextval('i_seq')`: {{"11"}},
`SHOW CREATE SEQUENCE i_seq`: {{"i_seq", "CREATE SEQUENCE public.i_seq MINVALUE 1 MAXVALUE 9223372036854775807 INCREMENT 1 START 1"}},
},
},
{
name: "INSERT without specifying all column values",
typ: "PGDUMP",
data: `
SET standard_conforming_strings = OFF;
BEGIN;
CREATE TABLE "bob" ("a" int, "b" int, c int default 2);
INSERT INTO "bob" ("a") VALUES (1), (5);
INSERT INTO "bob" ("c", "b") VALUES (3, 2);
COMMIT
`,
with: `WITH ignore_unsupported_statements`,
query: map[string][][]string{
`SELECT * FROM bob`: {
{"1", "NULL", "2"},
{"5", "NULL", "2"},
{"NULL", "2", "3"},
},
},
},
{
name: "ALTER COLUMN x SET NOT NULL",
typ: "PGDUMP",
data: `
CREATE TABLE t (a INT8 PRIMARY KEY, b INT8);
ALTER TABLE t ALTER COLUMN b SET NOT NULL;
`,
query: map[string][][]string{
`SHOW CREATE TABLE t`: {
{
"t",
`CREATE TABLE public.t (
a INT8 NOT NULL,
b INT8 NOT NULL,
CONSTRAINT "primary" PRIMARY KEY (a ASC),
FAMILY "primary" (a, b)
)`,
},
},
},
},
{
name: "ALTER COLUMN x SET VISIBLE",
typ: "PGDUMP",
data: `
CREATE TABLE t (a INT8 PRIMARY KEY, b INT8, c INT8 NOT VISIBLE);
ALTER TABLE t ALTER COLUMN c SET VISIBLE;
ALTER TABLE t ALTER COLUMN b SET NOT VISIBLE;
`,
query: map[string][][]string{
`SHOW CREATE TABLE t`: {
{
"t",
`CREATE TABLE public.t (
a INT8 NOT NULL,
b INT8 NOT VISIBLE NULL,
c INT8 NULL,
CONSTRAINT "primary" PRIMARY KEY (a ASC),
FAMILY "primary" (a, b, c)
)`,
},
},
},
},
{
name: "ALTER COLUMN x SET DEFAULT",
typ: "PGDUMP",
data: `
CREATE TABLE t (a INT8 PRIMARY KEY, b INT8);
ALTER TABLE t ALTER COLUMN b SET DEFAULT 8;
`,
query: map[string][][]string{
`SHOW CREATE TABLE t`: {
{
"t",
`CREATE TABLE public.t (
a INT8 NOT NULL,
b INT8 NULL DEFAULT 8:::INT8,
CONSTRAINT "primary" PRIMARY KEY (a ASC),
FAMILY "primary" (a, b)
)`,
},
},
},
},
{
name: "non-public schema",
typ: "PGDUMP",
data: `
create schema s;
create table s.t (i INT8)`,
query: map[string][][]string{
getTablesQuery: {{"s", "t", "table"}},
},
},
{
name: "many tables",
typ: "PGDUMP",
data: func() string {
var sb strings.Builder
for i := 1; i <= 100; i++ {
fmt.Fprintf(&sb, "CREATE TABLE t%d ();\n", i)
}
return sb.String()
}(),
},
{
name: "partial index",
typ: "PGDUMP",
data: `
CREATE TABLE t (a INT8, b INT8);
CREATE INDEX i ON t USING btree (a) WHERE (b > 10);
`,
err: "cannot import a table with partial indexes",
},
// Error
{
name: "unsupported import format",
create: `b bytes`,
typ: "NOPE",
err: `unsupported import format`,
},
{
name: "sequences",
create: `i int8 default nextval('s')`,
typ: "CSV",
err: `"s" not found`,
},
}
var mockRecorder struct {
syncutil.Mutex
dataString, rejectedString string
}
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
mockRecorder.Lock()
defer mockRecorder.Unlock()
if r.Method == "GET" {
fmt.Fprint(w, mockRecorder.dataString)
}
if r.Method == "PUT" {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
panic(err)
}
mockRecorder.rejectedString = string(body)
}
}))
defer srv.Close()
// Create and drop a table to make sure a descriptor ID gets used to verify
// ID rewrites happen correctly. Useful when running just a single test.
sqlDB.Exec(t, `CREATE TABLE blah (i int8)`)
sqlDB.Exec(t, `DROP TABLE blah`)
for _, saveRejected := range []bool{false, true} {
// this test is big and slow as is, so we can't afford to double it in race.
if util.RaceEnabled && saveRejected {
continue
}
for i, tc := range tests {
if tc.typ != "CSV" && tc.typ != "DELIMITED" && saveRejected {
continue
}
if saveRejected {
if tc.with == "" {
tc.with = "WITH experimental_save_rejected"
} else {
tc.with += ", experimental_save_rejected"
}
}
t.Run(fmt.Sprintf("%s/%s: save_rejected=%v", tc.typ, tc.name, saveRejected), func(t *testing.T) {
if tc.skipIssue != 0 {
skip.WithIssue(t, tc.skipIssue)
return
}
dbName := fmt.Sprintf("d%d", i)
sqlDB.Exec(t, fmt.Sprintf(`CREATE DATABASE %s; USE %[1]s`, dbName))
defer sqlDB.Exec(t, fmt.Sprintf(`DROP DATABASE %s`, dbName))
var q string
if tc.create != "" {
q = fmt.Sprintf(`IMPORT TABLE t (%s) %s DATA ($1) %s`, tc.create, tc.typ, tc.with)
} else {
q = fmt.Sprintf(`IMPORT %s ($1) %s`, tc.typ, tc.with)
}
t.Log(q, srv.URL, "\nFile contents:\n", tc.data)
mockRecorder.dataString = tc.data
mockRecorder.rejectedString = ""
if !saveRejected || tc.rejected == "" {
sqlDB.ExpectErr(t, tc.err, q, srv.URL)
} else {
sqlDB.Exec(t, q, srv.URL)
}
if tc.err == "" || saveRejected {
for query, res := range tc.query {
sqlDB.CheckQueryResults(t, query, res)
}
if tc.rejected != mockRecorder.rejectedString {
t.Errorf("expected:\n%q\ngot:\n%q\n", tc.rejected,
mockRecorder.rejectedString)
}
}
})
}
}
t.Run("mysqlout multiple", func(t *testing.T) {
sqlDB.Exec(t, `CREATE DATABASE mysqlout; USE mysqlout`)
mockRecorder.dataString = "1"
sqlDB.Exec(t, `IMPORT TABLE t (s STRING) DELIMITED DATA ($1, $1)`, srv.URL)
sqlDB.CheckQueryResults(t, `SELECT * FROM t`, [][]string{{"1"}, {"1"}})
})
}
// TestImportUserDefinedTypes verifies that IMPORT INTO can load data into
// columns of a user-defined enum type from the CSV, AVRO, DELIMITED, and
// PGCOPY formats, and that importing into a table whose column default uses
// the enum fails with the expected error.
func TestImportUserDefinedTypes(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	baseDir, cleanup := testutils.TempDir(t)
	defer cleanup()
	tc := testcluster.StartTestCluster(
		t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: baseDir}})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	// Set up some initial state for the tests.
	sqlDB.Exec(t, `CREATE TYPE greeting AS ENUM ('hello', 'hi')`)
	// Create some AVRO encoded data. The enum values are encoded as plain
	// strings in the AVRO schema; the import converts them to the enum type.
	var avroData string
	{
		var data bytes.Buffer
		// Set up a simple schema for the import data.
		schema := map[string]interface{}{
			"type": "record",
			"name": "t",
			"fields": []map[string]interface{}{
				{
					"name": "a",
					"type": "string",
				},
				{
					"name": "b",
					"type": "string",
				},
			},
		}
		schemaStr, err := json.Marshal(schema)
		require.NoError(t, err)
		codec, err := goavro.NewCodec(string(schemaStr))
		require.NoError(t, err)
		// Create an AVRO writer from the schema.
		ocf, err := goavro.NewOCFWriter(goavro.OCFConfig{
			W:     &data,
			Codec: codec,
		})
		require.NoError(t, err)
		row1 := map[string]interface{}{
			"a": "hello",
			"b": "hello",
		}
		row2 := map[string]interface{}{
			"a": "hi",
			"b": "hi",
		}
		// Add the data rows to the writer.
		require.NoError(t, ocf.Append([]interface{}{row1, row2}))
		// Retrieve the AVRO encoded data.
		avroData = data.String()
	}
	tests := []struct {
		create      string // column definitions for CREATE TABLE t (...)
		typ         string // import format
		contents    string // raw file contents served to the import
		intoCols    string // column list for IMPORT INTO t (...)
		verifyQuery string // query used to check the imported rows
		expected    [][]string
		errString   string // if non-empty, the import is expected to fail
	}{
		// Test CSV imports.
		{
			create:      "a greeting, b greeting",
			intoCols:    "a, b",
			typ:         "CSV",
			contents:    "hello,hello\nhi,hi\n",
			verifyQuery: "SELECT * FROM t ORDER BY a",
			expected:    [][]string{{"hello", "hello"}, {"hi", "hi"}},
		},
		// Test AVRO imports.
		{
			create:      "a greeting, b greeting",
			intoCols:    "a, b",
			typ:         "AVRO",
			contents:    avroData,
			verifyQuery: "SELECT * FROM t ORDER BY a",
			expected:    [][]string{{"hello", "hello"}, {"hi", "hi"}},
		},
		// Test DELIMITED imports.
		{
			create:      "a greeting, b greeting",
			intoCols:    "a, b",
			typ:         "DELIMITED",
			contents:    "hello\thello\nhi\thi\n",
			verifyQuery: "SELECT * FROM t ORDER BY a",
			expected:    [][]string{{"hello", "hello"}, {"hi", "hi"}},
		},
		// Test PGCOPY imports.
		{
			create:      "a greeting, b greeting",
			intoCols:    "a, b",
			typ:         "PGCOPY",
			contents:    "hello\thello\nhi\thi\n",
			verifyQuery: "SELECT * FROM t ORDER BY a",
			expected:    [][]string{{"hello", "hello"}, {"hi", "hi"}},
		},
		// Test table with default value
		{
			create:    "a greeting, b greeting default 'hi'",
			intoCols:  "a, b",
			typ:       "PGCOPY",
			contents:  "hello\nhi\thi\n",
			errString: "type OID 100052 does not exist",
		},
	}
	// Test IMPORT INTO.
	for _, test := range tests {
		// Write the test data into a file.
		f, err := ioutil.TempFile(baseDir, "data")
		require.NoError(t, err)
		n, err := f.Write([]byte(test.contents))
		require.NoError(t, err)
		require.Equal(t, len(test.contents), n)
		// Close the file eagerly: without this the loop leaks one open file
		// descriptor per test case, and closing guarantees the contents are
		// durable before the import reads the file.
		require.NoError(t, f.Close())
		// Run the import statement.
		sqlDB.Exec(t, fmt.Sprintf("CREATE TABLE t (%s)", test.create))
		importStmt := fmt.Sprintf("IMPORT INTO t (%s) %s DATA ($1)", test.intoCols, test.typ)
		importArgs := fmt.Sprintf("nodelocal://0/%s", filepath.Base(f.Name()))
		if test.errString == "" {
			sqlDB.Exec(t, importStmt, importArgs)
			// Ensure that the table data is as we expect.
			sqlDB.CheckQueryResults(t, test.verifyQuery, test.expected)
		} else {
			sqlDB.ExpectErr(t, test.errString, importStmt, importArgs)
		}
		// Clean up after the test.
		sqlDB.Exec(t, "DROP TABLE t")
	}
}
// Shared pg_dump fixtures and expected SHOW CREATE TABLE outputs used by the
// PGDUMP import tests above. The raw string contents are significant
// byte-for-byte (they are fed to, or compared against, the database), so do
// not reformat them.
const (
// testPgdumpCreateCities is the expected CockroachDB CREATE TABLE statement
// produced after importing the `cities` table from testPgdumpFk.
testPgdumpCreateCities = `CREATE TABLE public.cities (
city VARCHAR(80) NOT NULL,
CONSTRAINT cities_pkey PRIMARY KEY (city ASC),
FAMILY "primary" (city)
)`
// testPgdumpCreateWeather is the expected CREATE TABLE statement for the
// `weather` table, including the hidden rowid column and the unvalidated
// (NOT VALID) foreign key added during import.
testPgdumpCreateWeather = `CREATE TABLE public.weather (
city VARCHAR(80) NULL,
temp_lo INT8 NULL,
temp_hi INT8 NULL,
prcp FLOAT4 NULL,
date DATE NULL,
rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(),
CONSTRAINT "primary" PRIMARY KEY (rowid ASC),
CONSTRAINT weather_city_fkey FOREIGN KEY (city) REFERENCES public.cities(city) NOT VALID,
FAMILY "primary" (city, temp_lo, temp_hi, prcp, date, rowid)
)`
// testPgdumpFk is a pg_dump fixture with two tables where `weather`
// references `cities` via a foreign key, plus COPY data for both.
testPgdumpFk = `
CREATE TABLE public.cities (
city character varying(80) NOT NULL
);
ALTER TABLE public.cities OWNER TO postgres;
CREATE TABLE public.weather (
city character varying(80),
temp_lo int8,
temp_hi int8,
prcp real,
date date
);
ALTER TABLE public.weather OWNER TO postgres;
COPY public.cities (city) FROM stdin;
Berkeley
\.
COPY public.weather (city, temp_lo, temp_hi, prcp, date) FROM stdin;
Berkeley	45	53	0	1994-11-28
\.
ALTER TABLE ONLY public.cities
ADD CONSTRAINT cities_pkey PRIMARY KEY (city);
ALTER TABLE ONLY public.weather
ADD CONSTRAINT weather_city_fkey FOREIGN KEY (city) REFERENCES public.cities(city);
`
// testPgdumpFkCircular is a pg_dump fixture with circular foreign key
// dependencies: a references b, b references a, and a also self-references.
testPgdumpFkCircular = `
CREATE TABLE public.a (
i int8 NOT NULL,
k int8
);
CREATE TABLE public.b (
j int8 NOT NULL
);
COPY public.a (i, k) FROM stdin;
2	2
\.
COPY public.b (j) FROM stdin;
2
\.
ALTER TABLE ONLY public.a
ADD CONSTRAINT a_pkey PRIMARY KEY (i);
ALTER TABLE ONLY public.b
ADD CONSTRAINT b_pkey PRIMARY KEY (j);
ALTER TABLE ONLY public.a
ADD CONSTRAINT a_i_fkey FOREIGN KEY (i) REFERENCES public.b(j);
ALTER TABLE ONLY public.a
ADD CONSTRAINT a_k_fkey FOREIGN KEY (k) REFERENCES public.a(i);
ALTER TABLE ONLY public.b
ADD CONSTRAINT b_j_fkey FOREIGN KEY (j) REFERENCES public.a(i);
`
)
// TestImportRowLimit exercises the row_limit IMPORT option across the CSV,
// DELIMITED, AVRO, PGDUMP, and MYSQLDUMP formats. It covers valid limits,
// invalid limits (zero, negative, non-numeric), limits larger than the input,
// multi-table dump imports, and multiple-file imports.
func TestImportRowLimit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// The test server serves whatever is currently stored in `data` on GET,
	// letting each subtest swap in its own input without a new server.
	var data string
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			_, _ = w.Write([]byte(data))
		}
	}))
	defer srv.Close()
	ctx := context.Background()
	baseDir := filepath.Join("testdata")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	// Build a small AVRO payload with three rows so that a limit of 1 is
	// observable.
	avroField := []map[string]interface{}{
		{
			"name": "a",
			"type": "int",
		},
		{
			"name": "b",
			"type": "int",
		},
	}
	avroRows := []map[string]interface{}{
		{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6},
	}
	avroData := createAvroData(t, "t", avroField, avroRows)
	tests := []struct {
		name        string
		create      string // column definitions, for formats that need them
		typ         string // import format
		with        string // WITH options clause
		data        string // input served by srv
		verifyQuery string // query used to check the imported rows
		err         string // expected error, if any
		expected    [][]string
	}{
		// Test CSV imports.
		{
			name:        "skip 1 row and limit 1 row",
			create:      `a string, b string`,
			with:        `WITH row_limit = '1', skip='1'`,
			typ:         "CSV",
			data:        "a string, b string\nfoo,normal\nbar,baz\nchocolate,cake\n",
			verifyQuery: `SELECT * from t`,
			expected:    [][]string{{"foo", "normal"}},
		},
		{
			name:        "row limit 0",
			create:      `a string, b string`,
			with:        `WITH row_limit = '0', skip='1'`,
			typ:         "CSV",
			data:        "a string, b string\nfoo,normal\nbar,baz\nchocolate,cake\n",
			verifyQuery: `SELECT * from t`,
			err:         "pq: row_limit must be > 0",
		},
		{
			name:        "row limit negative",
			create:      `a string, b string`,
			with:        `WITH row_limit = '-5', skip='1'`,
			typ:         "CSV",
			data:        "a string, b string\nfoo,normal\nbar,baz\nchocolate,cake\n",
			verifyQuery: `SELECT * from t`,
			err:         "pq: row_limit must be > 0",
		},
		{
			name:        "invalid row limit",
			create:      `a string, b string`,
			with:        `WITH row_limit = 'abc', skip='1'`,
			typ:         "CSV",
			data:        "a string, b string\nfoo,normal\nbar,baz\nchocolate,cake\n",
			verifyQuery: `SELECT * from t`,
			err:         "invalid numeric row_limit value",
		},
		{
			name:        "row limit > max rows",
			create:      `a string, b string`,
			with:        `WITH row_limit = '13', skip='1'`,
			typ:         "CSV",
			data:        "a string, b string\nfoo,normal\nbar,baz\nchocolate,cake\n",
			verifyQuery: `SELECT * from t`,
			expected:    [][]string{{"foo", "normal"}, {"bar", "baz"}, {"chocolate", "cake"}},
		},
		// Test DELIMITED imports.
		{
			name:        "tsv row limit",
			create:      "a string, b string",
			with:        `WITH row_limit = '1', skip='1'`,
			typ:         "DELIMITED",
			data:        "hello\thello\navocado\ttoast\npoached\tegg\n",
			verifyQuery: `SELECT * from t`,
			expected:    [][]string{{"avocado", "toast"}},
		},
		{
			name:        "tsv invalid row limit",
			create:      `a string, b string`,
			with:        `WITH row_limit = 'potato', skip='1'`,
			typ:         "DELIMITED",
			data:        "hello\thello\navocado\ttoast\npoached\tegg\n",
			verifyQuery: `SELECT * from t`,
			err:         "invalid numeric row_limit value",
		},
		// Test AVRO imports.
		{
			name:        "avro row limit",
			create:      "a INT, b INT",
			with:        `WITH row_limit = '1'`,
			typ:         "AVRO",
			data:        avroData,
			verifyQuery: "SELECT * FROM t",
			expected:    [][]string{{"1", "2"}},
		},
		{
			name:        "avro invalid row limit",
			create:      "a INT, b INT",
			with:        `WITH row_limit = 'potato'`,
			typ:         "AVRO",
			data:        avroData,
			verifyQuery: `SELECT * from t`,
			err:         "invalid numeric row_limit value",
		},
		// Test PGDump imports.
		{
			name: "pgdump single table with insert",
			typ:  "PGDUMP",
			data: `CREATE TABLE t (a INT, b INT);
INSERT INTO t (a, b) VALUES (1, 2), (3, 4);
`,
			with:        `WITH row_limit = '1'`,
			verifyQuery: `SELECT * from t`,
			expected:    [][]string{{"1", "2"}},
		},
		{
			name: "pgdump multiple inserts same table",
			typ:  "PGDUMP",
			data: `CREATE TABLE t (a INT, b INT);
INSERT INTO t (a, b) VALUES (1, 2);
INSERT INTO t (a, b) VALUES (3, 4);
INSERT INTO t (a, b) VALUES (5, 6);
INSERT INTO t (a, b) VALUES (7, 8);
`,
			with:        `WITH row_limit = '2'`,
			verifyQuery: `SELECT * from t`,
			expected:    [][]string{{"1", "2"}, {"3", "4"}},
		},
		// Test Mysql imports.
		{
			name: "mysqldump single table",
			typ:  "MYSQLDUMP",
			data: `CREATE TABLE t (a INT, b INT);
INSERT INTO t (a, b) VALUES (5, 6), (7, 8);
`,
			with:        `WITH row_limit = '1'`,
			verifyQuery: `SELECT * from t`,
			expected:    [][]string{{"5", "6"}},
		},
		{
			name: "mysqldump multiple inserts same table",
			typ:  "MYSQLDUMP",
			data: `CREATE TABLE t (a INT, b INT);
INSERT INTO t (a, b) VALUES (1, 2);
INSERT INTO t (a, b) VALUES (3, 4);
INSERT INTO t (a, b) VALUES (5, 6);
INSERT INTO t (a, b) VALUES (7, 8);
`,
			with:        `WITH row_limit = '2'`,
			verifyQuery: `SELECT * from t`,
			expected:    [][]string{{"1", "2"}, {"3", "4"}},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			data = test.data
			importTableQuery := fmt.Sprintf(`IMPORT TABLE t (%s) %s DATA ($1) %s`, test.create, test.typ, test.with)
			if test.err != "" {
				sqlDB.ExpectErr(t, test.err, importTableQuery, srv.URL)
			} else {
				if test.typ == "CSV" || test.typ == "AVRO" || test.typ == "DELIMITED" {
					sqlDB.Exec(t, importTableQuery, srv.URL)
					// Ensure that the table data is as we expect.
					sqlDB.CheckQueryResults(t, test.verifyQuery, test.expected)
					sqlDB.Exec(t, `DROP TABLE t`)
				} else if test.typ == "PGDUMP" || test.typ == "MYSQLDUMP" {
					sqlDB.Exec(t, `DROP TABLE IF EXISTS t, u`)
					// Import table from dump format.
					importDumpQuery := fmt.Sprintf(`IMPORT TABLE t FROM %s ($1) %s`, test.typ, test.with)
					sqlDB.Exec(t, importDumpQuery, srv.URL)
					sqlDB.CheckQueryResults(t, test.verifyQuery, test.expected)
					sqlDB.Exec(t, `DROP TABLE t`)
					// Import dump format directly.
					importDumpQuery = fmt.Sprintf(`IMPORT %s ($1) %s`, test.typ, test.with)
					sqlDB.Exec(t, importDumpQuery, srv.URL)
					sqlDB.CheckQueryResults(t, test.verifyQuery, test.expected)
					sqlDB.Exec(t, `DROP TABLE t`)
				}
			}
		})
	}
	t.Run("pgdump multitable", func(t *testing.T) {
		sqlDB.Exec(t, `DROP TABLE IF EXISTS simple, second, seqtable CASCADE`)
		sqlDB.Exec(t, `DROP SEQUENCE IF EXISTS a_seq`)
		// Data is populated from files using COPY FROM STDIN statement.
		_, secondFile := getSecondPostgresDumpTestdata(t)
		second := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(secondFile, baseDir))}
		multitableFile := getMultiTablePostgresDumpTestdata(t)
		// NOTE(review): this URI uses "0/%s" while the one above uses "0%s";
		// confirm the trimmed path shapes really differ between the two
		// testdata helpers.
		multitable := []interface{}{fmt.Sprintf("nodelocal://0/%s", strings.TrimPrefix(multitableFile, baseDir))}
		expectedRowLimit := 4
		// Import a single table `second` and verify number of rows imported.
		importQuery := fmt.Sprintf(`IMPORT TABLE second FROM PGDUMP ($1) WITH row_limit="%d",ignore_unsupported_statements`,
			expectedRowLimit)
		sqlDB.Exec(t, importQuery, second...)
		var numRows int
		sqlDB.QueryRow(t, "SELECT count(*) FROM second").Scan(&numRows)
		require.Equal(t, expectedRowLimit, numRows)
		sqlDB.Exec(t, `DROP TABLE IF EXISTS second`)
		// Import multiple tables including `simple` and `second`.
		expectedRowLimit = 3
		importQuery = fmt.Sprintf(`IMPORT PGDUMP ($1) WITH row_limit="%d",ignore_unsupported_statements`, expectedRowLimit)
		sqlDB.Exec(t, importQuery, multitable...)
		sqlDB.QueryRow(t, "SELECT count(*) FROM second").Scan(&numRows)
		require.Equal(t, expectedRowLimit, numRows)
		// Fixed: this previously re-checked `second`, leaving `simple`
		// unverified; check `simple` so the limit is confirmed for both
		// imported tables (mirroring the mysqldump multitable subtest below).
		sqlDB.QueryRow(t, "SELECT count(*) FROM simple").Scan(&numRows)
		require.Equal(t, expectedRowLimit, numRows)
		sqlDB.Exec(t, `DROP TABLE simple, second, seqtable CASCADE`)
		sqlDB.Exec(t, `DROP SEQUENCE a_seq`)
	})
	t.Run("pgdump multitable interleaved inserts", func(t *testing.T) {
		sqlDB.Exec(t, `DROP TABLE IF EXISTS t, u`)
		var numRows int
		expectedRowLimit := 1
		data = `CREATE TABLE t (a INT, b INT);
CREATE TABLE u (a INT);
INSERT INTO t (a, b) VALUES (1, 2);
INSERT INTO u (a) VALUES (100);
INSERT INTO t (a, b) VALUES (7, 8);
INSERT INTO u (a) VALUES (600);`
		importDumpQuery := fmt.Sprintf(`IMPORT PGDUMP ($1) WITH row_limit="%d"`, expectedRowLimit)
		sqlDB.Exec(t, importDumpQuery, srv.URL)
		// Verify expectedRowLimit number of rows were imported from t, u
		sqlDB.QueryRow(t, "SELECT count(*) FROM t").Scan(&numRows)
		require.Equal(t, expectedRowLimit, numRows)
		sqlDB.QueryRow(t, "SELECT count(*) FROM u").Scan(&numRows)
		require.Equal(t, expectedRowLimit, numRows)
	})
	t.Run("mysqldump multitable", func(t *testing.T) {
		sqlDB.Exec(t, `DROP TABLE IF EXISTS simple, second, third, everything CASCADE`)
		sqlDB.Exec(t, `DROP SEQUENCE IF EXISTS simple_auto_inc, third_auto_inc`)
		var numRows int
		files := getMysqldumpTestdata(t)
		simpleMysql := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(files.simple, baseDir))}
		multitableMysql := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(files.wholeDB, baseDir))}
		expectedRowLimit := 2
		// single table
		importQuery := fmt.Sprintf(`IMPORT TABLE simple FROM MYSQLDUMP ($1) WITH row_limit="%d"`, expectedRowLimit)
		sqlDB.Exec(t, importQuery, simpleMysql...)
		sqlDB.QueryRow(t, "SELECT count(*) FROM simple").Scan(&numRows)
		require.Equal(t, expectedRowLimit, numRows)
		sqlDB.Exec(t, `DROP TABLE IF EXISTS simple`)
		sqlDB.Exec(t, `DROP SEQUENCE IF EXISTS simple_auto_inc`)
		// multiple tables
		importQuery = fmt.Sprintf(`IMPORT MYSQLDUMP ($1) WITH row_limit="%d"`, expectedRowLimit)
		sqlDB.Exec(t, importQuery, multitableMysql...)
		sqlDB.QueryRow(t, "SELECT count(*) FROM second").Scan(&numRows)
		require.Equal(t, expectedRowLimit, numRows)
		sqlDB.QueryRow(t, "SELECT count(*) FROM simple").Scan(&numRows)
		require.Equal(t, expectedRowLimit, numRows)
	})
	t.Run("row limit multiple csv", func(t *testing.T) {
		sqlDB.Exec(t, `CREATE DATABASE test; USE test`)
		defer sqlDB.Exec(t, `DROP DATABASE test`)
		data = "pear\navocado\nwatermelon\nsugar"
		// The limit applies per file, so importing the same URL twice yields
		// two rows from each file.
		sqlDB.Exec(t, `IMPORT TABLE t (s STRING) CSV DATA ($1, $2) WITH row_limit='2'`,
			srv.URL, srv.URL)
		sqlDB.CheckQueryResults(t, `SELECT * FROM t`,
			[][]string{{"pear"}, {"avocado"}, {"pear"}, {"avocado"}})
		sqlDB.Exec(t, "DROP TABLE t")
	})
}
func TestImportCSVStmt(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderShort(t)
skip.UnderRace(t, "takes >1min under race")
const nodes = 3
numFiles := nodes + 2
rowsPerFile := 1000
rowsPerRaceFile := 16
var forceFailure bool
blockGC := make(chan struct{})
ctx := context.Background()
baseDir := testutils.TestDataPath(t, "csv")
tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{
SQLMemoryPoolSize: 256 << 20,
ExternalIODir: baseDir,
Knobs: base.TestingKnobs{
GCJob: &sql.GCJobTestingKnobs{RunBeforeResume: func(_ jobspb.JobID) error { <-blockGC; return nil }},
},
}})
defer tc.Stopper().Stop(ctx)
conn := tc.Conns[0]
for i := range tc.Servers {
tc.Servers[i].JobRegistry().(*jobs.Registry).TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
jobspb.TypeImport: func(raw jobs.Resumer) jobs.Resumer {
r := raw.(*importResumer)
r.testingKnobs.afterImport = func(_ backupccl.RowCount) error {
if forceFailure {
return errors.New("testing injected failure")
}
return nil
}
return r
},
}
}
sqlDB := sqlutils.MakeSQLRunner(conn)
kvDB := tc.Server(0).DB()
sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
testFiles := makeCSVData(t, numFiles, rowsPerFile, nodes, rowsPerRaceFile)
if util.RaceEnabled {
// This test takes a while with the race detector, so reduce the number of
// files and rows per file in an attempt to speed it up.
numFiles = nodes
rowsPerFile = rowsPerRaceFile
}
// Table schema used in IMPORT TABLE tests.
schema := []interface{}{"nodelocal://0/table"}
empty := []string{"'nodelocal://0/empty.csv'"}
emptySchema := []interface{}{"nodelocal://0/empty.schema"}
// Support subtests by keeping track of the number of jobs that are executed.
testNum := -1
expectedRows := numFiles * rowsPerFile
for i, tc := range []struct {
name string
query string // must have one `%s` for the files list.
args []interface{} // will have backupPath appended
files []string
jobOpts string
err string
}{
{
"schema-in-file",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s)`,
schema,
testFiles.files,
``,
"",
},
{
"schema-in-file-intodb",
`IMPORT TABLE csv1.t CREATE USING $1 CSV DATA (%s)`,
schema,
testFiles.files,
``,
"",
},
{
"schema-in-query",
`IMPORT TABLE t (a INT8 PRIMARY KEY, b STRING, INDEX (b), INDEX (a, b)) CSV DATA (%s)`,
nil,
testFiles.files,
``,
"",
},
{
"schema-in-query-opts",
`IMPORT TABLE t (a INT8 PRIMARY KEY, b STRING, INDEX (b), INDEX (a, b)) CSV DATA (%s) WITH delimiter = '|', comment = '#', nullif='', skip = '2'`,
nil,
testFiles.filesWithOpts,
` WITH comment = '#', delimiter = '|', "nullif" = '', skip = '2'`,
"",
},
{
// Force some SST splits.
"schema-in-file-sstsize",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH sstsize = '10K'`,
schema,
testFiles.files,
` WITH sstsize = '10K'`,
"",
},
{
"empty-file",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s)`,
schema,
empty,
``,
"",
},
{
"empty-with-files",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s)`,
schema,
append(empty, testFiles.files...),
``,
"",
},
{
"schema-in-file-auto-decompress",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH decompress = 'auto'`,
schema,
testFiles.files,
` WITH decompress = 'auto'`,
"",
},
{
"schema-in-file-no-decompress",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH decompress = 'none'`,
schema,
testFiles.files,
` WITH decompress = 'none'`,
"",
},
{
"schema-in-file-explicit-gzip",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH decompress = 'gzip'`,
schema,
testFiles.gzipFiles,
` WITH decompress = 'gzip'`,
"",
},
{
"schema-in-file-auto-gzip",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH decompress = 'auto'`,
schema,
testFiles.bzipFiles,
` WITH decompress = 'auto'`,
"",
},
{
"schema-in-file-implicit-gzip",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s)`,
schema,
testFiles.gzipFiles,
``,
"",
},
{
"schema-in-file-explicit-bzip",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH decompress = 'bzip'`,
schema,
testFiles.bzipFiles,
` WITH decompress = 'bzip'`,
"",
},
{
"schema-in-file-auto-bzip",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH decompress = 'auto'`,
schema,
testFiles.bzipFiles,
` WITH decompress = 'auto'`,
"",
},
{
"schema-in-file-implicit-bzip",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s)`,
schema,
testFiles.bzipFiles,
``,
"",
},
// NB: successes above, failures below, because we check the i-th job.
{
"bad-opt-name",
`IMPORT TABLE t (a INT8 PRIMARY KEY, b STRING, INDEX (b), INDEX (a, b)) CSV DATA (%s) WITH foo = 'bar'`,
nil,
testFiles.files,
``,
"invalid option \"foo\"",
},
{
"primary-key-dup",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s)`,
schema,
testFiles.filesWithDups,
``,
"duplicate key in primary index",
},
{
"no-database",
`IMPORT TABLE nonexistent.t CREATE USING $1 CSV DATA (%s)`,
schema,
testFiles.files,
``,
`database does not exist: "nonexistent.t"`,
},
{
"into-db-fails",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH into_db = 'test'`,
schema,
testFiles.files,
``,
`invalid option "into_db"`,
},
{
"schema-in-file-no-decompress-gzip",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH decompress = 'none'`,
schema,
testFiles.gzipFiles,
` WITH decompress = 'none'`,
// This returns different errors for `make test` and `make testrace` but
// field is in both error messages.
`field`,
},
{
"schema-in-file-decompress-gzip",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH decompress = 'gzip'`,
schema,
testFiles.files,
` WITH decompress = 'gzip'`,
"gzip: invalid header",
},
{
"csv-with-invalid-delimited-option",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s) WITH fields_delimited_by = '|'`,
schema,
testFiles.files,
``,
"invalid option",
},
{
"empty-schema-in-file",
`IMPORT TABLE t CREATE USING $1 CSV DATA (%s)`,
emptySchema,
testFiles.files,
``,
"expected 1 create table statement",
},
} {
t.Run(tc.name, func(t *testing.T) {
if strings.Contains(tc.name, "bzip") && len(testFiles.bzipFiles) == 0 {
skip.IgnoreLint(t, "bzip2 not available on PATH?")
}
intodb := fmt.Sprintf(`csv%d`, i)
sqlDB.Exec(t, fmt.Sprintf(`CREATE DATABASE %s`, intodb))
sqlDB.Exec(t, fmt.Sprintf(`SET DATABASE = %s`, intodb))
var unused string
var restored struct {
rows, idx, bytes int
}
var result int
query := fmt.Sprintf(tc.query, strings.Join(tc.files, ", "))
testNum++
if tc.err != "" {
sqlDB.ExpectErr(t, tc.err, query, tc.args...)
return
}
sqlDB.QueryRow(t, query, tc.args...).Scan(
&unused, &unused, &unused, &restored.rows, &restored.idx, &restored.bytes,
)
jobPrefix := fmt.Sprintf(`IMPORT TABLE %s.public.t (a INT8 PRIMARY KEY, b STRING, INDEX (b), INDEX (a, b))`, intodb)
var intodbID descpb.ID
sqlDB.QueryRow(t, fmt.Sprintf(`SELECT id FROM system.namespace WHERE name = '%s'`,
intodb)).Scan(&intodbID)
var publicSchemaID descpb.ID
sqlDB.QueryRow(t, fmt.Sprintf(`SELECT id FROM system.namespace WHERE name = '%s'`,
tree.PublicSchema)).Scan(&publicSchemaID)
var tableID int64
sqlDB.QueryRow(t, `SELECT id FROM system.namespace WHERE "parentID" = $1 AND "parentSchemaID" = $2`,
intodbID, publicSchemaID).Scan(&tableID)
if err := jobutils.VerifySystemJob(t, sqlDB, testNum, jobspb.TypeImport, jobs.StatusSucceeded, jobs.Record{
Username: security.RootUserName(),
Description: fmt.Sprintf(jobPrefix+` CSV DATA (%s)`+tc.jobOpts, strings.ReplaceAll(strings.Join(tc.files, ", "), "?AWS_SESSION_TOKEN=secrets", "?AWS_SESSION_TOKEN=redacted")),
DescriptorIDs: []descpb.ID{descpb.ID(tableID)},
}); err != nil {
t.Fatal(err)
}
isEmpty := len(tc.files) == 1 && tc.files[0] == empty[0]
if isEmpty {
sqlDB.QueryRow(t, `SELECT count(*) FROM t`).Scan(&result)
if expect := 0; result != expect {
t.Fatalf("expected %d rows, got %d", expect, result)
}
return
}
if expected, actual := expectedRows, restored.rows; expected != actual {
t.Fatalf("expected %d rows, got %d", expected, actual)
}
// Verify correct number of rows via COUNT.
sqlDB.QueryRow(t, `SELECT count(*) FROM t`).Scan(&result)
if expect := expectedRows; result != expect {
t.Fatalf("expected %d rows, got %d", expect, result)
}
// Verify correct number of NULLs via COUNT.
sqlDB.QueryRow(t, `SELECT count(*) FROM t WHERE b IS NULL`).Scan(&result)
expectedNulls := 0
if strings.Contains(tc.query, "nullif") {
expectedNulls = expectedRows / 4
}
if result != expectedNulls {
t.Fatalf("expected %d rows, got %d", expectedNulls, result)
}
// Verify sstsize created > 1 SST files.
if tc.name == "schema-in-file-sstsize-dist" {
pattern := filepath.Join(baseDir, fmt.Sprintf("%d", i), "*.sst")
matches, err := filepath.Glob(pattern)
if err != nil {
t.Fatal(err)
}
if len(matches) < 2 {
t.Fatal("expected > 1 SST files")
}
}
})
}
// Verify unique_rowid is replaced for tables without primary keys.
t.Run("unique_rowid", func(t *testing.T) {
sqlDB.Exec(t, "CREATE DATABASE pk")
sqlDB.Exec(t, fmt.Sprintf(`IMPORT TABLE pk.t (a INT8, b STRING) CSV DATA (%s)`, strings.Join(testFiles.files, ", ")))
// Verify the rowids are being generated as expected.
sqlDB.CheckQueryResults(t,
`SELECT count(*) FROM pk.t`,
sqlDB.QueryStr(t, `
SELECT count(*) FROM
(SELECT * FROM
(SELECT generate_series(0, $1 - 1) file),
(SELECT generate_series(1, $2) rownum)
)
`, numFiles, rowsPerFile),
)
})
// Verify a failed IMPORT won't prevent a second IMPORT.
t.Run("checkpoint-leftover", func(t *testing.T) {
sqlDB.Exec(t, "CREATE DATABASE checkpoint; USE checkpoint")
// Specify wrong number of columns.
sqlDB.ExpectErr(
t, "expected 1 fields, got 2",
fmt.Sprintf(`IMPORT TABLE t (a INT8 PRIMARY KEY) CSV DATA (%s)`, testFiles.files[0]),
)
// Specify wrong table name; still shouldn't leave behind a checkpoint file.
sqlDB.ExpectErr(
t, `file specifies a schema for table t`,
fmt.Sprintf(`IMPORT TABLE bad CREATE USING $1 CSV DATA (%s)`, testFiles.files[0]), schema[0],
)
// Expect it to succeed with correct columns.
sqlDB.Exec(t, fmt.Sprintf(`IMPORT TABLE t (a INT8 PRIMARY KEY, b STRING) CSV DATA (%s)`, testFiles.files[0]))
// A second attempt should fail fast. A "slow fail" is the error message
// "restoring table desc and namespace entries: table already exists".
sqlDB.ExpectErr(
t, `relation "t" already exists`,
fmt.Sprintf(`IMPORT TABLE t (a INT8 PRIMARY KEY, b STRING) CSV DATA (%s)`, testFiles.files[0]),
)
})
// Verify that a failed import will clean up after itself. This means:
// - Delete the garbage data that it partially imported.
// - Delete the table descriptor for the table that was created during the
// import.
t.Run("failed-import-gc", func(t *testing.T) {
// Make the import fail after its body completes; presumably consumed by a
// testing knob installed earlier in the enclosing test (not visible in this
// chunk) — confirm against the knob setup.
forceFailure = true
defer func() { forceFailure = false }()
// Shorten the GC interval so the cleanup job runs within the test's lifetime.
defer gcjob.SetSmallMaxGCIntervalForTest()()
// Timestamp from before the import; used below to match only the GC job
// spawned by this subtest.
beforeImport, err := tree.MakeDTimestampTZ(tc.Server(0).Clock().Now().GoTime(), time.Millisecond)
if err != nil {
t.Fatal(err)
}
sqlDB.Exec(t, "CREATE DATABASE failedimport; USE failedimport;")
// Hit a failure during import.
sqlDB.ExpectErr(
t, `testing injected failure`,
fmt.Sprintf(`IMPORT TABLE t (a INT PRIMARY KEY, b STRING) CSV DATA (%s)`, testFiles.files[1]),
)
// Nudge the registry to quickly adopt the job.
tc.Server(0).JobRegistry().(*jobs.Registry).TestingNudgeAdoptionQueue()
// In the case of the test, the ID of the table that will be cleaned up due
// to the failed import will be one higher than the ID of the empty database
// it was created in.
dbID := sqlutils.QueryDatabaseID(t, sqlDB.DB, "failedimport")
tableID := descpb.ID(dbID + 1)
var td catalog.TableDescriptor
if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) {
td, err = catalogkv.MustGetTableDescByID(ctx, txn, keys.SystemSQLCodec, tableID)
return err
}); err != nil {
t.Fatal(err)
}
// Ensure that we have garbage written to the descriptor that we want to
// clean up.
tests.CheckKeyCount(t, kvDB, td.TableSpan(keys.SystemSQLCodec), rowsPerFile)
// Allow GC to progress.
// NOTE(review): blockGC is declared earlier in the enclosing test (outside
// this view); closing it is what unblocks the GC job.
close(blockGC)
// Ensure that a GC job was created, and wait for it to finish.
doneGCQuery := fmt.Sprintf(
"SELECT count(*) FROM [SHOW JOBS] WHERE job_type = '%s' AND status = '%s' AND created > %s",
"SCHEMA CHANGE GC", jobs.StatusSucceeded, beforeImport.String(),
)
sqlDB.CheckQueryResultsRetry(t, doneGCQuery, [][]string{{"1"}})
// Expect there are no more KVs for this span.
tests.CheckKeyCount(t, kvDB, td.TableSpan(keys.SystemSQLCodec), 0)
// Expect that the table descriptor is deleted.
if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
_, err := catalogkv.MustGetTableDescByID(ctx, txn, keys.SystemSQLCodec, tableID)
if !testutils.IsError(err, "descriptor not found") {
return err
}
return nil
}); err != nil {
t.Fatal(err)
}
})
// Test basic role based access control. Users who have the admin role should
// be able to IMPORT.
t.Run("RBAC-SuperUser", func(t *testing.T) {
sqlDB.Exec(t, `CREATE USER testuser`)
sqlDB.Exec(t, `GRANT admin TO testuser`)
// Open a separate SQL connection authenticated as testuser.
pgURL, cleanupFunc := sqlutils.PGUrl(
t, tc.Server(0).ServingSQLAddr(), "TestImportPrivileges-testuser",
url.User("testuser"),
)
defer cleanupFunc()
testuser, err := gosql.Open("postgres", pgURL.String())
if err != nil {
t.Fatal(err)
}
defer testuser.Close()
// An admin user may create a new table via IMPORT TABLE...
t.Run("IMPORT TABLE", func(t *testing.T) {
if _, err := testuser.Exec(fmt.Sprintf(`IMPORT TABLE rbac_superuser (a INT8 PRIMARY KEY,
b STRING) CSV DATA (%s)`, testFiles.files[0])); err != nil {
t.Fatal(err)
}
})
// ...and may import into an existing table via IMPORT INTO.
t.Run("IMPORT INTO", func(t *testing.T) {
if _, err := testuser.Exec("CREATE TABLE rbac_into_superuser (a INT8 PRIMARY KEY, " +
"b STRING)"); err != nil {
t.Fatal(err)
}
if _, err := testuser.Exec(fmt.Sprintf(`IMPORT INTO rbac_into_superuser (a, b) CSV DATA (%s)`, testFiles.files[0])); err != nil {
t.Fatal(err)
}
})
})
// Verify DEFAULT columns and SERIAL are allowed but not evaluated.
t.Run("allow-default", func(t *testing.T) {
// `data` is the CSV body served by the test HTTP server below; each subtest
// assigns it immediately before importing, so the subtests are
// order-dependent and must not be reordered.
var data string
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" {
_, _ = w.Write([]byte(data))
}
}))
defer srv.Close()
sqlDB.Exec(t, `CREATE DATABASE d`)
sqlDB.Exec(t, `SET DATABASE = d`)
const (
query = `IMPORT TABLE t (
a SERIAL8,
b INT8 DEFAULT unique_rowid(),
c STRING DEFAULT 's',
d SERIAL8,
e INT8 DEFAULT unique_rowid(),
f STRING DEFAULT 's',
PRIMARY KEY (a, b, c)
) CSV DATA ($1)`
nullif = ` WITH nullif=''`
)
// Missing value for PK column a: fails both with and without nullif.
data = ",5,e,7,,"
t.Run(data, func(t *testing.T) {
sqlDB.ExpectErr(
t, `row 1: parse "a" as INT8: could not parse ""`,
query, srv.URL,
)
sqlDB.ExpectErr(
t, `row 1: generate insert row: null value in column "a" violates not-null constraint`,
query+nullif, srv.URL,
)
})
// SERIAL column d is not evaluated on import, so a NULL there is rejected.
data = "2,5,e,,,"
t.Run(data, func(t *testing.T) {
sqlDB.ExpectErr(
t, `row 1: generate insert row: null value in column "d" violates not-null constraint`,
query+nullif, srv.URL,
)
})
data = "2,,e,,,"
t.Run(data, func(t *testing.T) {
sqlDB.ExpectErr(
t, `"b" violates not-null constraint`,
query+nullif, srv.URL,
)
})
data = "2,5,,,,"
t.Run(data, func(t *testing.T) {
sqlDB.ExpectErr(
t, `"c" violates not-null constraint`,
query+nullif, srv.URL,
)
})
// All PK columns present: the import succeeds, and the empty trailing
// fields become NULL via nullif rather than evaluating the DEFAULTs.
data = "2,5,e,-1,,"
t.Run(data, func(t *testing.T) {
sqlDB.Exec(t, query+nullif, srv.URL)
sqlDB.CheckQueryResults(t,
`SELECT * FROM t`,
sqlDB.QueryStr(t, `SELECT 2, 5, 'e', -1, NULL, NULL`),
)
})
})
// Test userfile import CSV.
t.Run("userfile-simple", func(t *testing.T) {
// Fully-qualified userfile URI (explicit backing table in the URI host).
userfileURI := "userfile://defaultdb.public.root/test.csv"
userfileStorage, err := tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).DistSQLSrv.
ExternalStorageFromURI(ctx, userfileURI, security.RootUserName())
require.NoError(t, err)
// Write a single CSV row, import it, verify, then clean up the userfile.
data := []byte("1,2")
require.NoError(t, userfileStorage.WriteFile(ctx, "", bytes.NewReader(data)))
sqlDB.Exec(t, fmt.Sprintf("IMPORT TABLE foo (id INT PRIMARY KEY, "+
"id2 INT) CSV DATA ('%s')", userfileURI))
sqlDB.CheckQueryResults(t, "SELECT * FROM foo", sqlDB.QueryStr(t, "SELECT 1, 2"))
require.NoError(t, userfileStorage.Delete(ctx, ""))
})
// Same flow as userfile-simple, but with a relative userfile path (no
// explicit backing table in the URI host).
t.Run("userfile-relative-file-path", func(t *testing.T) {
userfileURI := "userfile:///import-test/employees.csv"
userfileStorage, err := tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).DistSQLSrv.
ExternalStorageFromURI(ctx, userfileURI, security.RootUserName())
require.NoError(t, err)
data := []byte("1,2")
require.NoError(t, userfileStorage.WriteFile(ctx, "", bytes.NewReader(data)))
sqlDB.Exec(t, fmt.Sprintf("IMPORT TABLE baz (id INT PRIMARY KEY, "+
"id2 INT) CSV DATA ('%s')", userfileURI))
sqlDB.CheckQueryResults(t, "SELECT * FROM baz", sqlDB.QueryStr(t, "SELECT 1, 2"))
require.NoError(t, userfileStorage.Delete(ctx, ""))
})
// Verify IMPORT TABLE succeeds in a database with database-level grants.
t.Run("import-with-db-privs", func(t *testing.T) {
sqlDB.Exec(t, `USE defaultdb`)
sqlDB.Exec(t, `CREATE USER foo`)
sqlDB.Exec(t, `GRANT ALL ON DATABASE defaultdb TO foo`)
// NOTE(review): the IMPORT below runs on the root connection (sqlDB), not
// as user foo, so the GRANT above appears unused — confirm intent.
sqlDB.Exec(t, fmt.Sprintf(`
IMPORT TABLE import_with_db_privs (a INT8 PRIMARY KEY, b STRING) CSV DATA (%s)`,
testFiles.files[0]))
// Verify correct number of rows via COUNT.
var result int
sqlDB.QueryRow(t, `SELECT count(*) FROM import_with_db_privs`).Scan(&result)
if result != rowsPerFile {
t.Fatalf("expected %d rows, got %d", rowsPerFile, result)
}
})
// Verify IMPORT INTO works for a table under a user-defined schema.
t.Run("user-defined-schemas", func(t *testing.T) {
sqlDB.Exec(t, `CREATE DATABASE uds`)
sqlDB.Exec(t, `USE uds`)
sqlDB.Exec(t, `CREATE SCHEMA sc`)
// Now import into a table under sc.
sqlDB.Exec(t, `CREATE TABLE uds.sc.t (a INT8 PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO uds.sc.t (a, b) CSV DATA (%s)`, testFiles.files[0]))
var result int
sqlDB.QueryRow(t, `SELECT count(*) FROM uds.sc.t`).Scan(&result)
require.Equal(t, rowsPerFile, result)
})
}
// TestImportFeatureFlag tests the feature flag logic that allows the IMPORT and
// IMPORT INTO commands to be toggled off via cluster settings.
func TestImportFeatureFlag(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
defer jobs.ResetConstructors()()
const nodes = 1
numFiles := nodes + 2
rowsPerFile := 1000
rowsPerRaceFile := 16
ctx := context.Background()
baseDir := filepath.Join("testdata", "csv")
tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: baseDir}})
defer tc.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
// Generate CSV fixtures under the cluster's external IO dir.
testFiles := makeCSVData(t, numFiles, rowsPerFile, nodes, rowsPerRaceFile)
// Feature flag is off — test that IMPORT and IMPORT INTO surface error.
sqlDB.Exec(t, `SET CLUSTER SETTING feature.import.enabled = FALSE`)
sqlDB.ExpectErr(t, `feature IMPORT was disabled by the database administrator`,
fmt.Sprintf(`IMPORT TABLE t (a INT8 PRIMARY KEY, b STRING) CSV DATA (%s)`, testFiles.files[0]))
sqlDB.Exec(t, `CREATE TABLE feature_flags (a INT8 PRIMARY KEY, b STRING)`)
sqlDB.ExpectErr(t, `feature IMPORT was disabled by the database administrator`,
fmt.Sprintf(`IMPORT INTO feature_flags (a, b) CSV DATA (%s)`, testFiles.files[0]))
// Feature flag is on — test that IMPORT and IMPORT INTO do not error.
sqlDB.Exec(t, `SET CLUSTER SETTING feature.import.enabled = TRUE`)
sqlDB.Exec(t, fmt.Sprintf(`IMPORT TABLE t (a INT8 PRIMARY KEY, b STRING) CSV DATA (%s)`,
testFiles.files[0]))
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO feature_flags (a, b) CSV DATA (%s)`, testFiles.files[0]))
}
// TestImportObjectLevelRBAC verifies object-level privilege checks for IMPORT:
// IMPORT TABLE requires CREATE on the target database, and IMPORT INTO
// requires INSERT and DROP on the target table.
func TestImportObjectLevelRBAC(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const nodes = 3
ctx := context.Background()
tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{
SQLMemoryPoolSize: 256 << 20,
}})
defer tc.Stopper().Stop(ctx)
conn := tc.Conns[0]
rootDB := sqlutils.MakeSQLRunner(conn)
rootDB.Exec(t, `CREATE USER testuser`)
pgURL, cleanupFunc := sqlutils.PGUrl(
t, tc.Server(0).ServingSQLAddr(), "TestImportPrivileges-testuser",
url.User("testuser"),
)
defer cleanupFunc()
// startTestUser opens a fresh SQL connection authenticated as testuser.
startTestUser := func() *gosql.DB {
testuser, err := gosql.Open("postgres", pgURL.String())
require.NoError(t, err)
return testuser
}
qualifiedTableName := "defaultdb.public.user_file_table_test"
filename := "path/to/file"
dest := cloudimpl.MakeUserFileStorageURI(qualifiedTableName, filename)
// writeToUserfile writes a one-row CSV under the userfile destination so the
// subsequent IMPORT has data to read.
writeToUserfile := func(filename string) {
// Write to userfile storage now that testuser has CREATE privileges.
ie := tc.Server(0).InternalExecutor().(*sql.InternalExecutor)
fileTableSystem1, err := cloudimpl.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{},
cluster.NoSettings, blobs.TestEmptyBlobClientFactory, security.TestUserName(), ie, tc.Server(0).DB())
require.NoError(t, err)
require.NoError(t, fileTableSystem1.WriteFile(ctx, filename, bytes.NewReader([]byte("1,aaa"))))
}
t.Run("import-RBAC", func(t *testing.T) {
userfileDest := dest + "/" + t.Name()
testuser := startTestUser()
// User has no privileges at this point. Check that an IMPORT requires
// CREATE privileges on the database.
_, err := testuser.Exec(fmt.Sprintf(`IMPORT TABLE rbac_import_priv (a INT8 PRIMARY KEY,
b STRING) CSV DATA ('%s')`, userfileDest))
require.True(t, testutils.IsError(err, "testuser does not have CREATE privilege on database"))
// Grant user CREATE privilege on the database.
rootDB.Exec(t, `GRANT create ON DATABASE defaultdb TO testuser`)
// Reopen testuser sql connection.
// TODO(adityamaru): The above GRANT does not reflect unless we restart
// the testuser SQL connection, understand why.
require.NoError(t, testuser.Close())
testuser = startTestUser()
defer testuser.Close()
// Write to userfile now that the user has CREATE privileges.
writeToUserfile(t.Name())
// Import should now have the required privileges to start the job.
_, err = testuser.Exec(fmt.Sprintf(`IMPORT TABLE rbac_import_priv (a INT8 PRIMARY KEY,
b STRING) CSV DATA ('%s')`, userfileDest))
require.NoError(t, err)
})
t.Run("import-into-RBAC", func(t *testing.T) {
// Create table to IMPORT INTO.
rootDB.Exec(t, `CREATE TABLE rbac_import_into_priv (a INT8 PRIMARY KEY, b STRING)`)
userFileDest := dest + "/" + t.Name()
testuser := startTestUser()
// User has no privileges at this point. Check that an IMPORT INTO requires
// INSERT and DROP privileges. Each loop iteration expects the error for the
// still-missing privilege, then grants it before the next attempt.
for _, privilege := range []string{"INSERT", "DROP"} {
_, err := testuser.Exec(fmt.Sprintf(`IMPORT INTO rbac_import_into_priv (a,
b) CSV DATA ('%s')`, userFileDest))
require.True(t, testutils.IsError(err,
fmt.Sprintf("user testuser does not have %s privilege on relation rbac_import_into_priv",
privilege)))
rootDB.Exec(t, fmt.Sprintf(`GRANT %s ON TABLE rbac_import_into_priv TO testuser`, privilege))
}
// Grant user CREATE privilege on the database.
rootDB.Exec(t, `GRANT create ON DATABASE defaultdb TO testuser`)
// Reopen testuser sql connection.
// TODO(adityamaru): The above GRANT does not reflect unless we restart
// the testuser SQL connection, understand why.
require.NoError(t, testuser.Close())
testuser = startTestUser()
defer testuser.Close()
// Write to userfile now that the user has CREATE privileges.
writeToUserfile(t.Name())
// Import should now have the required privileges to start the job.
_, err := testuser.Exec(fmt.Sprintf(`IMPORT INTO rbac_import_into_priv (a,b) CSV DATA ('%s')`,
userFileDest))
require.NoError(t, err)
})
}
// TestURIRequiresAdminRole tests the IMPORT logic which guards certain
// privileged ExternalStorage IO paths with an admin only check.
//
// It verifies both the end-to-end behavior (a non-admin user's IMPORT is
// rejected for admin-only URIs) and the classification helper
// cloud.AccessIsWithExplicitAuth directly.
func TestURIRequiresAdminRole(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const nodes = 3
	ctx := context.Background()
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{
		SQLMemoryPoolSize: 256 << 20,
	}})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	rootDB := sqlutils.MakeSQLRunner(conn)
	// Create a non-admin user and open a SQL connection as that user.
	rootDB.Exec(t, `CREATE USER testuser`)
	pgURL, cleanupFunc := sqlutils.PGUrl(
		t, tc.Server(0).ServingSQLAddr(), "TestImportPrivileges-testuser",
		url.User("testuser"),
	)
	defer cleanupFunc()
	testuser, err := gosql.Open("postgres", pgURL.String())
	require.NoError(t, err)
	defer testuser.Close()
	// Each case is a storage URI plus whether IMPORT from it is admin-only.
	// The loop variable is named `test` (not `tc`) so it does not shadow the
	// test cluster above.
	for _, test := range []struct {
		name          string
		uri           string
		requiresAdmin bool
	}{
		{
			name:          "s3-implicit",
			uri:           "s3://foo/bar?AUTH=implicit",
			requiresAdmin: true,
		},
		{
			name:          "s3-specified",
			uri:           "s3://foo/bar?AUTH=specified",
			requiresAdmin: false,
		},
		{
			name:          "s3-custom",
			uri:           "s3://foo/bar?AUTH=specified&AWS_ENDPOINT=baz",
			requiresAdmin: true,
		},
		{
			name:          "gs-implicit",
			uri:           "gs://foo/bar?AUTH=implicit",
			requiresAdmin: true,
		},
		{
			name:          "gs-specified",
			uri:           "gs://foo/bar?AUTH=specified",
			requiresAdmin: false,
		},
		{
			name:          "userfile",
			uri:           "userfile:///foo",
			requiresAdmin: false,
		},
		{
			name:          "nodelocal",
			uri:           "nodelocal://self/foo",
			requiresAdmin: true,
		},
		{
			name:          "http",
			uri:           "http://foo/bar",
			requiresAdmin: true,
		},
		{
			name:          "https",
			uri:           "https://foo/bar",
			requiresAdmin: true,
		},
	} {
		// End-to-end: the non-admin connection attempts the IMPORT.
		t.Run(test.name+"-via-import", func(t *testing.T) {
			_, err := testuser.Exec(fmt.Sprintf(`IMPORT TABLE foo (id INT) CSV DATA ('%s')`, test.uri))
			if test.requiresAdmin {
				require.True(t, testutils.IsError(err, "only users with the admin role are allowed to IMPORT"))
			} else {
				require.False(t, testutils.IsError(err, "only users with the admin role are allowed to IMPORT"))
			}
		})
		// Direct: check the helper's classification and reported scheme. The
		// parsed URL is named `parsedURI` so it does not shadow the net/url
		// package (the original shadowed both `tc` and `url`).
		t.Run(test.name+"-direct", func(t *testing.T) {
			requires, scheme, err := cloud.AccessIsWithExplicitAuth(test.uri)
			require.NoError(t, err)
			require.Equal(t, requires, !test.requiresAdmin)
			parsedURI, err := url.Parse(test.uri)
			require.NoError(t, err)
			require.Equal(t, scheme, parsedURI.Scheme)
		})
	}
}
// TestExportImportRoundTrip verifies that data written by EXPORT INTO CSV can
// be read back with IMPORT TABLE, round-tripping a few tricky value types:
// string arrays, byte arrays, and collated strings.
func TestExportImportRoundTrip(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	baseDir, cleanup := testutils.TempDir(t)
	defer cleanup()
	tc := testcluster.StartTestCluster(
		t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: baseDir}})
	defer tc.Stopper().Stop(ctx)
	runner := sqlutils.MakeSQLRunner(tc.Conns[0])
	// Each case exports one row and imports it back into table `tbl`; the
	// round-tripped contents must match the original SELECT. The directory
	// names that are being imported from and exported into need to differ
	// across runs, so `stmts` is a format string taking a per-run directory.
	cases := []struct {
		stmts    string
		tbl      string
		expected string
	}{
		{
			stmts: `EXPORT INTO CSV 'nodelocal://0/%[1]s' FROM SELECT ARRAY['a', 'b', 'c'];
IMPORT TABLE t (x TEXT[]) CSV DATA ('nodelocal://0/%[1]s/export*-n1.0.csv')`,
			tbl:      "t",
			expected: `SELECT ARRAY['a', 'b', 'c']`,
		},
		{
			stmts: `EXPORT INTO CSV 'nodelocal://0/%[1]s' FROM SELECT ARRAY[b'abc', b'\141\142\143', b'\x61\x62\x63'];
IMPORT TABLE t (x BYTES[]) CSV DATA ('nodelocal://0/%[1]s/export*-n1.0.csv')`,
			tbl:      "t",
			expected: `SELECT ARRAY[b'abc', b'\141\142\143', b'\x61\x62\x63']`,
		},
		{
			stmts: `EXPORT INTO CSV 'nodelocal://0/%[1]s' FROM SELECT 'dog' COLLATE en;
IMPORT TABLE t (x STRING COLLATE en) CSV DATA ('nodelocal://0/%[1]s/export*-n1.0.csv')`,
			tbl:      "t",
			expected: `SELECT 'dog' COLLATE en`,
		},
	}
	for runID, c := range cases {
		// Start from a clean slate, run the export+import pair, and compare
		// the imported rows against the original expression.
		runner.Exec(t, fmt.Sprintf(`DROP TABLE IF EXISTS %s`, c.tbl))
		runner.Exec(t, fmt.Sprintf(c.stmts, fmt.Sprintf("run%d", runID)))
		runner.CheckQueryResults(
			t, fmt.Sprintf(`SELECT * FROM %s`, c.tbl), runner.QueryStr(t, c.expected))
	}
}
// TODO(adityamaru): Tests still need to be added incrementally as
// relevant IMPORT INTO logic is added. Some of them include:
// -> FK and constraint violation
// -> CSV containing keys which will shadow existing data
// -> Rollback of a failed IMPORT INTO
func TestImportIntoCSV(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderShort(t)
skip.UnderRace(t, "takes >1min under race")
const nodes = 3
numFiles := nodes + 2
rowsPerFile := 1000
rowsPerRaceFile := 16
ctx := context.Background()
baseDir := testutils.TestDataPath(t, "csv")
tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: baseDir}})
defer tc.Stopper().Stop(ctx)
conn := tc.Conns[0]
// Shared knob state consumed by the afterImport hook installed below:
// forceFailure injects an error after the import body; the two channels let
// a subtest observe (importBodyFinished) and delay (delayImportFinish) the
// import's completion.
var forceFailure bool
var importBodyFinished chan struct{}
var delayImportFinish chan struct{}
// Install a testing resumer knob on every node's job registry.
for i := range tc.Servers {
tc.Servers[i].JobRegistry().(*jobs.Registry).TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
jobspb.TypeImport: func(raw jobs.Resumer) jobs.Resumer {
r := raw.(*importResumer)
r.testingKnobs.afterImport = func(_ backupccl.RowCount) error {
if importBodyFinished != nil {
importBodyFinished <- struct{}{}
}
if delayImportFinish != nil {
<-delayImportFinish
}
if forceFailure {
return errors.New("testing injected failure")
}
return nil
}
return r
},
}
}
sqlDB := sqlutils.MakeSQLRunner(conn)
sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
testFiles := makeCSVData(t, numFiles, rowsPerFile, nodes, rowsPerRaceFile)
if util.RaceEnabled {
// This test takes a while with the race detector, so reduce the number of
// files and rows per file in an attempt to speed it up.
numFiles = nodes
rowsPerFile = rowsPerRaceFile
}
empty := []string{"'nodelocal://0/empty.csv'"}
// Support subtests by keeping track of the number of jobs that are executed.
testNum := -1
insertedRows := numFiles * rowsPerFile
// Table-driven IMPORT INTO cases. `query` must contain one `%s` for the
// comma-joined file list; `jobOpts` is the WITH-options suffix expected in
// the recorded job description; a non-empty `err` means the statement must
// fail with that error.
for _, tc := range []struct {
name string
query string // must have one `%s` for the files list.
files []string
jobOpts string
err string
}{
{
"simple-import-into",
`IMPORT INTO t (a, b) CSV DATA (%s)`,
testFiles.files,
``,
"",
},
{
"import-into-with-opts",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH delimiter = '|', comment = '#', nullif='', skip = '2'`,
testFiles.filesWithOpts,
` WITH comment = '#', delimiter = '|', "nullif" = '', skip = '2'`,
"",
},
{
// Force some SST splits.
"import-into-sstsize",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH sstsize = '10K'`,
testFiles.files,
` WITH sstsize = '10K'`,
"",
},
{
"empty-file",
`IMPORT INTO t (a, b) CSV DATA (%s)`,
empty,
``,
"",
},
{
"empty-with-files",
`IMPORT INTO t (a, b) CSV DATA (%s)`,
append(empty, testFiles.files...),
``,
"",
},
{
"import-into-auto-decompress",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`,
testFiles.files,
` WITH decompress = 'auto'`,
"",
},
{
"import-into-no-decompress",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'none'`,
testFiles.files,
` WITH decompress = 'none'`,
"",
},
{
"import-into-explicit-gzip",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'gzip'`,
testFiles.gzipFiles,
` WITH decompress = 'gzip'`,
"",
},
{
"import-into-auto-gzip",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`,
testFiles.gzipFiles,
` WITH decompress = 'auto'`,
"",
},
{
"import-into-implicit-gzip",
`IMPORT INTO t (a, b) CSV DATA (%s)`,
testFiles.gzipFiles,
``,
"",
},
{
"import-into-explicit-bzip",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'bzip'`,
testFiles.bzipFiles,
` WITH decompress = 'bzip'`,
"",
},
{
"import-into-auto-bzip",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`,
testFiles.bzipFiles,
` WITH decompress = 'auto'`,
"",
},
{
"import-into-implicit-bzip",
`IMPORT INTO t (a, b) CSV DATA (%s)`,
testFiles.bzipFiles,
``,
"",
},
{
"import-into-no-decompress-wildcard",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'none'`,
testFiles.filesUsingWildcard,
` WITH decompress = 'none'`,
"",
},
{
"import-into-explicit-gzip-wildcard",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'gzip'`,
testFiles.gzipFilesUsingWildcard,
` WITH decompress = 'gzip'`,
"",
},
{
"import-into-auto-bzip-wildcard",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`,
testFiles.gzipFilesUsingWildcard,
` WITH decompress = 'auto'`,
"",
},
// NB: successes above, failures below, because we check the i-th job.
{
"import-into-bad-opt-name",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH foo = 'bar'`,
testFiles.files,
``,
"invalid option \"foo\"",
},
{
"import-into-no-database",
`IMPORT INTO nonexistent.t (a, b) CSV DATA (%s)`,
testFiles.files,
``,
`database does not exist: "nonexistent.t"`,
},
{
"import-into-no-table",
`IMPORT INTO g (a, b) CSV DATA (%s)`,
testFiles.files,
``,
`pq: relation "g" does not exist`,
},
{
"import-into-no-decompress-gzip",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'none'`,
testFiles.gzipFiles,
` WITH decompress = 'none'`,
// This returns different errors for `make test` and `make testrace` but
// field is in both error messages.
"field",
},
{
"import-into-no-decompress-gzip",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'gzip'`,
testFiles.files,
` WITH decompress = 'gzip'`,
"gzip: invalid header",
},
{
"import-no-files-match-wildcard",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`,
[]string{`'nodelocal://0/data-[0-9][0-9]*'`},
` WITH decompress = 'auto'`,
`pq: no files matched uri provided`,
},
{
"import-into-no-glob-wildcard",
`IMPORT INTO t (a, b) CSV DATA (%s) WITH disable_glob_matching`,
testFiles.filesUsingWildcard,
` WITH disable_glob_matching`,
"pq: (.+) no such file or directory",
},
} {
t.Run(tc.name, func(t *testing.T) {
if strings.Contains(tc.name, "bzip") && len(testFiles.bzipFiles) == 0 {
skip.IgnoreLint(t, "bzip2 not available on PATH?")
}
// Fresh target table per case, pre-populated with a few existing rows so
// we can verify IMPORT INTO appends rather than replaces.
sqlDB.Exec(t, `CREATE TABLE t (a INT, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
var tableID int64
sqlDB.QueryRow(t, `SELECT id FROM system.namespace WHERE name = 't'`).Scan(&tableID)
var unused string
var restored struct {
rows, idx, bytes int
}
// Insert the test data
insert := []string{"''", "'text'", "'a'", "'e'", "'l'", "'t'", "'z'"}
numExistingRows := len(insert)
for i, v := range insert {
sqlDB.Exec(t, "INSERT INTO t (a, b) VALUES ($1, $2)", i, v)
}
var result int
query := fmt.Sprintf(tc.query, strings.Join(tc.files, ", "))
// Bump the job ordinal used by VerifySystemJob below; see the NB comment
// in the case list above about success/failure ordering.
testNum++
if tc.err != "" {
sqlDB.ExpectErr(t, tc.err, query)
return
}
sqlDB.QueryRow(t, query).Scan(
&unused, &unused, &unused, &restored.rows, &restored.idx, &restored.bytes,
)
jobPrefix := `IMPORT INTO defaultdb.public.t(a, b)`
// AWS session tokens are redacted in recorded job descriptions; mirror
// that redaction when building the expected description.
if err := jobutils.VerifySystemJob(t, sqlDB, testNum, jobspb.TypeImport, jobs.StatusSucceeded, jobs.Record{
Username: security.RootUserName(),
Description: fmt.Sprintf(jobPrefix+` CSV DATA (%s)`+tc.jobOpts, strings.ReplaceAll(strings.Join(tc.files, ", "), "?AWS_SESSION_TOKEN=secrets", "?AWS_SESSION_TOKEN=redacted")),
DescriptorIDs: []descpb.ID{descpb.ID(tableID)},
}); err != nil {
t.Fatal(err)
}
isEmpty := len(tc.files) == 1 && tc.files[0] == empty[0]
if isEmpty {
sqlDB.QueryRow(t, `SELECT count(*) FROM t`).Scan(&result)
if result != numExistingRows {
t.Fatalf("expected %d rows, got %d", numExistingRows, result)
}
return
}
if expected, actual := insertedRows, restored.rows; expected != actual {
t.Fatalf("expected %d rows, got %d", expected, actual)
}
// Verify correct number of rows via COUNT.
sqlDB.QueryRow(t, `SELECT count(*) FROM t`).Scan(&result)
if expect := numExistingRows + insertedRows; result != expect {
t.Fatalf("expected %d rows, got %d", expect, result)
}
// Verify correct number of NULLs via COUNT.
sqlDB.QueryRow(t, `SELECT count(*) FROM t WHERE b IS NULL`).Scan(&result)
expectedNulls := 0
if strings.Contains(tc.query, "nullif") {
expectedNulls = insertedRows / 4
}
if result != expectedNulls {
t.Fatalf("expected %d rows, got %d", expectedNulls, result)
}
})
}
// Verify unique_rowid is replaced for tables without primary keys.
t.Run("import-into-unique_rowid", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
// Insert the test data
insert := []string{"''", "'text'", "'a'", "'e'", "'l'", "'t'", "'z'"}
numExistingRows := len(insert)
for i, v := range insert {
sqlDB.Exec(t, "INSERT INTO t (a, b) VALUES ($1, $2)", i, v)
}
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, strings.Join(testFiles.files, ", ")))
// Verify the rowids are being generated as expected: the total row count
// must equal (files x rows-per-file) imported rows plus the pre-existing
// rows, i.e. no hidden-rowid collisions overwrote anything.
sqlDB.CheckQueryResults(t,
`SELECT count(*) FROM t`,
sqlDB.QueryStr(t, `
SELECT count(*) + $3 FROM
(SELECT * FROM
(SELECT generate_series(0, $1 - 1) file),
(SELECT generate_series(1, $2) rownum)
)
`, numFiles, rowsPerFile, numExistingRows),
)
})
// Verify a failed IMPORT INTO won't prevent a subsequent IMPORT INTO.
t.Run("import-into-checkpoint-leftover", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
// Insert the test data
insert := []string{"''", "'text'", "'a'", "'e'", "'l'", "'t'", "'z'"}
for i, v := range insert {
sqlDB.Exec(t, "INSERT INTO t (a, b) VALUES ($1, $2)", i, v)
}
// Hit a failure during import.
// forceFailure is read by the afterImport knob installed in the test setup.
forceFailure = true
sqlDB.ExpectErr(
t, `testing injected failure`,
fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[1]),
)
forceFailure = false
// Expect it to succeed on re-attempt.
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[1]))
})
// Verify that during IMPORT INTO the table is offline.
t.Run("offline-state", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
// Insert the test data
insert := []string{"''", "'text'", "'a'", "'e'", "'l'", "'t'", "'z'"}
for i, v := range insert {
sqlDB.Exec(t, "INSERT INTO t (a, b) VALUES ($1, $2)", i, v)
}
// Hit a failure during import.
// The two channels are consumed by the afterImport knob installed in the
// test setup: the import signals importBodyFinished after its body runs,
// then blocks on delayImportFinish until the querying goroutine is done.
importBodyFinished = make(chan struct{})
delayImportFinish = make(chan struct{})
defer func() {
importBodyFinished = nil
delayImportFinish = nil
}()
var unused interface{}
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
defer close(importBodyFinished)
_, err := sqlDB.DB.ExecContext(ctx, fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[1]))
return err
})
g.GoCtx(func(ctx context.Context) error {
defer close(delayImportFinish)
<-importBodyFinished
// While the import job is still running, reads of t must fail with the
// offline error.
err := sqlDB.DB.QueryRowContext(ctx, `SELECT 1 FROM t`).Scan(&unused)
if !testutils.IsError(err, `relation "t" is offline: importing`) {
return err
}
return nil
})
if err := g.Wait(); err != nil {
t.Fatal(err)
}
skip.WithIssue(t, 51812)
// Expect it to succeed on re-attempt.
sqlDB.QueryRow(t, `SELECT 1 FROM t`).Scan(&unused)
})
// Tests for user specified target columns in IMPORT INTO statements.
//
// Tests IMPORT INTO with various target column sets, and an implicit PK
// provided by the hidden column row_id.
t.Run("target-cols-with-default-pk", func(t *testing.T) {
	// `data` is the CSV body served by the test HTTP server below. It is
	// assigned immediately BEFORE each t.Run so that the subtest name reflects
	// the row being imported; previously `data` was assigned inside the
	// closures, after t.Run had already evaluated the (stale or empty) name.
	var data string
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			_, _ = w.Write([]byte(data))
		}
	}))
	defer srv.Close()
	createQuery := `CREATE TABLE t (a INT8,
b INT8,
c STRING,
d INT8,
e INT8,
f STRING)`
	// Import into column a only; every other column must be NULL (the original
	// expectation used the string literal 'NULL' for f, which renders the same
	// under QueryStr but is misleading).
	data = "1"
	t.Run(data, func(t *testing.T) {
		sqlDB.Exec(t, createQuery)
		defer sqlDB.Exec(t, `DROP TABLE t`)
		sqlDB.Exec(t, `IMPORT INTO t (a) CSV DATA ($1)`, srv.URL)
		sqlDB.CheckQueryResults(t, `SELECT * FROM t`,
			sqlDB.QueryStr(t, `SELECT 1, NULL, NULL, NULL, NULL, NULL`),
		)
	})
	// Import into a non-contiguous column set (a and f).
	data = "1,teststr"
	t.Run(data, func(t *testing.T) {
		sqlDB.Exec(t, createQuery)
		defer sqlDB.Exec(t, `DROP TABLE t`)
		sqlDB.Exec(t, `IMPORT INTO t (a, f) CSV DATA ($1)`, srv.URL)
		sqlDB.CheckQueryResults(t, `SELECT * FROM t`,
			sqlDB.QueryStr(t, `SELECT 1, NULL, NULL, NULL, NULL, 'teststr'`),
		)
	})
	// Import into the trailing columns only (d, e, f).
	data = "7,12,teststr"
	t.Run(data, func(t *testing.T) {
		sqlDB.Exec(t, createQuery)
		defer sqlDB.Exec(t, `DROP TABLE t`)
		sqlDB.Exec(t, `IMPORT INTO t (d, e, f) CSV DATA ($1)`, srv.URL)
		sqlDB.CheckQueryResults(t, `SELECT * FROM t`,
			sqlDB.QueryStr(t, `SELECT NULL, NULL, NULL, 7, 12, 'teststr'`),
		)
	})
})
// Tests IMPORT INTO with a target column set, and an explicit PK.
t.Run("target-cols-with-explicit-pk", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
// Insert the test data
// PK values start at 1000 so they cannot collide with the imported 1..10.
insert := []string{"''", "'text'", "'a'", "'e'", "'l'", "'t'", "'z'"}
for i, v := range insert {
sqlDB.Exec(t, "INSERT INTO t (a, b) VALUES ($1, $2)", i+1000, v)
}
data := []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" {
_, _ = w.Write([]byte(strings.Join(data, "\n")))
}
}))
defer srv.Close()
sqlDB.Exec(t, "IMPORT INTO t (a) CSV DATA ($1)", srv.URL)
var result int
numExistingRows := len(insert)
// Verify that the target column has been populated.
sqlDB.QueryRow(t, `SELECT count(*) FROM t WHERE a IS NOT NULL`).Scan(&result)
if expect := numExistingRows + len(data); result != expect {
t.Fatalf("expected %d rows, got %d", expect, result)
}
// Verify that the non-target columns have NULLs.
sqlDB.QueryRow(t, `SELECT count(*) FROM t WHERE b IS NULL`).Scan(&result)
expectedNulls := len(data)
if result != expectedNulls {
t.Fatalf("expected %d rows, got %d", expectedNulls, result)
}
})
// Tests IMPORT INTO with a CSV file that has more columns than the target
// column list; this must fail with a mismatched-field-count error.
t.Run("csv-with-more-than-targeted-columns", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
// Expect an error if attempting to IMPORT INTO with CSV having more columns
// than targeted.
sqlDB.ExpectErr(
t, `row 1: expected 1 fields, got 2`,
fmt.Sprintf("IMPORT INTO t (a) CSV DATA (%s)", testFiles.files[0]),
)
})
// Tests IMPORT INTO with a target column set which does not include all PKs.
// As a result the non-target column is non-nullable, which is not allowed
// until we support DEFAULT expressions.
t.Run("target-cols-excluding-explicit-pk", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
// Expect an error if attempting to IMPORT INTO a target list which does
// not include all the PKs of the table.
sqlDB.ExpectErr(
t, `pq: all non-target columns in IMPORT INTO must be nullable`,
fmt.Sprintf(`IMPORT INTO t (b) CSV DATA (%s)`, testFiles.files[0]),
)
})
// Tests behavior when the existing table being imported into has more columns
// in its schema than the source CSV file: the import must fail rather than
// silently leave columns unset.
t.Run("more-table-cols-than-csv", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT, b STRING, c INT)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
// Insert the test data
insert := []string{"''", "'text'", "'a'", "'e'", "'l'", "'t'", "'z'"}
for i, v := range insert {
sqlDB.Exec(t, "INSERT INTO t (a, b) VALUES ($1, $2)", i, v)
}
sqlDB.ExpectErr(
t, "row 1: expected 3 fields, got 2",
fmt.Sprintf(`IMPORT INTO t (a, b, c) CSV DATA (%s)`, testFiles.files[0]),
)
})
// Tests the case where we create table columns in specific order while trying
// to import data from csv where columns order is different and import expression
// defines in what order columns should be imported to align with table definition
t.Run("target-cols-reordered", func(t *testing.T) {
sqlDB.Exec(t, "CREATE TABLE t (a INT PRIMARY KEY, b INT, c STRING NOT NULL, d DECIMAL NOT NULL)")
defer sqlDB.Exec(t, `DROP TABLE t`)
// CSV column order is (d, c, a); the target list below maps them back.
const data = "3.14,c is a string,1\n2.73,another string,2"
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" {
_, _ = w.Write([]byte(data))
}
}))
defer srv.Close()
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (d, c, a) CSV DATA ("%s")`, srv.URL))
// Untargeted nullable column b stays NULL.
sqlDB.CheckQueryResults(t, `SELECT * FROM t ORDER BY a`,
[][]string{{"1", "NULL", "c is a string", "3.14"}, {"2", "NULL", "another string", "2.73"}},
)
})
// Tests that we can import into the table even if the table has columns named with
// reserved keywords.
t.Run("cols-named-with-reserved-keywords", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t ("select" INT PRIMARY KEY, "from" INT, "Some-c,ol-'Name'" STRING NOT NULL)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
const data = "today,1,2"
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" {
_, _ = w.Write([]byte(data))
}
}))
defer srv.Close()
// The quoted target list must round-trip the reserved/exotic identifiers.
sqlDB.Exec(t, fmt.Sprintf(
`IMPORT INTO t ("Some-c,ol-'Name'", "select", "from") CSV DATA ("%s")`, srv.URL))
sqlDB.CheckQueryResults(t, `SELECT * FROM t`, [][]string{{"1", "2", "today"}})
})
// Tests behavior when the existing table being imported into has fewer columns
// in its schema than the source CSV file: the import must fail.
t.Run("fewer-table-cols-than-csv", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
sqlDB.ExpectErr(
t, "row 1: expected 1 fields, got 2",
fmt.Sprintf(`IMPORT INTO t (a) CSV DATA (%s)`, testFiles.files[0]),
)
})
// Tests IMPORT INTO without any target columns specified. This implies an
// import of all columns in the existing table.
t.Run("no-target-cols-specified", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
// Insert the test data
insert := []string{"''", "'text'", "'a'", "'e'", "'l'", "'t'", "'z'"}
for i, v := range insert {
sqlDB.Exec(t, "INSERT INTO t (a, b) VALUES ($1, $2)", i+rowsPerFile, v)
}
sqlDB.Exec(t, fmt.Sprintf("IMPORT INTO t CSV DATA (%s)", testFiles.files[0]))
var result int
numExistingRows := len(insert)
// Verify that all columns have been populated with imported data.
sqlDB.QueryRow(t, `SELECT count(*) FROM t WHERE a IS NOT NULL`).Scan(&result)
if expect := numExistingRows + rowsPerFile; result != expect {
t.Fatalf("expected %d rows, got %d", expect, result)
}
sqlDB.QueryRow(t, `SELECT count(*) FROM t WHERE b IS NOT NULL`).Scan(&result)
if expect := numExistingRows + rowsPerFile; result != expect {
t.Fatalf("expected %d rows, got %d", expect, result)
}
})
t.Run("import-not-targeted-not-null", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT, b INT NOT NULL)`)
const data = "1\n2\n3"
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" {
_, _ = w.Write([]byte(data))
}
}))
defer srv.Close()
defer sqlDB.Exec(t, `DROP TABLE t`)
sqlDB.ExpectErr(t, `violated by column "b"`,
fmt.Sprintf(`IMPORT INTO t (a) CSV DATA ("%s")`, srv.URL),
)
})
// IMPORT INTO does not currently support import into interleaved tables.
t.Run("import-into-rejects-interleaved-tables", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE parent (parent_id INT PRIMARY KEY)`)
sqlDB.Exec(t, `CREATE TABLE child (
parent_id INT,
child_id INT,
PRIMARY KEY(parent_id, child_id))
INTERLEAVE IN PARENT parent(parent_id)`)
defer sqlDB.Exec(t, `DROP TABLE parent`)
defer sqlDB.Exec(t, `DROP TABLE child`)
// Cannot IMPORT INTO interleaved parent
sqlDB.ExpectErr(
t, "Cannot use IMPORT INTO with interleaved tables",
fmt.Sprintf(`IMPORT INTO parent (parent_id) CSV DATA (%s)`, testFiles.files[0]))
// Cannot IMPORT INTO interleaved child either.
sqlDB.ExpectErr(
t, "Cannot use IMPORT INTO with interleaved tables",
fmt.Sprintf(`IMPORT INTO child (parent_id, child_id) CSV DATA (%s)`, testFiles.files[0]))
})
// This tests that consecutive imports from unique data sources into an
// existing table without an explicit PK, do not overwrite each other. It
// exercises the row_id generation in IMPORT.
t.Run("multiple-import-into-without-pk", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
// Insert the test data
insert := []string{"''", "'text'", "'a'", "'e'", "'l'", "'t'", "'z'"}
numExistingRows := len(insert)
insertedRows := rowsPerFile * 3
for i, v := range insert {
sqlDB.Exec(t, "INSERT INTO t (a, b) VALUES ($1, $2)", i, v)
}
// Expect it to succeed with correct columns.
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[0]))
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[1]))
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[2]))
// Verify correct number of rows via COUNT.
var result int
sqlDB.QueryRow(t, `SELECT count(*) FROM t`).Scan(&result)
if expect := numExistingRows + insertedRows; result != expect {
t.Fatalf("expected %d rows, got %d", expect, result)
}
})
// This tests that a collision is not detected when importing the same source
// file twice in the same IMPORT, into a table without a PK. It exercises the
// row_id generation logic.
t.Run("multiple-file-import-into-without-pk", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
sqlDB.Exec(t,
fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s, %s)`, testFiles.files[0], testFiles.files[0]),
)
// Verify correct number of rows via COUNT.
var result int
sqlDB.QueryRow(t, `SELECT count(*) FROM t`).Scan(&result)
if result != rowsPerFile*2 {
t.Fatalf("expected %d rows, got %d", rowsPerFile*2, result)
}
})
// IMPORT INTO disallows shadowing of existing keys when ingesting data. With
// the exception of shadowing keys having the same ts and value.
//
// This tests key collision detection when importing the same source file
// twice. The ts across imports is different, and so this is considered a
// collision.
t.Run("import-into-same-file-diff-imports", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
sqlDB.Exec(t,
fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[0]),
)
sqlDB.ExpectErr(
t, `ingested key collides with an existing one: /Table/\d+/1/0/0`,
fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[0]),
)
})
// When the ts and value of the ingested keys across SSTs match the existing
// keys we do not consider this to be a collision. This is to support IMPORT
// job pause/resumption.
//
// To ensure uniform behavior we apply the same exception to keys within the
// same SST.
//
// This test attempts to ingest duplicate keys in the same SST, with the same
// value, and succeeds in doing so.
t.Run("import-into-dups-in-sst", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
sqlDB.Exec(t,
fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.fileWithDupKeySameValue[0]),
)
// Verify correct number of rows via COUNT.
var result int
sqlDB.QueryRow(t, `SELECT count(*) FROM t`).Scan(&result)
if result != 200 {
t.Fatalf("expected 200 rows, got %d", result)
}
})
// This tests key collision detection and importing a source file with the
// colliding key sandwiched between valid keys.
t.Run("import-into-key-collision", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
sqlDB.Exec(t,
fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[0]),
)
sqlDB.ExpectErr(
t, `ingested key collides with an existing one: /Table/\d+/1/0/0`,
fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.fileWithShadowKeys[0]),
)
})
// Tests that IMPORT INTO invalidates FK and CHECK constraints.
t.Run("import-into-invalidate-constraints", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE ref (b STRING PRIMARY KEY)`)
defer sqlDB.Exec(t, `DROP TABLE ref`)
sqlDB.Exec(t, `CREATE TABLE t (a INT CHECK (a >= 0), b STRING, CONSTRAINT fk_ref FOREIGN KEY (b) REFERENCES ref)`)
defer sqlDB.Exec(t, `DROP TABLE t`)
var checkValidated, fkValidated bool
sqlDB.QueryRow(t, `SELECT validated from [SHOW CONSTRAINT FROM t] WHERE constraint_name = 'check_a'`).Scan(&checkValidated)
sqlDB.QueryRow(t, `SELECT validated from [SHOW CONSTRAINT FROM t] WHERE constraint_name = 'fk_ref'`).Scan(&fkValidated)
// Prior to import all constraints should be validated.
if !checkValidated || !fkValidated {
t.Fatal("Constraints not validated on creation.\n")
}
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (a, b) CSV DATA (%s)`, testFiles.files[0]))
sqlDB.QueryRow(t, `SELECT validated from [SHOW CONSTRAINT FROM t] WHERE constraint_name = 'check_a'`).Scan(&checkValidated)
sqlDB.QueryRow(t, `SELECT validated from [SHOW CONSTRAINT FROM t] WHERE constraint_name = 'fk_ref'`).Scan(&fkValidated)
// Following an import the constraints should be unvalidated.
if checkValidated || fkValidated {
t.Fatal("FK and CHECK constraints not unvalidated after IMPORT INTO\n")
}
})
// Test userfile IMPORT INTO CSV.
t.Run("import-into-userfile-simple", func(t *testing.T) {
userfileURI := "userfile://defaultdb.public.root/test.csv"
userfileStorage, err := tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).DistSQLSrv.
ExternalStorageFromURI(ctx, userfileURI, security.RootUserName())
require.NoError(t, err)
data := []byte("1,2")
require.NoError(t, userfileStorage.WriteFile(ctx, "", bytes.NewReader(data)))
sqlDB.Exec(t, "CREATE TABLE foo (id INT PRIMARY KEY, id2 INT)")
sqlDB.Exec(t, fmt.Sprintf("IMPORT INTO foo (id, id2) CSV DATA ('%s')", userfileURI))
sqlDB.CheckQueryResults(t, "SELECT * FROM foo", sqlDB.QueryStr(t, "SELECT 1, 2"))
require.NoError(t, userfileStorage.Delete(ctx, ""))
})
t.Run("import-into-with-db-privs", func(t *testing.T) {
sqlDB.Exec(t, `USE defaultdb`)
sqlDB.Exec(t, `CREATE USER foo`)
sqlDB.Exec(t, `GRANT ALL ON DATABASE defaultdb TO foo`)
sqlDB.Exec(t, `CREATE TABLE d (a INT PRIMARY KEY, b STRING)`)
defer sqlDB.Exec(t, `DROP TABLE d`)
sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO d (a, b) CSV DATA (%s)`,
testFiles.files[0]))
// Verify correct number of rows via COUNT.
var result int
sqlDB.QueryRow(t, `SELECT count(*) FROM d`).Scan(&result)
if result != rowsPerFile {
t.Fatalf("expected %d rows, got %d", rowsPerFile, result)
}
})
}
// benchUserUpload benchmarks an IMPORT of ~25 MiB of generated CSV data
// served from uploadBaseURI, which must use either the "nodelocal" or the
// "userfile" scheme. The generated data is staged in the appropriate storage
// before the (timed) IMPORT statement runs; only the IMPORT itself is timed.
func benchUserUpload(b *testing.B, uploadBaseURI string) {
	const (
		nodes = 3
	)
	ctx := context.Background()
	baseDir, cleanup := testutils.TempDir(b)
	defer cleanup()
	f, err := ioutil.TempFile(baseDir, "test_file")
	require.NoError(b, err)
	// Close the temp file when the benchmark finishes; previously it was
	// leaked for the life of the process.
	defer func() {
		_ = f.Close()
	}()
	testFileBase := fmt.Sprintf("/%s", filepath.Base(f.Name()))
	tc := testcluster.StartTestCluster(b, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: baseDir}})
	defer tc.Stopper().Stop(ctx)
	sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
	// Every row (int, string) generated by the CSVGenerator is ~25 bytes.
	// So numRows gives us ~25 MiB of generated CSV content.
	numRows := 1 * 1024 * 1024
	csvGen := newCsvGenerator(0, numRows, &intGenerator{}, &strGenerator{})
	uri, err := url.ParseRequestURI(uploadBaseURI)
	require.NoError(b, err)
	r, err := csvGen.Open()
	require.NoError(b, err)
	var numBytes int64
	switch uri.Scheme {
	case "nodelocal":
		// Write the test data into a file. (A redundant duplicate check of
		// the already-verified Open error was removed here.)
		numBytes, err = io.Copy(f, r)
		require.NoError(b, err)
	case "userfile":
		// Write the test data to userfile storage.
		userfileStorage, err := tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).DistSQLSrv.
			ExternalStorageFromURI(ctx, uploadBaseURI+testFileBase, security.RootUserName())
		require.NoError(b, err)
		content, err := ioutil.ReadAll(r)
		require.NoError(b, err)
		err = userfileStorage.WriteFile(ctx, "", bytes.NewReader(content))
		require.NoError(b, err)
		numBytes = int64(len(content))
	default:
		b.Fatal(errors.New("benchmarking unsupported URI scheme"))
	}
	b.SetBytes(numBytes)
	b.ResetTimer()
	sqlDB.Exec(b,
		fmt.Sprintf(
			`IMPORT TABLE t (a INT8 PRIMARY KEY, b STRING, INDEX (b), INDEX (a, b))
CSV DATA ('%s%s')`,
			uploadBaseURI, testFileBase,
		))
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/importccl
// BenchmarkNodelocalImport-16 1 4444906026 ns/op 6.11 MB/s
// BenchmarkNodelocalImport-16 1 3943970329 ns/op 6.88 MB/s
// BenchmarkNodelocalImport-16 1 4372378719 ns/op 6.21 MB/s
// BenchmarkNodelocalImport-16 1 4182168878 ns/op 6.49 MB/s
// BenchmarkNodelocalImport-16 1 4255328766 ns/op 6.38 MB/s
// BenchmarkNodelocalImport-16 1 5367984071 ns/op 5.06 MB/s
// BenchmarkNodelocalImport-16 1 4130455146 ns/op 6.57 MB/s
// BenchmarkNodelocalImport-16 1 4080583559 ns/op 6.65 MB/s
// BenchmarkNodelocalImport-16 1 4774760252 ns/op 5.68 MB/s
// BenchmarkNodelocalImport-16 1 4967456028 ns/op 5.46 MB/s
// BenchmarkNodelocalImport measures IMPORT throughput when the CSV source is
// staged on nodelocal storage. See benchUserUpload for the shared setup.
func BenchmarkNodelocalImport(b *testing.B) {
	benchUserUpload(b, "nodelocal://0")
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/importccl
// BenchmarkUserfileImport-16 1 3950434182 ns/op 6.87 MB/s
// BenchmarkUserfileImport-16 1 4087946074 ns/op 6.64 MB/s
// BenchmarkUserfileImport-16 1 4422526863 ns/op 6.14 MB/s
// BenchmarkUserfileImport-16 1 5062665154 ns/op 5.36 MB/s
// BenchmarkUserfileImport-16 1 3829669681 ns/op 7.09 MB/s
// BenchmarkUserfileImport-16 1 4553600442 ns/op 5.96 MB/s
// BenchmarkUserfileImport-16 1 4333825355 ns/op 6.26 MB/s
// BenchmarkUserfileImport-16 1 4565827783 ns/op 5.94 MB/s
// BenchmarkUserfileImport-16 1 4060204527 ns/op 6.68 MB/s
// BenchmarkUserfileImport-16 1 4627419761 ns/op 5.86 MB/s
// BenchmarkUserfileImport measures IMPORT throughput when the CSV source is
// staged in userfile storage. See benchUserUpload for the shared setup.
func BenchmarkUserfileImport(b *testing.B) {
	benchUserUpload(b, "userfile://defaultdb.public.root")
}
// csvBenchmarkStream is an importRowProducer implementation that returns 'n' rows.
type csvBenchmarkStream struct {
	n    int        // total number of rows to produce
	pos  int        // current position; advanced by Scan before each Row
	data [][]string // fixture rows, cycled through as the row payload
}
// Progress reports the fraction of the n requested rows produced so far.
func (s *csvBenchmarkStream) Progress() float32 {
	produced, total := float32(s.pos), float32(s.n)
	return produced / total
}
// Scan advances the stream by one row and reports whether a row is available.
func (s *csvBenchmarkStream) Scan() bool {
	s.pos = s.pos + 1
	return !(s.pos > s.n)
}
// Err always returns nil: the in-memory stream cannot fail.
func (s *csvBenchmarkStream) Err() error {
	return nil
}
// Skip is a no-op; there is nothing to skip in the synthetic stream.
func (s *csvBenchmarkStream) Skip() error {
	return nil
}
// Row returns the current row, cycling through the fixture data.
func (s *csvBenchmarkStream) Row() (interface{}, error) {
	idx := s.pos % len(s.data)
	return s.data[idx], nil
}
// Read implements the Reader interface. It's used by the delimited
// benchmark to read its tab-separated input.
func (s *csvBenchmarkStream) Read(buf []byte) (int, error) {
	if !s.Scan() {
		return 0, io.EOF
	}
	r, err := s.Row()
	if err != nil {
		return 0, err
	}
	line := strings.Join(r.([]string), "\t") + "\n"
	return copy(buf, line), nil
}
var _ importRowProducer = &csvBenchmarkStream{}
// BenchmarkConvertRecord-16 1000000 2107 ns/op 56.94 MB/s 3600 B/op 101 allocs/op
// BenchmarkConvertRecord-16 500000 2106 ns/op 56.97 MB/s 3606 B/op 101 allocs/op
// BenchmarkConvertRecord-16 500000 2100 ns/op 57.14 MB/s 3606 B/op 101 allocs/op
// BenchmarkConvertRecord-16 500000 2286 ns/op 52.49 MB/s 3606 B/op 101 allocs/op
// BenchmarkConvertRecord-16 500000 2378 ns/op 50.46 MB/s 3606 B/op 101 allocs/op
// BenchmarkConvertRecord-16 500000 2427 ns/op 49.43 MB/s 3606 B/op 101 allocs/op
// BenchmarkConvertRecord-16 500000 2399 ns/op 50.02 MB/s 3606 B/op 101 allocs/op
// BenchmarkConvertRecord-16 500000 2365 ns/op 50.73 MB/s 3606 B/op 101 allocs/op
// BenchmarkConvertRecord-16 500000 2376 ns/op 50.49 MB/s 3606 B/op 101 allocs/op
// BenchmarkConvertRecord-16 500000 2390 ns/op 50.20 MB/s 3606 B/op 101 allocs/op
// BenchmarkCSVConvertRecord measures the throughput of the parallel CSV
// row-conversion pipeline using TPC-H lineitem rows as the fixture. The KVs
// produced by conversion are drained and discarded so only conversion cost
// is measured.
func BenchmarkCSVConvertRecord(b *testing.B) {
	ctx := context.Background()
	tpchLineItemDataRows := [][]string{
		{"1", "155190", "7706", "1", "17", "21168.23", "0.04", "0.02", "N", "O", "1996-03-13", "1996-02-12", "1996-03-22", "DELIVER IN PERSON", "TRUCK", "egular courts above the"},
		{"1", "67310", "7311", "2", "36", "45983.16", "0.09", "0.06", "N", "O", "1996-04-12", "1996-02-28", "1996-04-20", "TAKE BACK RETURN", "MAIL", "ly final dependencies: slyly bold "},
		{"1", "63700", "3701", "3", "8", "13309.60", "0.10", "0.02", "N", "O", "1996-01-29", "1996-03-05", "1996-01-31", "TAKE BACK RETURN", "REG AIR", "riously. regular, express dep"},
		{"1", "2132", "4633", "4", "28", "28955.64", "0.09", "0.06", "N", "O", "1996-04-21", "1996-03-30", "1996-05-16", "NONE", "AIR", "lites. fluffily even de"},
		{"1", "24027", "1534", "5", "24", "22824.48", "0.10", "0.04", "N", "O", "1996-03-30", "1996-03-14", "1996-04-01", "NONE", "FOB", " pending foxes. slyly re"},
		{"1", "15635", "638", "6", "32", "49620.16", "0.07", "0.02", "N", "O", "1996-01-30", "1996-02-07", "1996-02-03", "DELIVER IN PERSON", "MAIL", "arefully slyly ex"},
		{"2", "106170", "1191", "1", "38", "44694.46", "0.00", "0.05", "N", "O", "1997-01-28", "1997-01-14", "1997-02-02", "TAKE BACK RETURN", "RAIL", "ven requests. deposits breach a"},
		{"3", "4297", "1798", "1", "45", "54058.05", "0.06", "0.00", "R", "F", "1994-02-02", "1994-01-04", "1994-02-23", "NONE", "AIR", "ongside of the furiously brave acco"},
		{"3", "19036", "6540", "2", "49", "46796.47", "0.10", "0.00", "R", "F", "1993-11-09", "1993-12-20", "1993-11-24", "TAKE BACK RETURN", "RAIL", " unusual accounts. eve"},
		{"3", "128449", "3474", "3", "27", "39890.88", "0.06", "0.07", "A", "F", "1994-01-16", "1993-11-22", "1994-01-23", "DELIVER IN PERSON", "SHIP", "nal foxes wake."},
	}
	b.SetBytes(120) // Raw input size. With 8 indexes, expect more on output side.
	// Parse the TPC-H lineitem schema once; the descriptor built from it
	// below drives the conversion.
	stmt, err := parser.ParseOne(`CREATE TABLE lineitem (
		l_orderkey      INT8 NOT NULL,
		l_partkey       INT8 NOT NULL,
		l_suppkey       INT8 NOT NULL,
		l_linenumber    INT8 NOT NULL,
		l_quantity      DECIMAL(15,2) NOT NULL,
		l_extendedprice DECIMAL(15,2) NOT NULL,
		l_discount      DECIMAL(15,2) NOT NULL,
		l_tax           DECIMAL(15,2) NOT NULL,
		l_returnflag    CHAR(1) NOT NULL,
		l_linestatus    CHAR(1) NOT NULL,
		l_shipdate      DATE NOT NULL,
		l_commitdate    DATE NOT NULL,
		l_receiptdate   DATE NOT NULL,
		l_shipinstruct  CHAR(25) NOT NULL,
		l_shipmode      CHAR(10) NOT NULL,
		l_comment       VARCHAR(44) NOT NULL,
		PRIMARY KEY     (l_orderkey, l_linenumber),
		INDEX l_ok      (l_orderkey ASC),
		INDEX l_pk      (l_partkey ASC),
		INDEX l_sk      (l_suppkey ASC),
		INDEX l_sd      (l_shipdate ASC),
		INDEX l_cd      (l_commitdate ASC),
		INDEX l_rd      (l_receiptdate ASC),
		INDEX l_pk_sk   (l_partkey ASC, l_suppkey ASC),
		INDEX l_sk_pk   (l_suppkey ASC, l_partkey ASC)
	)`)
	if err != nil {
		b.Fatal(err)
	}
	create := stmt.AST.(*tree.CreateTable)
	st := cluster.MakeTestingClusterSettings()
	semaCtx := tree.MakeSemaContext()
	evalCtx := tree.MakeTestingEvalContext(st)
	tableDesc, err := MakeSimpleTableDescriptor(ctx, &semaCtx, st, create, descpb.ID(100), keys.PublicSchemaID, descpb.ID(100), NoFKs, 1)
	if err != nil {
		b.Fatal(err)
	}
	kvCh := make(chan row.KVBatch)
	// no-op drain kvs channel.
	go func() {
		for range kvCh {
		}
	}()
	importCtx := &parallelImportContext{
		evalCtx:   &evalCtx,
		tableDesc: tableDesc.ImmutableCopy().(catalog.TableDescriptor),
		kvCh:      kvCh,
	}
	// The producer serves b.N rows by cycling through the fixture above.
	producer := &csvBenchmarkStream{
		n:    b.N,
		pos:  0,
		data: tpchLineItemDataRows,
	}
	consumer := &csvRowConsumer{importCtx: importCtx, opts: &roachpb.CSVOptions{}}
	b.ResetTimer()
	require.NoError(b, runParallelImport(ctx, importCtx, &importFileContext{}, producer, consumer))
	close(kvCh)
	b.ReportAllocs()
}
// selectNotNull builds a query returning the non-NULL values of the given
// column of table t.
func selectNotNull(col string) string {
	return fmt.Sprintf(`SELECT %[1]s FROM t WHERE %[1]s IS NOT NULL`, col)
}
// Test that IMPORT INTO works when columns with default expressions are present.
// The default expressions supported by IMPORT INTO are constant expressions,
// which are literals and functions that always return the same value given the
// same arguments (examples of non-constant expressions are given in the last two
// subtests below). The default expression of a column is used when this column is not
// targeted; otherwise, data from source file (like CSV) is used. It also checks
// that IMPORT TABLE works when there are default columns.
func TestImportDefault(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	skip.UnderRace(t, "takes >1min under race")
	const nodes = 3
	numFiles := nodes + 2
	rowsPerFile := 1000
	rowsPerRaceFile := 16
	testFiles := makeCSVData(t, numFiles, rowsPerFile, nodes, rowsPerRaceFile)
	ctx := context.Background()
	baseDir := filepath.Join("testdata", "csv")
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: baseDir}})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	// Payload served by the test HTTP server; each subtest assigns it before
	// running its IMPORT.
	var data string
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			_, _ = w.Write([]byte(data))
		}
	}))
	defer srv.Close()
	tests := []struct {
		name       string
		data       string
		create     string // column definitions for CREATE TABLE t
		targetCols string // columns named in the IMPORT INTO statement
		format     string
		sequence   string
		with       string // optional WITH options appended to the IMPORT
		// We expect exactly one of expectedResults and expectedError:
		// the latter is relevant for default expressions we don't support.
		expectedResults [][]string
		expectedError   string
	}{
		// CSV formats.
		{
			name:            "is-not-target",
			data:            "1\n2",
			create:          "b INT DEFAULT 42, a INT",
			targetCols:      "a",
			format:          "CSV",
			expectedResults: [][]string{{"42", "1"}, {"42", "2"}},
		},
		{
			name:            "is-not-target-not-null",
			data:            "1\n2",
			create:          "a INT, b INT DEFAULT 42 NOT NULL",
			targetCols:      "a",
			format:          "CSV",
			expectedResults: [][]string{{"1", "42"}, {"2", "42"}},
		},
		{
			name:            "is-target",
			data:            "1,36\n2,37",
			create:          "a INT, b INT DEFAULT 42",
			targetCols:      "a, b",
			format:          "CSV",
			expectedResults: [][]string{{"1", "36"}, {"2", "37"}},
		},
		{
			name:            "mixed-target-and-non-target",
			data:            "35,test string\n72,another test string",
			create:          "b STRING, a INT DEFAULT 53, c INT DEFAULT 42",
			targetCols:      "a, b",
			format:          "CSV",
			expectedResults: [][]string{{"test string", "35", "42"}, {"another test string", "72", "42"}},
		},
		{
			name:            "null-as-default",
			data:            "1\n2\n3",
			create:          "a INT DEFAULT NULL, b INT",
			targetCols:      "b",
			format:          "CSV",
			expectedResults: [][]string{{"NULL", "1"}, {"NULL", "2"}, {"NULL", "3"}},
		},
		{
			name:            "is-target-with-null-data",
			data:            ",36\n2,",
			create:          "a INT, b INT DEFAULT 42",
			targetCols:      "a, b",
			format:          "CSV",
			with:            `nullif = ''`,
			expectedResults: [][]string{{"NULL", "36"}, {"2", "NULL"}},
		},
		{
			name:            "math-constant",
			data:            "35\n67",
			create:          "a INT, b FLOAT DEFAULT round(pi())",
			targetCols:      "a",
			format:          "CSV",
			expectedResults: [][]string{{"35", "3"}, {"67", "3"}},
		},
		{
			name:            "string-function",
			data:            "1\n2",
			create:          `a INT, b STRING DEFAULT repeat('dog', 2)`,
			targetCols:      "a",
			format:          "CSV",
			expectedResults: [][]string{{"1", "dogdog"}, {"2", "dogdog"}},
		},
		{
			name:            "arithmetic",
			data:            "1\n2",
			create:          `a INT, b INT DEFAULT 34 * 3`,
			targetCols:      "a",
			format:          "CSV",
			expectedResults: [][]string{{"1", "102"}, {"2", "102"}},
		},
		// TODO (anzoteh96): add AVRO format, and also MySQL and PGDUMP once
		// IMPORT INTO are supported for these file formats.
		{
			name:            "delimited",
			data:            "1\t2\n3\t4",
			create:          "a INT, b INT DEFAULT 42, c INT",
			targetCols:      "c, a",
			format:          "DELIMITED",
			expectedResults: [][]string{{"2", "42", "1"}, {"4", "42", "3"}},
		},
		{
			name:            "pgcopy",
			data:            "1,2\n3,4",
			create:          "a INT, b INT DEFAULT 42, c INT",
			targetCols:      "c, a",
			with:            `delimiter = ","`,
			format:          "PGCOPY",
			expectedResults: [][]string{{"2", "42", "1"}, {"4", "42", "3"}},
		},
	}
	for _, test := range tests {
		if test.sequence != "" {
			defer sqlDB.Exec(t, fmt.Sprintf(`DROP SEQUENCE IF EXISTS %s`, test.sequence))
		}
		t.Run(test.name, func(t *testing.T) {
			defer sqlDB.Exec(t, `DROP TABLE t`)
			if test.sequence != "" {
				sqlDB.Exec(t, fmt.Sprintf(`CREATE SEQUENCE %s`, test.sequence))
			}
			sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE t (%s)`, test.create))
			data = test.data
			importStmt := fmt.Sprintf(`IMPORT INTO t (%s) %s DATA ("%s")`, test.targetCols, test.format, srv.URL)
			if test.with != "" {
				importStmt = importStmt + fmt.Sprintf(` WITH %s`, test.with)
			}
			if test.expectedError != "" {
				sqlDB.ExpectErr(t, test.expectedError, importStmt)
			} else {
				sqlDB.Exec(t, importStmt)
				sqlDB.CheckQueryResults(t, `SELECT * FROM t`, test.expectedResults)
			}
		})
	}
	t.Run("current-timestamp", func(t *testing.T) {
		data = "1\n2\n3\n4\n5\n6"
		testCases := []struct {
			name        string
			defaultExpr string
			colType     string
			// Precision to which b values are truncated before the
			// min/max window check below.
			truncate time.Duration
		}{
			{
				name:        "current_date",
				defaultExpr: "current_date()",
				colType:     "DATE",
				truncate:    24 * time.Hour,
			},
			{
				name:        "current_timestamp",
				defaultExpr: "current_timestamp()",
				colType:     "TIMESTAMP",
			},
			{
				name:        "current_timestamp_with_precision",
				defaultExpr: "current_timestamp(3)",
				colType:     "TIMESTAMP",
				truncate:    time.Millisecond,
			},
			{
				name:        "current_timestamp_as_int",
				defaultExpr: "current_timestamp()::int",
				colType:     "INT",
			},
			{
				name:        "localtimestamp",
				defaultExpr: "localtimestamp()::TIMESTAMPTZ",
				colType:     "TIMESTAMPTZ",
			},
			{
				name:        "localtimestamp_with_precision",
				defaultExpr: "localtimestamp(3)",
				colType:     "TIMESTAMP",
				truncate:    time.Millisecond,
			},
			{
				name:        "localtimestamp_with_expr_precision",
				defaultExpr: "localtimestamp(1+2+3)",
				colType:     "TIMESTAMP",
			},
			{
				name:        "now",
				defaultExpr: "now()",
				colType:     "TIMESTAMP",
			},
			{
				name:        "now-case-insensitive",
				defaultExpr: "NoW()",
				colType:     "DATE",
			},
			{
				name:        "pg_catalog.now",
				defaultExpr: "pg_catalog.now()",
				colType:     "DATE",
			},
			{
				name:        "statement_timestamp",
				defaultExpr: "statement_timestamp()",
				colType:     "TIMESTAMP",
			},
			{
				name:        "transaction_timestamp",
				defaultExpr: "transaction_timestamp()",
				colType:     "TIMESTAMP",
			},
		}
		for _, test := range testCases {
			t.Run(test.name, func(t *testing.T) {
				defer sqlDB.Exec(t, `DROP TABLE t`)
				sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE t(a INT, b %s DEFAULT %s)`, test.colType, test.defaultExpr))
				// All imported b values must be identical and fall within the
				// [minTs, maxTs] window surrounding the IMPORT.
				minTs := timeutil.Now()
				sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (a) CSV DATA ("%s")`, srv.URL))
				maxTs := timeutil.Now()
				if test.truncate != 0 {
					minTs = minTs.Truncate(test.truncate)
					maxTs = maxTs.Truncate(test.truncate)
				}
				var numBadRows int
				if test.colType == "INT" {
					minTsInt := minTs.Unix()
					maxTsInt := maxTs.Unix()
					sqlDB.QueryRow(t,
						`SELECT count(*) FROM t WHERE b !=(SELECT b FROM t WHERE a=1) OR b IS NULL or b < $1 or b > $2`,
						minTsInt,
						maxTsInt,
					).Scan(&numBadRows)
				} else {
					sqlDB.QueryRow(t,
						`SELECT count(*) FROM t WHERE b !=(SELECT b FROM t WHERE a=1) OR b IS NULL or b < $1 or b > $2`,
						minTs,
						maxTs,
					).Scan(&numBadRows)
				}
				require.Equal(t, 0, numBadRows)
			})
		}
	})
	t.Run("unique_rowid", func(t *testing.T) {
		const M = int(1e9 + 7) // Remainder for unique_rowid addition.
		testCases := []struct {
			name       string
			create     string
			targetCols []string
			insert     string
			// Queries selecting the rowid-defaulted columns; their union must
			// be all-distinct for the test to pass.
			rowIDCols []string
		}{
			{
				name:       "multiple_unique_rowid",
				create:     "a INT DEFAULT unique_rowid(), b INT, c STRING, d INT DEFAULT unique_rowid()",
				targetCols: []string{"b", "c"},
				insert:     "INSERT INTO t (b, c) VALUES (3, 'CAT'), (4, 'DOG')",
				rowIDCols:  []string{selectNotNull("a"), selectNotNull("d")},
			},
			{
				name:       "unique_rowid_with_pk",
				create:     "a INT DEFAULT unique_rowid(), b INT PRIMARY KEY, c STRING",
				targetCols: []string{"b", "c"},
				insert:     "INSERT INTO t (b, c) VALUES (-3, 'CAT'), (-4, 'DOG')",
				rowIDCols:  []string{selectNotNull("a")},
			},
			{
				// unique_rowid()+unique_rowid() won't work as the rowid produced by import
				// has its leftmost bit set to 1, and adding them causes overflow. A way to
				// get around is to have each unique_rowid() modulo a number, M. Here M = 1e9+7
				// is used here given that it's big enough and is a prime, which is
				// generally effective in avoiding collisions.
				name: "rowid+rowid",
				create: fmt.Sprintf(
					`a INT DEFAULT (unique_rowid() %% %d) + (unique_rowid() %% %d), b INT PRIMARY KEY, c STRING`, M, M),
				targetCols: []string{"b", "c"},
				rowIDCols:  []string{selectNotNull("a")},
			},
		}
		for _, test := range testCases {
			t.Run(test.name, func(t *testing.T) {
				defer sqlDB.Exec(t, `DROP TABLE t`)
				sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE t(%s)`, test.create))
				if test.insert != "" {
					sqlDB.Exec(t, test.insert)
				}
				sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (%s) CSV DATA (%s)`,
					strings.Join(test.targetCols, ", "),
					strings.Join(testFiles.files, ", ")))
				var numDistinctRows int
				sqlDB.QueryRow(t,
					fmt.Sprintf(`SELECT DISTINCT COUNT (*) FROM (%s)`,
						strings.Join(test.rowIDCols, " UNION ")),
				).Scan(&numDistinctRows)
				var numRows int
				sqlDB.QueryRow(t, `SELECT COUNT (*) FROM t`).Scan(&numRows)
				require.Equal(t, numDistinctRows, len(test.rowIDCols)*numRows)
			})
		}
	})
	t.Run("random-functions", func(t *testing.T) {
		testCases := []struct {
			name       string
			create     string
			targetCols []string
			randomCols []string
			data       string
		}{
			{
				name:       "random-multiple",
				create:     "a INT, b FLOAT PRIMARY KEY DEFAULT random(), c STRING, d FLOAT DEFAULT random()",
				targetCols: []string{"a", "c"},
				randomCols: []string{selectNotNull("b"), selectNotNull("d")},
			},
			{
				name:       "gen_random_uuid",
				create:     "a INT, b STRING, c UUID PRIMARY KEY DEFAULT gen_random_uuid(), d UUID DEFAULT gen_random_uuid()",
				targetCols: []string{"a", "b"},
				randomCols: []string{selectNotNull("c"), selectNotNull("d")},
			},
			{
				name:       "mixed_random_uuid",
				create:     "a INT, b STRING, c UUID PRIMARY KEY DEFAULT gen_random_uuid(), d FLOAT DEFAULT random()",
				targetCols: []string{"a", "b"},
				randomCols: []string{selectNotNull("c")},
			},
			{
				name:       "random_with_targeted",
				create:     "a INT, b FLOAT DEFAULT random(), d FLOAT DEFAULT random()",
				targetCols: []string{"a", "b"},
				randomCols: []string{selectNotNull("d")},
				data:       "1,0.37\n2,0.455\n3,3.14\n4,0.246\n5,0.42",
			},
			// TODO (anzoteh96): create a testcase for AVRO once we manage to extract
			// targeted columns from the AVRO schema.
		}
		for _, test := range testCases {
			t.Run(test.name, func(t *testing.T) {
				defer sqlDB.Exec(t, `DROP TABLE t`)
				sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE t(%s)`, test.create))
				fileName := strings.Join(testFiles.files, ", ")
				if test.data != "" {
					data = test.data
					fileName = fmt.Sprintf(`%q`, srv.URL)
				}
				// Let's do 3 IMPORTs for each test case to ensure that the values produced
				// do not overlap.
				for i := 0; i < 3; i++ {
					sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (%s) CSV DATA (%s)`,
						strings.Join(test.targetCols, ", "),
						fileName))
				}
				var numDistinctRows int
				sqlDB.QueryRow(t,
					fmt.Sprintf(`SELECT DISTINCT COUNT (*) FROM (%s)`,
						strings.Join(test.randomCols, " UNION ")),
				).Scan(&numDistinctRows)
				var numRows int
				sqlDB.QueryRow(t, `SELECT COUNT (*) FROM t`).Scan(&numRows)
				require.Equal(t, numDistinctRows, len(test.randomCols)*numRows)
			})
		}
	})
}
// This is a regression test for #61203. We test that the random() keys are
// unique on a larger data set. This would previously fail with a primary key
// collision error since we would generate duplicate UUIDs.
//
// Note: that although there is no guarantee that UUIDs do not collide, the
// probability of such a collision is vanishingly low.
func TestUniqueUUID(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// This test is slow under race since it explicitly tries to import a large
	// amount of data.
	skip.UnderRace(t, "slow under race")
	const (
		nodes     = 3
		dataDir   = "userfile://defaultdb.my_files/export"
		dataFiles = dataDir + "/*"
	)
	ctx := context.Background()
	args := base.TestServerArgs{}
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	connDB := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(connDB)
	// Generate a data set large enough to span many reader batches, then
	// export it to userfile so it can be re-imported below.
	dataSize := parallelImporterReaderBatchSize * 100
	sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE data AS SELECT * FROM generate_series(1, %d);`, dataSize))
	sqlDB.Exec(t, `EXPORT INTO CSV $1 FROM TABLE data;`, dataDir)
	// Ensure that UUIDs do not collide when importing 20000 rows.
	sqlDB.Exec(t, `CREATE TABLE r1 (a UUID PRIMARY KEY DEFAULT gen_random_uuid(), b INT);`)
	sqlDB.Exec(t, `IMPORT INTO r1 (b) CSV DATA ($1);`, dataFiles)
	// Ensure that UUIDs do not collide when importing into a table with several UUID calls.
	sqlDB.Exec(t, `CREATE TABLE r2 (a UUID PRIMARY KEY DEFAULT gen_random_uuid(), b INT, c UUID DEFAULT gen_random_uuid());`)
	sqlDB.Exec(t, `IMPORT INTO r2 (b) CSV DATA ($1);`, dataFiles)
	// Ensure that random keys do not collide.
	sqlDB.Exec(t, `CREATE TABLE r3 (a FLOAT PRIMARY KEY DEFAULT random(), b INT);`)
	sqlDB.Exec(t, `IMPORT INTO r3 (b) CSV DATA ($1);`, dataFiles)
}
// TestImportDefaultNextVal verifies that IMPORT INTO a table whose DEFAULT
// expressions call nextval() reserves sequence values in growing chunks
// (10, 100, 1000, ...), and that ordinary INSERTs executed before and after
// the import leave each sequence at the expected last_value.
func TestImportDefaultNextVal(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// Single-threaded reading keeps the chunk-allocation pattern deterministic.
	defer setImportReaderParallelism(1)()
	skip.UnderStressRace(t, "test hits a timeout before a successful run")

	const nodes = 3
	numFiles := 1
	rowsPerFile := 1000
	rowsPerRaceFile := 16
	testFiles := makeCSVData(t, numFiles, rowsPerFile, numFiles, rowsPerRaceFile)

	ctx := context.Background()
	baseDir := filepath.Join("testdata", "csv")
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: baseDir}})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)

	// seqMetadata describes one sequence used by a test case: how it is
	// created, and the last_value expected once the two INSERTs and the
	// IMPORT have all run.
	type seqMetadata struct {
		start                     int
		increment                 int
		expectedImportChunkAllocs int
		// We process fewer rows under race.
		expectedImportChunkAllocsUnderRace int
	}

	t.Run("nextval", func(t *testing.T) {
		testCases := []struct {
			name            string
			create          string
			targetCols      []string
			seqToNumNextval map[string]seqMetadata
			insertData      string
		}{
			{
				name:       "simple-nextval",
				create:     "a INT, b INT DEFAULT nextval('myseq'), c STRING",
				targetCols: []string{"a", "c"},
				// 1000 rows means we will allocate 3 chunks of 10, 100, 1000.
				// The 2 inserts will add 6 more nextval calls.
				// First insert: 1->3
				// Import: 3->1113
				// Second insert 1113->1116
				seqToNumNextval: map[string]seqMetadata{"myseq": {1, 1, 1116, 116}},
				insertData:      `(1, 'cat'), (2, 'him'), (3, 'meme')`,
			},
			{
				name:       "simple-nextval-with-increment-and-start",
				create:     "a INT, b INT DEFAULT nextval('myseq'), c STRING",
				targetCols: []string{"a", "c"},
				// 1000 rows means we will allocate 3 chunks of 10, 100, 1000.
				// The 2 inserts will add 6 more nextval calls.
				// First insert: 100->120
				// Import: 120->11220
				// Second insert: 11220->11250
				seqToNumNextval: map[string]seqMetadata{"myseq": {100, 10, 11250, 1250}},
				insertData:      `(1, 'cat'), (2, 'him'), (3, 'meme')`,
			},
			{
				name:       "two-nextval-diff-seq",
				create:     "a INT, b INT DEFAULT nextval('myseq') + nextval('myseq2'), c STRING",
				targetCols: []string{"a", "c"},
				// Both sequences advance in lock-step: each row consumes one
				// value from each.
				seqToNumNextval: map[string]seqMetadata{"myseq": {1, 1, 1116, 116},
					"myseq2": {1, 1, 1116, 116}},
				insertData: `(1, 'cat'), (2, 'him'), (3, 'meme')`,
			},
			// TODO(adityamaru): Unskip once #56387 is fixed.
			//{
			//	name:       "two-nextval-same-seq",
			//	create:     "a INT, b INT DEFAULT nextval('myseq') + nextval('myseq'),
			//	c STRING",
			//	targetCols: []string{"a", "c"},
			//	seqToNumNextval: map[string]int{"myseq": 1, "myseq2": 1},
			//	expectedImportChunkAllocs: 1110,
			//},
			{
				name:       "two-nextval-cols-same-seq",
				create:     "a INT, b INT DEFAULT nextval('myseq'), c STRING, d INT DEFAULT nextval('myseq')",
				targetCols: []string{"a", "c"},
				// myseq will allocate 10, 100, 1000, 10000 for the 2000 rows.
				// 2 inserts will consume 12 more nextval calls.
				// First insert: 1->6
				// Import: 6->11116
				// Second insert: 11116->11122
				seqToNumNextval: map[string]seqMetadata{"myseq": {1, 1, 11122, 122}},
				insertData:      `(1, 'cat'), (2, 'him'), (3, 'meme')`,
			},
		}
		for _, test := range testCases {
			t.Run(test.name, func(t *testing.T) {
				defer sqlDB.Exec(t, `DROP TABLE t`)
				// (Re-)create each sequence with the configured START/INCREMENT.
				for seqName := range test.seqToNumNextval {
					sqlDB.Exec(t, fmt.Sprintf(`DROP SEQUENCE IF EXISTS %s`, seqName))
					sqlDB.Exec(t, fmt.Sprintf(`CREATE SEQUENCE %s START %d INCREMENT %d`, seqName,
						test.seqToNumNextval[seqName].start, test.seqToNumNextval[seqName].increment))
				}
				sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE t (%s)`, test.create))
				// INSERT, IMPORT, INSERT — the bracketing inserts prove the
				// sequence remains usable around the bulk chunk reservations.
				sqlDB.Exec(t, fmt.Sprintf(`INSERT INTO t (%s) VALUES %s`,
					strings.Join(test.targetCols, ", "), test.insertData))
				sqlDB.Exec(t, fmt.Sprintf(`IMPORT INTO t (%s) CSV DATA (%s)`,
					strings.Join(test.targetCols, ", "), strings.Join(testFiles.files, ", ")))
				sqlDB.Exec(t, fmt.Sprintf(`INSERT INTO t (%s) VALUES %s`,
					strings.Join(test.targetCols, ", "), test.insertData))
				// Check every sequence landed exactly on its expected value.
				for seqName := range test.seqToNumNextval {
					var seqVal int
					sqlDB.QueryRow(t, fmt.Sprintf(`SELECT last_value from %s`, seqName)).Scan(&seqVal)
					expectedVal := test.seqToNumNextval[seqName].expectedImportChunkAllocs
					if util.RaceEnabled {
						expectedVal = test.seqToNumNextval[seqName].expectedImportChunkAllocsUnderRace
					}
					require.Equal(t, expectedVal, seqVal)
				}
			})
		}
	})
}
// TestImportDefaultWithResume verifies that the sequence-value chunks an
// IMPORT reserves for a nextval() DEFAULT column are recorded in the job
// progress, and that pausing and resuming the job reuses those chunks rather
// than allocating new ones (so the sequence's last_value is unchanged across
// the pause/resume cycle).
func TestImportDefaultWithResume(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	defer setImportReaderParallelism(1)()
	const batchSize = 5
	defer TestingSetParallelImporterReaderBatchSize(batchSize)()
	defer row.TestingSetDatumRowConverterBatchSize(2 * batchSize)()
	jobs.DefaultAdoptInterval = 100 * time.Millisecond

	s, db, _ := serverutils.StartServer(t,
		base.TestServerArgs{
			Knobs: base.TestingKnobs{
				RegistryLiveness: jobs.NewFakeNodeLiveness(1),
				DistSQL: &execinfra.TestingKnobs{
					BulkAdderFlushesEveryBatch: true,
				},
			},
		})
	registry := s.JobRegistry().(*jobs.Registry)
	ctx := context.Background()
	defer s.Stopper().Stop(ctx)
	sqlDB := sqlutils.MakeSQLRunner(db)

	testCases := []struct {
		name       string
		create     string
		targetCols string
		format     string
		sequence   string
	}{
		{
			name:       "nextval",
			create:     "a INT, b STRING, c INT PRIMARY KEY DEFAULT nextval('mysequence')",
			targetCols: "a, b",
			sequence:   "mysequence",
		},
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			// Drop the sequence on test completion. Previously this deferred a
			// bare fmt.Sprintf whose result was discarded, so the sequence was
			// never actually dropped.
			defer sqlDB.Exec(t, fmt.Sprintf(`DROP SEQUENCE IF EXISTS %s`, test.sequence))
			defer sqlDB.Exec(t, `DROP TABLE t`)
			sqlDB.Exec(t, fmt.Sprintf(`CREATE SEQUENCE %s`, test.sequence))
			sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE t (%s)`, test.create))

			jobCtx, cancelImport := context.WithCancel(ctx)
			jobIDCh := make(chan jobspb.JobID)
			var jobID jobspb.JobID = -1

			registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
				// Arrange for our special job resumer to be
				// returned the very first time we start the import.
				jobspb.TypeImport: func(raw jobs.Resumer) jobs.Resumer {
					resumer := raw.(*importResumer)
					resumer.testingKnobs.ignoreProtectedTimestamps = true
					resumer.testingKnobs.alwaysFlushJobProgress = true
					resumer.testingKnobs.afterImport = func(summary backupccl.RowCount) error {
						return nil
					}
					if jobID == -1 {
						return &cancellableImportResumer{
							ctx:     jobCtx,
							jobIDCh: jobIDCh,
							wrapped: resumer,
						}
					}
					return resumer
				},
			}

			expectedNumRows := 10*batchSize + 1
			testBarrier, csvBarrier := newSyncBarrier()
			csv1 := newCsvGenerator(0, expectedNumRows, &intGenerator{}, &strGenerator{})
			// Pause the data generator partway through so we can pause the job
			// with only some of the rows imported.
			csv1.addBreakpoint(7*batchSize, func() (bool, error) {
				defer csvBarrier.Enter()()
				return false, nil
			})

			// Convince distsql to use our "external" storage implementation.
			storage := newGeneratedStorage(csv1)
			s.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ExternalStorage = storage.externalStorageFactory()

			// Execute import; ignore any errors returned
			// (since we're aborting the first import run.).
			go func() {
				_, _ = sqlDB.DB.ExecContext(ctx,
					fmt.Sprintf(`IMPORT INTO t (%s) CSV DATA ($1)`, test.targetCols), storage.getGeneratorURIs()[0])
			}()
			jobID = <-jobIDCh

			// Wait until we are blocked handling breakpoint.
			unblockImport := testBarrier.Enter()
			// Wait until we have recorded some job progress.
			js := queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool {
				return js.prog.ResumePos[0] > 0
			})

			// Pause the job;
			if err := registry.PauseRequested(ctx, nil, jobID); err != nil {
				t.Fatal(err)
			}
			// Send cancellation and unblock breakpoint.
			cancelImport()
			unblockImport()

			// Get number of sequence value chunks which have been reserved.
			js = queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool {
				return jobs.StatusPaused == js.status
			})
			// We expect two chunk entries since our breakpoint is at 7*batchSize.
			// [1, 10] and [11, 100]
			var id int32
			sqlDB.QueryRow(t, fmt.Sprintf(`SELECT id FROM system.namespace WHERE name='%s'`,
				test.sequence)).Scan(&id)
			seqDetailsOnPause := js.prog.SequenceDetails
			chunksOnPause := seqDetailsOnPause[0].SeqIdToChunks[id].Chunks
			require.Equal(t, len(chunksOnPause), 2)
			require.Equal(t, chunksOnPause[0].ChunkStartVal, int64(1))
			require.Equal(t, chunksOnPause[0].ChunkSize, int64(10))
			require.Equal(t, chunksOnPause[1].ChunkStartVal, int64(11))
			require.Equal(t, chunksOnPause[1].ChunkSize, int64(100))

			// Just to be doubly sure, check the sequence value before and after
			// resumption to make sure it hasn't changed.
			var seqValOnPause int64
			sqlDB.QueryRow(t, fmt.Sprintf(`SELECT last_value FROM %s`, test.sequence)).Scan(&seqValOnPause)

			// Unpause the job and wait for it to complete.
			if err := registry.Unpause(ctx, nil, jobID); err != nil {
				t.Fatal(err)
			}
			js = queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool { return jobs.StatusSucceeded == js.status })
			// No additional chunks should have been allocated on job resumption since
			// we already have enough chunks of the sequence values to cover all the
			// rows.
			seqDetailsOnSuccess := js.prog.SequenceDetails
			require.Equal(t, seqDetailsOnPause, seqDetailsOnSuccess)

			var seqValOnSuccess int64
			sqlDB.QueryRow(t, fmt.Sprintf(`SELECT last_value FROM %s`,
				test.sequence)).Scan(&seqValOnSuccess)
			require.Equal(t, seqValOnPause, seqValOnSuccess)
		})
	}
}
// TestImportComputed tests IMPORT behavior around computed (STORED) columns:
// IMPORT INTO fills them from their expressions, rejects targeting them
// directly, and IMPORT TABLE only supports them for non-CSV formats.
func TestImportComputed(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const nodes = 3
	ctx := context.Background()
	baseDir := filepath.Join("testdata", "csv")
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: baseDir}})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)

	// The HTTP server serves whatever `data` currently holds; each subtest
	// swaps in its own payload before importing.
	var data string
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			_, _ = w.Write([]byte(data))
		}
	}))
	avroField := []map[string]interface{}{
		{
			"name": "a",
			"type": "int",
		},
		{
			"name": "b",
			"type": "int",
		},
	}
	avroRows := []map[string]interface{}{
		{"a": 1, "b": 2}, {"a": 3, "b": 4},
	}
	avroData := createAvroData(t, "t", avroField, avroRows)
	pgdumpData := `
CREATE TABLE users (a INT, b INT, c INT AS (a + b) STORED);
INSERT INTO users (a, b) VALUES (1, 2), (3, 4);
`
	defer srv.Close()
	tests := []struct {
		into       bool
		name       string
		data       string
		create     string
		targetCols string
		format     string
		// We expect exactly one of expectedResults and expectedError.
		expectedResults [][]string
		expectedError   string
	}{
		{
			into:            true,
			name:            "addition",
			data:            "35,23\n67,10",
			create:          "a INT, b INT, c INT AS (a + b) STORED",
			targetCols:      "a, b",
			format:          "CSV",
			expectedResults: [][]string{{"35", "23", "58"}, {"67", "10", "77"}},
		},
		{
			into:          true,
			name:          "cannot-be-targeted",
			data:          "1,2,3\n3,4,5",
			create:        "a INT, b INT, c INT AS (a + b) STORED",
			targetCols:    "a, b, c",
			format:        "CSV",
			expectedError: `cannot write directly to computed column "c"`,
		},
		{
			into:            true,
			name:            "with-default",
			data:            "35\n67",
			create:          "a INT, b INT DEFAULT 42, c INT AS (a + b) STORED",
			targetCols:      "a",
			format:          "CSV",
			expectedResults: [][]string{{"35", "42", "77"}, {"67", "42", "109"}},
		},
		{
			into:            true,
			name:            "target-cols-reordered",
			data:            "1,2\n3,4",
			create:          "a INT, b INT AS (a + c) STORED, c INT",
			targetCols:      "a, c",
			format:          "CSV",
			expectedResults: [][]string{{"1", "3", "2"}, {"3", "7", "4"}},
		},
		{
			into:            true,
			name:            "import-into-avro",
			data:            avroData,
			create:          "a INT, b INT, c INT AS (a + b) STORED",
			targetCols:      "a, b",
			format:          "AVRO",
			expectedResults: [][]string{{"1", "2", "3"}, {"3", "4", "7"}},
		},
		{
			into:          false,
			name:          "import-table-csv",
			data:          "35,23\n67,10",
			create:        "a INT, c INT AS (a + b) STORED, b INT",
			format:        "CSV",
			expectedError: "to use computed columns, use IMPORT INTO",
		},
		{
			into:            false,
			name:            "import-table-avro",
			data:            avroData,
			create:          "a INT, c INT AS (a + b) STORED, b INT",
			format:          "AVRO",
			expectedResults: [][]string{{"1", "3", "2"}, {"3", "7", "4"}},
		},
		{
			into:            false,
			name:            "pgdump",
			data:            pgdumpData,
			format:          "PGDUMP",
			expectedResults: [][]string{{"1", "2", "3"}, {"3", "4", "7"}},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			defer sqlDB.Exec(t, `DROP TABLE IF EXISTS users`)
			// Point the shared HTTP server at this subtest's payload.
			data = test.data
			var importStmt string
			if test.into {
				sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE users (%s)`, test.create))
				importStmt = fmt.Sprintf(`IMPORT INTO users (%s) %s DATA (%q)`,
					test.targetCols, test.format, srv.URL)
			} else {
				if test.format == "CSV" || test.format == "AVRO" {
					importStmt = fmt.Sprintf(
						`IMPORT TABLE users (%s) %s DATA (%q)`, test.create, test.format, srv.URL)
				} else {
					// PGDUMP carries its own schema, so no column list is given.
					importStmt = fmt.Sprintf(`IMPORT %s (%q)`, test.format, srv.URL)
				}
			}
			if test.expectedError != "" {
				sqlDB.ExpectErr(t, test.expectedError, importStmt)
			} else {
				sqlDB.Exec(t, importStmt)
				sqlDB.CheckQueryResults(t, `SELECT * FROM users`, test.expectedResults)
			}
		})
	}
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/importccl
// BenchmarkDelimitedConvertRecord-16 500000 2473 ns/op 48.51 MB/s
// BenchmarkDelimitedConvertRecord-16 500000 2580 ns/op 46.51 MB/s
// BenchmarkDelimitedConvertRecord-16 500000 2678 ns/op 44.80 MB/s
// BenchmarkDelimitedConvertRecord-16 500000 2897 ns/op 41.41 MB/s
// BenchmarkDelimitedConvertRecord-16 500000 3250 ns/op 36.92 MB/s
// BenchmarkDelimitedConvertRecord-16 500000 3261 ns/op 36.80 MB/s
// BenchmarkDelimitedConvertRecord-16 500000 3016 ns/op 39.79 MB/s
// BenchmarkDelimitedConvertRecord-16 500000 2943 ns/op 40.77 MB/s
// BenchmarkDelimitedConvertRecord-16 500000 3004 ns/op 39.94 MB/s
// BenchmarkDelimitedConvertRecord-16 500000 2966 ns/op 40.45 MB/s
// BenchmarkDelimitedConvertRecord measures the throughput of converting
// tab-delimited (MySQL OUTFILE format) TPC-H lineitem rows into KVs. The KV
// output channel is drained by a no-op goroutine so only conversion is timed.
func BenchmarkDelimitedConvertRecord(b *testing.B) {
	ctx := context.Background()

	tpchLineItemDataRows := [][]string{
		{"1", "155190", "7706", "1", "17", "21168.23", "0.04", "0.02", "N", "O", "1996-03-13", "1996-02-12", "1996-03-22", "DELIVER IN PERSON", "TRUCK", "egular courts above the"},
		{"1", "67310", "7311", "2", "36", "45983.16", "0.09", "0.06", "N", "O", "1996-04-12", "1996-02-28", "1996-04-20", "TAKE BACK RETURN", "MAIL", "ly final dependencies: slyly bold "},
		{"1", "63700", "3701", "3", "8", "13309.60", "0.10", "0.02", "N", "O", "1996-01-29", "1996-03-05", "1996-01-31", "TAKE BACK RETURN", "REG AIR", "riously. regular, express dep"},
		{"1", "2132", "4633", "4", "28", "28955.64", "0.09", "0.06", "N", "O", "1996-04-21", "1996-03-30", "1996-05-16", "NONE", "AIR", "lites. fluffily even de"},
		{"1", "24027", "1534", "5", "24", "22824.48", "0.10", "0.04", "N", "O", "1996-03-30", "1996-03-14", "1996-04-01", "NONE", "FOB", " pending foxes. slyly re"},
		{"1", "15635", "638", "6", "32", "49620.16", "0.07", "0.02", "N", "O", "1996-01-30", "1996-02-07", "1996-02-03", "DELIVER IN PERSON", "MAIL", "arefully slyly ex"},
		{"2", "106170", "1191", "1", "38", "44694.46", "0.00", "0.05", "N", "O", "1997-01-28", "1997-01-14", "1997-02-02", "TAKE BACK RETURN", "RAIL", "ven requests. deposits breach a"},
		{"3", "4297", "1798", "1", "45", "54058.05", "0.06", "0.00", "R", "F", "1994-02-02", "1994-01-04", "1994-02-23", "NONE", "AIR", "ongside of the furiously brave acco"},
		{"3", "19036", "6540", "2", "49", "46796.47", "0.10", "0.00", "R", "F", "1993-11-09", "1993-12-20", "1993-11-24", "TAKE BACK RETURN", "RAIL", " unusual accounts. eve"},
		{"3", "128449", "3474", "3", "27", "39890.88", "0.06", "0.07", "A", "F", "1994-01-16", "1993-11-22", "1994-01-23", "DELIVER IN PERSON", "SHIP", "nal foxes wake."},
	}
	b.SetBytes(120) // Raw input size. With 8 indexes, expect more on output side.
	stmt, err := parser.ParseOne(`CREATE TABLE lineitem (
		l_orderkey      INT8 NOT NULL,
		l_partkey       INT8 NOT NULL,
		l_suppkey       INT8 NOT NULL,
		l_linenumber    INT8 NOT NULL,
		l_quantity      DECIMAL(15,2) NOT NULL,
		l_extendedprice DECIMAL(15,2) NOT NULL,
		l_discount      DECIMAL(15,2) NOT NULL,
		l_tax           DECIMAL(15,2) NOT NULL,
		l_returnflag    CHAR(1) NOT NULL,
		l_linestatus    CHAR(1) NOT NULL,
		l_shipdate      DATE NOT NULL,
		l_commitdate    DATE NOT NULL,
		l_receiptdate   DATE NOT NULL,
		l_shipinstruct  CHAR(25) NOT NULL,
		l_shipmode      CHAR(10) NOT NULL,
		l_comment       VARCHAR(44) NOT NULL,
		PRIMARY KEY     (l_orderkey, l_linenumber),
		INDEX l_ok      (l_orderkey ASC),
		INDEX l_pk      (l_partkey ASC),
		INDEX l_sk      (l_suppkey ASC),
		INDEX l_sd      (l_shipdate ASC),
		INDEX l_cd      (l_commitdate ASC),
		INDEX l_rd      (l_receiptdate ASC),
		INDEX l_pk_sk   (l_partkey ASC, l_suppkey ASC),
		INDEX l_sk_pk   (l_suppkey ASC, l_partkey ASC)
	)`)
	if err != nil {
		b.Fatal(err)
	}
	create := stmt.AST.(*tree.CreateTable)
	st := cluster.MakeTestingClusterSettings()
	semaCtx := tree.MakeSemaContext()
	evalCtx := tree.MakeTestingEvalContext(st)

	tableDesc, err := MakeSimpleTableDescriptor(ctx, &semaCtx, st, create, descpb.ID(100), keys.PublicSchemaID, descpb.ID(100), NoFKs, 1)
	if err != nil {
		b.Fatal(err)
	}

	kvCh := make(chan row.KVBatch)
	// no-op drain kvs channel.
	go func() {
		for range kvCh {
		}
	}()

	// NB: a previously-built `cols` NameList was dead code (the reader is
	// handed nil targetCols below) and has been removed.
	r, err := newMysqloutfileReader(roachpb.MySQLOutfileOptions{
		RowSeparator:   '\n',
		FieldSeparator: '\t',
	}, kvCh, 0, 0,
		tableDesc.ImmutableCopy().(catalog.TableDescriptor), nil /* targetCols */, &evalCtx)
	require.NoError(b, err)

	producer := &csvBenchmarkStream{
		n:    b.N,
		pos:  0,
		data: tpchLineItemDataRows,
	}
	delimited := &fileReader{Reader: producer}
	b.ResetTimer()
	require.NoError(b, r.readFile(ctx, delimited, 0, 0, nil))
	close(kvCh)
	b.ReportAllocs()
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/importccl
// BenchmarkPgCopyConvertRecord-16 317534 3752 ns/op 31.98 MB/s
// BenchmarkPgCopyConvertRecord-16 317433 3767 ns/op 31.86 MB/s
// BenchmarkPgCopyConvertRecord-16 308832 3867 ns/op 31.03 MB/s
// BenchmarkPgCopyConvertRecord-16 255715 3913 ns/op 30.67 MB/s
// BenchmarkPgCopyConvertRecord-16 303086 3942 ns/op 30.44 MB/s
// BenchmarkPgCopyConvertRecord-16 304741 3520 ns/op 34.09 MB/s
// BenchmarkPgCopyConvertRecord-16 338954 3506 ns/op 34.22 MB/s
// BenchmarkPgCopyConvertRecord-16 339795 3531 ns/op 33.99 MB/s
// BenchmarkPgCopyConvertRecord-16 339940 3610 ns/op 33.24 MB/s
// BenchmarkPgCopyConvertRecord-16 307701 3833 ns/op 31.30 MB/s
// BenchmarkPgCopyConvertRecord measures the throughput of converting Postgres
// COPY-format TPC-H lineitem rows into KVs. The KV output channel is drained
// by a no-op goroutine so only conversion is timed.
func BenchmarkPgCopyConvertRecord(b *testing.B) {
	ctx := context.Background()

	tpchLineItemDataRows := [][]string{
		{"1", "155190", "7706", "1", "17", "21168.23", "0.04", "0.02", "N", "O", "1996-03-13", "1996-02-12", "1996-03-22", "DELIVER IN PERSON", "TRUCK", "egular courts above the"},
		{"1", "67310", "7311", "2", "36", "45983.16", "0.09", "0.06", "N", "O", "1996-04-12", "1996-02-28", "1996-04-20", "TAKE BACK RETURN", "MAIL", "ly final dependencies: slyly bold "},
		{"1", "63700", "3701", "3", "8", "13309.60", "0.10", "0.02", "N", "O", "1996-01-29", "1996-03-05", "1996-01-31", "TAKE BACK RETURN", "REG AIR", "riously. regular, express dep"},
		{"1", "2132", "4633", "4", "28", "28955.64", "0.09", "0.06", "N", "O", "1996-04-21", "1996-03-30", "1996-05-16", "NONE", "AIR", "lites. fluffily even de"},
		{"1", "24027", "1534", "5", "24", "22824.48", "0.10", "0.04", "N", "O", "1996-03-30", "1996-03-14", "1996-04-01", "NONE", "FOB", " pending foxes. slyly re"},
		{"1", "15635", "638", "6", "32", "49620.16", "0.07", "0.02", "N", "O", "1996-01-30", "1996-02-07", "1996-02-03", "DELIVER IN PERSON", "MAIL", "arefully slyly ex"},
		{"2", "106170", "1191", "1", "38", "44694.46", "0.00", "0.05", "N", "O", "1997-01-28", "1997-01-14", "1997-02-02", "TAKE BACK RETURN", "RAIL", "ven requests. deposits breach a"},
		{"3", "4297", "1798", "1", "45", "54058.05", "0.06", "0.00", "R", "F", "1994-02-02", "1994-01-04", "1994-02-23", "NONE", "AIR", "ongside of the furiously brave acco"},
		{"3", "19036", "6540", "2", "49", "46796.47", "0.10", "0.00", "R", "F", "1993-11-09", "1993-12-20", "1993-11-24", "TAKE BACK RETURN", "RAIL", " unusual accounts. eve"},
		{"3", "128449", "3474", "3", "27", "39890.88", "0.06", "0.07", "A", "F", "1994-01-16", "1993-11-22", "1994-01-23", "DELIVER IN PERSON", "SHIP", "nal foxes wake."},
	}
	b.SetBytes(120) // Raw input size. With 8 indexes, expect more on output side.
	stmt, err := parser.ParseOne(`CREATE TABLE lineitem (
		l_orderkey      INT8 NOT NULL,
		l_partkey       INT8 NOT NULL,
		l_suppkey       INT8 NOT NULL,
		l_linenumber    INT8 NOT NULL,
		l_quantity      DECIMAL(15,2) NOT NULL,
		l_extendedprice DECIMAL(15,2) NOT NULL,
		l_discount      DECIMAL(15,2) NOT NULL,
		l_tax           DECIMAL(15,2) NOT NULL,
		l_returnflag    CHAR(1) NOT NULL,
		l_linestatus    CHAR(1) NOT NULL,
		l_shipdate      DATE NOT NULL,
		l_commitdate    DATE NOT NULL,
		l_receiptdate   DATE NOT NULL,
		l_shipinstruct  CHAR(25) NOT NULL,
		l_shipmode      CHAR(10) NOT NULL,
		l_comment       VARCHAR(44) NOT NULL,
		PRIMARY KEY     (l_orderkey, l_linenumber),
		INDEX l_ok      (l_orderkey ASC),
		INDEX l_pk      (l_partkey ASC),
		INDEX l_sk      (l_suppkey ASC),
		INDEX l_sd      (l_shipdate ASC),
		INDEX l_cd      (l_commitdate ASC),
		INDEX l_rd      (l_receiptdate ASC),
		INDEX l_pk_sk   (l_partkey ASC, l_suppkey ASC),
		INDEX l_sk_pk   (l_suppkey ASC, l_partkey ASC)
	)`)
	if err != nil {
		b.Fatal(err)
	}
	create := stmt.AST.(*tree.CreateTable)
	semaCtx := tree.MakeSemaContext()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)

	tableDesc, err := MakeSimpleTableDescriptor(ctx, &semaCtx, st, create, descpb.ID(100), keys.PublicSchemaID,
		descpb.ID(100), NoFKs, 1)
	if err != nil {
		b.Fatal(err)
	}

	kvCh := make(chan row.KVBatch)
	// no-op drain kvs channel.
	go func() {
		for range kvCh {
		}
	}()

	// NB: a previously-built `cols` NameList was dead code (the reader is
	// handed nil targetCols below) and has been removed.
	r, err := newPgCopyReader(roachpb.PgCopyOptions{
		Delimiter:  '\t',
		Null:       `\N`,
		MaxRowSize: 4096,
	}, kvCh, 0, 0,
		tableDesc.ImmutableCopy().(catalog.TableDescriptor), nil /* targetCols */, &evalCtx)
	require.NoError(b, err)

	producer := &csvBenchmarkStream{
		n:    b.N,
		pos:  0,
		data: tpchLineItemDataRows,
	}
	pgCopyInput := &fileReader{Reader: producer}
	b.ResetTimer()
	require.NoError(b, r.readFile(ctx, pgCopyInput, 0, 0, nil))
	close(kvCh)
	b.ReportAllocs()
}
// fakeResumer calls optional callbacks during the job lifecycle. Either
// callback may be nil, in which case the corresponding lifecycle stage is a
// no-op that reports success.
type fakeResumer struct {
	// OnResume, if set, runs when the job resumes.
	OnResume func(context.Context) error
	// FailOrCancel, if set, runs when the job fails or is canceled.
	FailOrCancel func(context.Context) error
}

// Compile-time check that fakeResumer satisfies jobs.Resumer.
var _ jobs.Resumer = fakeResumer{}
// Resume runs the OnResume callback when one is configured and returns its
// error; with no callback configured it succeeds immediately.
func (d fakeResumer) Resume(ctx context.Context, execCtx interface{}) error {
	if d.OnResume == nil {
		return nil
	}
	return d.OnResume(ctx)
}
// OnFailOrCancel runs the FailOrCancel callback when one is configured and
// returns its error; with no callback configured it succeeds immediately.
func (d fakeResumer) OnFailOrCancel(ctx context.Context, _ interface{}) error {
	cb := d.FailOrCancel
	if cb == nil {
		return nil
	}
	return cb(ctx)
}
// TestImportControlJobRBAC tests that a root user can control any job, but
// a non-admin user can only control jobs which are created by them.
// TODO(adityamaru): Verifying the state of the job after the control command
// has been issued would also be nice, but it makes the test flaky.
func TestImportControlJobRBAC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	defer jobs.ResetConstructors()()
	ctx := context.Background()
	tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{})
	defer tc.Stopper().Stop(ctx)
	rootDB := sqlutils.MakeSQLRunner(tc.Conns[0])

	registry := tc.Server(0).JobRegistry().(*jobs.Registry)

	// Create non-root user with the CONTROLJOB role option so it may control
	// its own (non-admin) jobs.
	rootDB.Exec(t, `CREATE USER testuser`)
	rootDB.Exec(t, `ALTER ROLE testuser CONTROLJOB`)
	pgURL, cleanupFunc := sqlutils.PGUrl(
		t, tc.Server(0).ServingSQLAddr(), "TestImportPrivileges-testuser",
		url.User("testuser"),
	)
	defer cleanupFunc()
	testuser, err := gosql.Open("postgres", pgURL.String())
	if err != nil {
		t.Fatal(err)
	}
	defer testuser.Close()

	// Closing done unblocks every fake resumer started below so the cluster
	// can shut down cleanly.
	done := make(chan struct{})
	defer close(done)

	jobs.RegisterConstructor(jobspb.TypeImport, func(_ *jobs.Job, _ *cluster.Settings) jobs.Resumer {
		return fakeResumer{
			OnResume: func(ctx context.Context) error {
				<-done
				return nil
			},
			FailOrCancel: func(ctx context.Context) error {
				<-done
				return nil
			},
		}
	})

	// startLeasedJob creates and starts a job from the given record, failing
	// the test on error.
	startLeasedJob := func(t *testing.T, record jobs.Record) *jobs.StartableJob {
		job, err := jobs.TestingCreateAndStartJob(ctx, registry, tc.Server(0).DB(), record)
		require.NoError(t, err)
		return job
	}

	defaultRecord := jobs.Record{
		// Job does not accept an empty Details field, so arbitrarily provide
		// ImportDetails.
		Details:  jobspb.ImportDetails{},
		Progress: jobspb.ImportProgress{},
	}

	// NB: the loop variable is named testCase (not tc) to avoid shadowing the
	// test cluster above.
	for _, testCase := range []struct {
		name         string
		controlQuery string
	}{
		{
			"pause",
			`PAUSE JOB $1`,
		},
		{
			"cancel",
			`CANCEL JOB $1`,
		},
		{
			"resume",
			`RESUME JOB $1`,
		},
	} {
		t.Run(testCase.name, func(t *testing.T) {
			// Start import job as root.
			rootJobRecord := defaultRecord
			rootJobRecord.Username = security.RootUserName()
			rootJob := startLeasedJob(t, rootJobRecord)

			// Test root can control root job. rootDB.Exec fails the test on
			// error; a stale require.NoError on the outer err was removed here.
			rootDB.Exec(t, testCase.controlQuery, rootJob.ID())

			// Start import job as non-admin user.
			nonAdminJobRecord := defaultRecord
			nonAdminJobRecord.Username = security.TestUserName()
			userJob := startLeasedJob(t, nonAdminJobRecord)

			// Test testuser can control testuser job.
			_, err := testuser.Exec(testCase.controlQuery, userJob.ID())
			require.NoError(t, err)

			// Start second import job as root.
			rootJob2 := startLeasedJob(t, rootJobRecord)

			// Start second import job as non-admin user.
			userJob2 := startLeasedJob(t, nonAdminJobRecord)

			// Test root can control testuser job.
			rootDB.Exec(t, testCase.controlQuery, userJob2.ID())

			// Test testuser CANNOT control root job.
			_, err = testuser.Exec(testCase.controlQuery, rootJob2.ID())
			require.True(t, testutils.IsError(err, "only admins can control jobs owned by other admins"))
		})
	}
}
// TestImportWorkerFailure tests that IMPORT can restart after the failure
// of a worker node.
func TestImportWorkerFailure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	// TODO(mjibson): Although this test passes most of the time it still
	// sometimes fails because not all kinds of failures caused by shutting a
	// node down are detected and retried.
	skip.WithIssue(t, 51793, "flaky due to undetected kinds of failures when the node is shutdown")

	defer jobs.TestingSetAdoptAndCancelIntervals(10*time.Millisecond, 10*time.Millisecond)()

	// allowResponse gates bulk-op responses so the test controls when the
	// import makes progress.
	allowResponse := make(chan struct{})
	params := base.TestClusterArgs{}
	params.ServerArgs.Knobs.Store = &kvserver.StoreTestingKnobs{
		TestingResponseFilter: jobutils.BulkOpResponseFilter(&allowResponse),
	}

	ctx := context.Background()
	tc := testcluster.StartTestCluster(t, 3, params)
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)

	// The server echoes the request path, so each URL yields one distinct row.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			_, _ = w.Write([]byte(r.URL.Path[1:]))
		}
	}))
	defer srv.Close()

	count := 20
	urls := make([]string, count)
	for i := 0; i < count; i++ {
		urls[i] = fmt.Sprintf("'%s/%d'", srv.URL, i)
	}
	csvURLs := strings.Join(urls, ", ")
	query := fmt.Sprintf(`IMPORT TABLE t (i INT8 PRIMARY KEY) CSV DATA (%s) WITH sstsize = '1B'`, csvURLs)

	// Run the import asynchronously; the statement is expected to fail once a
	// node is stopped below.
	errCh := make(chan error)
	go func() {
		_, err := conn.Exec(query)
		errCh <- err
	}()
	// Let exactly one response through to prove the import is underway.
	select {
	case allowResponse <- struct{}{}:
	case err := <-errCh:
		t.Fatalf("%s: query returned before expected: %s", err, query)
	}
	var jobID jobspb.JobID
	sqlDB.QueryRow(t, `SELECT id FROM system.jobs ORDER BY created DESC LIMIT 1`).Scan(&jobID)

	// Shut down a node. This should force LoadCSV to fail in its current
	// execution. It should detect this as a context canceled error.
	tc.StopServer(1)

	close(allowResponse)
	// We expect the statement to fail.
	if err := <-errCh; !testutils.IsError(err, "node failure") {
		t.Fatal(err)
	}

	// But the job should be restarted and succeed eventually.
	jobutils.WaitForJob(t, sqlDB, jobID)
	sqlDB.CheckQueryResults(t,
		`SELECT * FROM t ORDER BY i`,
		sqlDB.QueryStr(t, `SELECT * FROM generate_series(0, $1)`, count-1),
	)
}
// TestImportLivenessWithRestart tests that a node liveness transition
// during IMPORT correctly resumes after the node executing the job
// becomes non-live (from the perspective of the jobs registry).
//
// Its actual purpose is to address the second bug listed in #22924 about
// the addsstable arguments not in request range. The theory was that the
// restart in that issue was caused by node liveness and that the work
// already performed (the splits and addsstables) somehow caused the second
// error. However this does not appear to be the case, as running many stress
// iterations with differing constants (rows, sstsize, kv.bulk_ingest.batch_size)
// was not able to fail in the way listed by the second bug.
func TestImportLivenessWithRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	skip.WithIssue(t, 51794, "TODO(dt): this relies on chunking done by prior version of IMPORT."+
		"Rework this test, or replace it with resume-tests + jobs infra tests.")

	defer jobs.TestingSetAdoptAndCancelIntervals(10*time.Millisecond, 10*time.Millisecond)()

	const nodes = 1
	// Fake liveness lets the test flip the node's expiration by hand.
	nl := jobs.NewFakeNodeLiveness(nodes)
	serverArgs := base.TestServerArgs{
		Settings: cluster.MakeTestingClusterSettingsWithVersions(
			roachpb.Version{Major: 20, Minor: 1},
			roachpb.Version{Major: 20, Minor: 1},
			true),
		Knobs: base.TestingKnobs{
			RegistryLiveness: nl,
		},
	}

	// allowResponse gates bulk-op responses so the test controls the pace of
	// the import's addsstable requests.
	var allowResponse chan struct{}
	params := base.TestClusterArgs{ServerArgs: serverArgs}
	params.ServerArgs.Knobs.Store = &kvserver.StoreTestingKnobs{
		TestingResponseFilter: jobutils.BulkOpResponseFilter(&allowResponse),
	}

	ctx := context.Background()
	tc := testcluster.StartTestCluster(t, nodes, params)
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)

	// Prevent hung HTTP connections in leaktest.
	sqlDB.Exec(t, `SET CLUSTER SETTING cloudstorage.timeout = '3s'`)

	sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '300B'`)
	sqlDB.Exec(t, `CREATE DATABASE liveness`)

	const rows = 5000
	// The server streams the integers [0, rows) as CSV, one per line.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			for i := 0; i < rows; i++ {
				fmt.Fprintln(w, i)
			}
		}
	}))
	defer srv.Close()

	const query = `IMPORT TABLE liveness.t (i INT8 PRIMARY KEY) CSV DATA ($1) WITH sstsize = '500B', experimental_sorted_ingestion`

	// Start an IMPORT and wait until it's done one addsstable.
	allowResponse = make(chan struct{})
	errCh := make(chan error)
	go func() {
		_, err := conn.Exec(query, srv.URL)
		errCh <- err
	}()
	// Allow many, but not all, addsstables to complete.
	for i := 0; i < 50; i++ {
		select {
		case allowResponse <- struct{}{}:
		case err := <-errCh:
			t.Fatal(err)
		}
	}
	// Fetch the new job ID and lease since we know it's running now.
	var jobID jobspb.JobID
	originalLease := &jobspb.Progress{}
	{
		var expectedLeaseBytes []byte
		sqlDB.QueryRow(
			t, `SELECT id, progress FROM system.jobs ORDER BY created DESC LIMIT 1`,
		).Scan(&jobID, &expectedLeaseBytes)
		if err := protoutil.Unmarshal(expectedLeaseBytes, originalLease); err != nil {
			t.Fatal(err)
		}
	}

	// addsstable is done, make the node non-live and wait for cancellation
	nl.FakeSetExpiration(1, hlc.MinTimestamp)
	// Wait for the registry cancel loop to run and cancel the job.
	<-nl.SelfCalledCh
	<-nl.SelfCalledCh
	close(allowResponse)
	err := <-errCh
	if !testutils.IsError(err, "job .*: node liveness error") {
		t.Fatalf("unexpected: %v", err)
	}

	// Ensure that partial progress has been recorded
	partialProgress := jobutils.GetJobProgress(t, sqlDB, jobID)
	if len(partialProgress.Details.(*jobspb.Progress_Import).Import.SpanProgress) == 0 {
		t.Fatal("no partial import progress detected")
	}

	// Make the node live again
	nl.FakeSetExpiration(1, hlc.MaxTimestamp)
	// The registry should now adopt the job and resume it.
	jobutils.WaitForJob(t, sqlDB, jobID)
	// Verify that the job lease was updated
	rescheduledProgress := jobutils.GetJobProgress(t, sqlDB, jobID)
	if rescheduledProgress.ModifiedMicros <= originalLease.ModifiedMicros {
		t.Fatalf("expecting rescheduled job to have a later modification time: %d vs %d",
			rescheduledProgress.ModifiedMicros, originalLease.ModifiedMicros)
	}

	// Verify that all expected rows are present after a stop/start cycle.
	var rowCount int
	sqlDB.QueryRow(t, "SELECT count(*) from liveness.t").Scan(&rowCount)
	if rowCount != rows {
		t.Fatalf("not all rows were present. Expecting %d, had %d", rows, rowCount)
	}

	// Verify that all write progress coalesced into a single span
	// encompassing the entire table.
	spans := rescheduledProgress.Details.(*jobspb.Progress_Import).Import.SpanProgress
	if len(spans) != 1 {
		t.Fatalf("expecting only a single progress span, had %d\n%s", len(spans), spans)
	}

	// Ensure that an entire table range is marked as complete
	tableSpan := roachpb.Span{
		Key:    keys.MinKey,
		EndKey: keys.MaxKey,
	}
	if !tableSpan.EqualValue(spans[0]) {
		t.Fatalf("expected entire table to be marked complete, had %s", spans[0])
	}
}
// TestImportLivenessWithLeniency tests that a temporary node liveness
// transition during IMPORT doesn't cancel the job, but allows the
// owning node to continue processing.
func TestImportLivenessWithLeniency(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	defer jobs.TestingSetAdoptAndCancelIntervals(10*time.Millisecond, 10*time.Millisecond)()

	const nodes = 1
	// Fake liveness lets the test flip the node's expiration by hand.
	nl := jobs.NewFakeNodeLiveness(nodes)
	serverArgs := base.TestServerArgs{
		Settings: cluster.MakeTestingClusterSettingsWithVersions(
			roachpb.Version{Major: 20, Minor: 1},
			roachpb.Version{Major: 20, Minor: 1},
			true),
		Knobs: base.TestingKnobs{
			RegistryLiveness: nl,
		},
	}

	// allowResponse gates bulk-op responses so the test controls the pace of
	// the import's addsstable requests.
	var allowResponse chan struct{}
	params := base.TestClusterArgs{ServerArgs: serverArgs}
	params.ServerArgs.Knobs.Store = &kvserver.StoreTestingKnobs{
		TestingResponseFilter: jobutils.BulkOpResponseFilter(&allowResponse),
	}

	ctx := context.Background()
	tc := testcluster.StartTestCluster(t, nodes, params)
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)

	// Prevent hung HTTP connections in leaktest.
	sqlDB.Exec(t, `SET CLUSTER SETTING cloudstorage.timeout = '3s'`)
	// We want to know exactly how much leniency is configured.
	sqlDB.Exec(t, `SET CLUSTER SETTING jobs.registry.leniency = '1m'`)
	sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '300B'`)
	sqlDB.Exec(t, `CREATE DATABASE liveness`)

	// The server streams the integers [0, rows) as CSV, one per line.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		const rows = 5000
		if r.Method == "GET" {
			for i := 0; i < rows; i++ {
				fmt.Fprintln(w, i)
			}
		}
	}))
	defer srv.Close()

	const query = `IMPORT TABLE liveness.t (i INT8 PRIMARY KEY) CSV DATA ($1) WITH sstsize = '500B'`

	// Start an IMPORT and wait until it's done one addsstable.
	allowResponse = make(chan struct{})
	errCh := make(chan error)
	go func() {
		_, err := conn.Exec(query, srv.URL)
		errCh <- err
	}()
	// Allow many, but not all, addsstables to complete.
	for i := 0; i < 50; i++ {
		select {
		case allowResponse <- struct{}{}:
		case err := <-errCh:
			t.Fatal(err)
		}
	}
	// Fetch the new job ID and lease since we know it's running now.
	var jobID jobspb.JobID
	originalLease := &jobspb.Payload{}
	{
		var expectedLeaseBytes []byte
		sqlDB.QueryRow(
			t, `SELECT id, payload FROM system.jobs ORDER BY created DESC LIMIT 1`,
		).Scan(&jobID, &expectedLeaseBytes)
		if err := protoutil.Unmarshal(expectedLeaseBytes, originalLease); err != nil {
			t.Fatal(err)
		}
	}

	// addsstable is done, make the node slightly tardy.
	// 15 seconds of tardiness is within the 1m leniency window set above, so
	// the registry must not cancel the job.
	nl.FakeSetExpiration(1, hlc.Timestamp{
		WallTime: hlc.UnixNano() - (15 * time.Second).Nanoseconds(),
	})

	// Wait for the registry cancel loop to run and not cancel the job.
	<-nl.SelfCalledCh
	<-nl.SelfCalledCh
	close(allowResponse)

	// Set the node to be fully live again.  This prevents the registry
	// from canceling all of the jobs if the test node is saturated
	// and the import runs slowly.
	nl.FakeSetExpiration(1, hlc.MaxTimestamp)

	// Verify that the client didn't see anything amiss.
	if err := <-errCh; err != nil {
		t.Fatalf("import job should have completed: %s", err)
	}

	// The job should have completed normally.
	jobutils.WaitForJob(t, sqlDB, jobID)
}
// TestImportMVCCChecksums verifies that MVCC checksums are correctly
// computed by issuing a secondary index change that runs a CPut on the
// index. See #23984.
func TestImportMVCCChecksums(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	ctx := context.Background()
	server, conn, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer server.Stopper().Stop(ctx)
	runner := sqlutils.MakeSQLRunner(conn)
	runner.Exec(t, `CREATE DATABASE d`)

	// Serve a single CSV row over HTTP for the IMPORT to consume.
	dataSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			return
		}
		fmt.Fprint(w, "1,1,1")
	}))
	defer dataSrv.Close()

	runner.Exec(t, `IMPORT TABLE d.t (
a INT8 PRIMARY KEY,
b INT8,
c INT8,
INDEX (b) STORING (c)
) CSV DATA ($1)`, dataSrv.URL)
	// The UPDATE touches the stored column of the secondary index,
	// exercising a CPut against the values written by IMPORT.
	runner.Exec(t, `UPDATE d.t SET c = 2 WHERE a = 1`)
}
// TestImportMysql exercises IMPORT of mysqldump files: single-table and
// whole-database dumps, plain and gzip/bzip2 compressed, verifying the
// imported rows against the generated testdata.
func TestImportMysql(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	skip.WithIssue(t, 40263)
	const (
		nodes = 3
	)
	ctx := context.Background()
	baseDir := filepath.Join("testdata")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
	sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
	sqlDB.Exec(t, `CREATE DATABASE foo; SET DATABASE = foo`)
	files := getMysqldumpTestdata(t)
	// nodelocal URIs are resolved relative to the server's ExternalIODir,
	// so strip baseDir from the generated file paths.
	simple := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(files.simple, baseDir))}
	second := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(files.second, baseDir))}
	multitable := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(files.wholeDB, baseDir))}
	multitableGz := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(files.wholeDB+".gz", baseDir))}
	multitableBz := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(files.wholeDB+".bz2", baseDir))}
	// Bitmask of which tables each case is expected to have created.
	const expectSimple, expectSecond, expectEverything = 1 << 0, 1 << 2, 1 << 3
	const expectAll = -1
	for _, c := range []struct {
		name     string
		expected int
		query    string
		args     []interface{}
	}{
		{`read data only`, expectSimple, `IMPORT TABLE simple (i INT8 PRIMARY KEY, s text, b bytea) MYSQLDUMP DATA ($1)`, simple},
		{`single table dump`, expectSimple, `IMPORT TABLE simple FROM MYSQLDUMP ($1)`, simple},
		{`second table dump`, expectSecond, `IMPORT TABLE second FROM MYSQLDUMP ($1) WITH skip_foreign_keys`, second},
		{`simple from multi`, expectSimple, `IMPORT TABLE simple FROM MYSQLDUMP ($1)`, multitable},
		{`second from multi`, expectSecond, `IMPORT TABLE second FROM MYSQLDUMP ($1) WITH skip_foreign_keys`, multitable},
		{`all from multi`, expectAll, `IMPORT MYSQLDUMP ($1)`, multitable},
		{`all from multi gzip`, expectAll, `IMPORT MYSQLDUMP ($1)`, multitableGz},
		{`all from multi bzip`, expectAll, `IMPORT MYSQLDUMP ($1)`, multitableBz},
	} {
		t.Run(c.name, func(t *testing.T) {
			// Each case starts from a clean slate.
			sqlDB.Exec(t, `DROP TABLE IF EXISTS simple, second, third, everything CASCADE`)
			sqlDB.Exec(t, `DROP SEQUENCE IF EXISTS simple_auto_inc, third_auto_inc`)
			sqlDB.Exec(t, c.query, c.args...)
			if c.expected&expectSimple != 0 {
				if c.name != "read data only" {
					// Exercise the imported auto-inc sequence by inserting a
					// row without an explicit primary key.
					sqlDB.Exec(t, "INSERT INTO simple (s) VALUES ('auto-inc')")
				}
				for idx, row := range sqlDB.QueryStr(t, "SELECT * FROM simple ORDER BY i") {
					{
						if idx == len(simpleTestRows) {
							// The extra row is the auto-inc insert above; it has
							// no expected bytes value, so skip the bytes check.
							if expected, actual := "auto-inc", row[1]; expected != actual {
								t.Fatalf("expected row i=%s string to be %q, got %q", row[0], expected, actual)
							}
							continue
						}
						expected, actual := simpleTestRows[idx].s, row[1]
						if expected == injectNull {
							expected = "NULL"
						}
						if expected != actual {
							t.Fatalf("expected row i=%s string to be %q, got %q", row[0], expected, actual)
						}
					}
					{
						expected, actual := simpleTestRows[idx].b, row[2]
						if expected == nil {
							expected = []byte("NULL")
						}
						if !bytes.Equal(expected, []byte(actual)) {
							t.Fatalf("expected row i=%s bytes to be %q, got %q", row[0], expected, actual)
						}
					}
				}
			} else {
				sqlDB.ExpectErr(t, "does not exist", `SELECT 1 FROM simple LIMIT 1`)
			}
			if c.expected&expectSecond != 0 {
				res := sqlDB.QueryStr(t, "SELECT * FROM second ORDER BY i")
				if expected, actual := secondTableRows, len(res); expected != actual {
					t.Fatalf("expected %d, got %d", expected, actual)
				}
				for _, row := range res {
					if i, j := row[0], row[1]; i != "-"+j {
						t.Fatalf("expected %s = - %s", i, j)
					}
				}
			} else {
				sqlDB.ExpectErr(t, "does not exist", `SELECT 1 FROM second LIMIT 1`)
			}
			if c.expected&expectEverything != 0 {
				res := sqlDB.QueryStr(t, "SELECT i, c, iw, fl, d53, j FROM everything ORDER BY i")
				if expected, actual := len(everythingTestRows), len(res); expected != actual {
					t.Fatalf("expected %d, got %d", expected, actual)
				}
				for i := range res {
					if got, expected := res[i][0], fmt.Sprintf("%v", everythingTestRows[i].i); got != expected {
						t.Fatalf("expected %s got %s", expected, got)
					}
					if got, expected := res[i][1], everythingTestRows[i].c; got != expected {
						t.Fatalf("expected %s got %s", expected, got)
					}
					if got, expected := res[i][2], fmt.Sprintf("%v", everythingTestRows[i].iw); got != expected {
						t.Fatalf("expected %s got %s", expected, got)
					}
					if got, expected := res[i][3], fmt.Sprintf("%v", everythingTestRows[i].fl); got != expected {
						t.Fatalf("expected %s got %s", expected, got)
					}
					if got, expected := res[i][4], everythingTestRows[i].d53; got != expected {
						t.Fatalf("expected %s got %s", expected, got)
					}
					if got, expected := res[i][5], everythingTestRows[i].j; got != expected {
						t.Fatalf("expected %s got %s", expected, got)
					}
				}
			} else {
				sqlDB.ExpectErr(t, "does not exist", `SELECT 1 FROM everything LIMIT 1`)
			}
		})
	}
}
// TODO (anzoteh96): this should have been in TestImportMysql, but the
// entire test was skipped. We should move this into TestImportMysql once
// it's unskipped.
func TestImportIntoMysql(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const nodes = 3
	ctx := context.Background()
	serverArgs := base.TestServerArgs{ExternalIODir: filepath.Join("testdata")}
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: serverArgs})
	defer tc.Stopper().Stop(ctx)
	runner := sqlutils.MakeSQLRunner(tc.Conns[0])

	// Serve a MYSQLDUMP payload over HTTP.
	dump := `INSERT INTO t VALUES (1, 2), (3, 4)`
	dataSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			_, _ = w.Write([]byte(dump))
		}
	}))
	defer dataSrv.Close()

	defer runner.Exec(t, "DROP TABLE t")
	runner.Exec(t, "CREATE TABLE t (a INT, b INT)")
	// IMPORT INTO does not support the MYSQLDUMP format; expect the error.
	runner.ExpectErr(t,
		"MYSQLDUMP file format is currently unsupported by IMPORT INTO",
		fmt.Sprintf(`IMPORT INTO t (a, b) MYSQLDUMP DATA (%q)`, dataSrv.URL))
}
// TestImportDelimited tests IMPORT ... DELIMITED DATA (mysql outfile
// format) across the generated testdata configurations, and checks that
// IMPORT INTO produces the same results as IMPORT TABLE.
func TestImportDelimited(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const (
		nodes = 3
	)
	ctx := context.Background()
	baseDir := filepath.Join("testdata", "mysqlout")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
	sqlDB.Exec(t, `CREATE DATABASE foo; SET DATABASE = foo`)
	testRows, configs := getMysqlOutfileTestdata(t)
	for i, cfg := range configs {
		t.Run(cfg.name, func(t *testing.T) {
			var opts []interface{}
			cmd := fmt.Sprintf(`IMPORT TABLE test%d (i INT8 PRIMARY KEY, s text, b bytea) DELIMITED DATA ($1)`, i)
			opts = append(opts, fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(cfg.filename, baseDir)))
			var flags []string
			// Each non-default option is appended to opts and referenced by
			// its 1-based position via a $%d placeholder, so the order of
			// these appends must match the placeholder numbers exactly.
			if cfg.opts.RowSeparator != '\n' {
				opts = append(opts, string(cfg.opts.RowSeparator))
				flags = append(flags, fmt.Sprintf("rows_terminated_by = $%d", len(opts)))
			}
			if cfg.opts.FieldSeparator != '\t' {
				opts = append(opts, string(cfg.opts.FieldSeparator))
				flags = append(flags, fmt.Sprintf("fields_terminated_by = $%d", len(opts)))
			}
			if cfg.opts.Enclose == roachpb.MySQLOutfileOptions_Always {
				opts = append(opts, string(cfg.opts.Encloser))
				flags = append(flags, fmt.Sprintf("fields_enclosed_by = $%d", len(opts)))
			}
			if cfg.opts.HasEscape {
				opts = append(opts, string(cfg.opts.Escape))
				flags = append(flags, fmt.Sprintf("fields_escaped_by = $%d", len(opts)))
			}
			if len(flags) > 0 {
				cmd += " WITH " + strings.Join(flags, ", ")
			}
			sqlDB.Exec(t, cmd, opts...)
			// Verify the imported string column against the testdata rows.
			for idx, row := range sqlDB.QueryStr(t, fmt.Sprintf("SELECT * FROM test%d ORDER BY i", i)) {
				expected, actual := testRows[idx].s, row[1]
				if expected == injectNull {
					expected = "NULL"
				}
				if expected != actual {
					t.Fatalf("expected row i=%s string to be %q, got %q", row[0], expected, actual)
				}
			}
			// Test if IMPORT INTO works here by testing that they produce the same
			// results as IMPORT TABLE.
			t.Run("import-into", func(t *testing.T) {
				defer sqlDB.Exec(t, fmt.Sprintf(`DROP TABLE into%d`, i))
				sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE into%d (i INT8 PRIMARY KEY, s text, b bytea)`, i))
				intoCmd := fmt.Sprintf(`IMPORT INTO into%d (i, s, b) DELIMITED DATA ($1)`, i)
				if len(flags) > 0 {
					intoCmd += " WITH " + strings.Join(flags, ", ")
				}
				sqlDB.Exec(t, intoCmd, opts...)
				importStr := sqlDB.QueryStr(t, fmt.Sprintf("SELECT * FROM test%d ORDER BY i", i))
				intoStr := sqlDB.QueryStr(t, fmt.Sprintf("SELECT * FROM into%d ORDER BY i", i))
				require.Equal(t, importStr, intoStr)
			})
			// Same check, but with the target table's columns declared in a
			// different order than the IMPORT INTO column list.
			t.Run("import-into-target-cols-reordered", func(t *testing.T) {
				defer sqlDB.Exec(t, fmt.Sprintf(`DROP TABLE into%d`, i))
				sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE into%d (b bytea, i INT8 PRIMARY KEY, s text)`, i))
				intoCmd := fmt.Sprintf(`IMPORT INTO into%d (i, s, b) DELIMITED DATA ($1)`, i)
				if len(flags) > 0 {
					intoCmd += " WITH " + strings.Join(flags, ", ")
				}
				sqlDB.Exec(t, intoCmd, opts...)
				colNames := []string{"i", "s", "b"}
				for _, colName := range colNames {
					importStr := sqlDB.QueryStr(t, fmt.Sprintf("SELECT (%s) FROM test%d ORDER BY i", colName, i))
					intoStr := sqlDB.QueryStr(t, fmt.Sprintf("SELECT (%s) FROM into%d ORDER BY i", colName, i))
					require.Equal(t, importStr, intoStr)
				}
			})
		})
	}
}
// TestImportPgCopy tests IMPORT ... PGCOPY DATA across the generated
// testdata configurations (delimiter and NULL-marker variants), and
// checks that IMPORT INTO produces the same results as IMPORT TABLE.
func TestImportPgCopy(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const (
		nodes = 3
	)
	ctx := context.Background()
	baseDir := filepath.Join("testdata", "pgcopy")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
	sqlDB.Exec(t, `CREATE DATABASE foo; SET DATABASE = foo`)
	testRows, configs := getPgCopyTestdata(t)
	for i, cfg := range configs {
		t.Run(cfg.name, func(t *testing.T) {
			var opts []interface{}
			cmd := fmt.Sprintf(`IMPORT TABLE test%d (i INT8 PRIMARY KEY, s text, b bytea) PGCOPY DATA ($1)`, i)
			opts = append(opts, fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(cfg.filename, baseDir)))
			var flags []string
			// Non-default options are appended to opts and referenced by
			// their 1-based position via $%d placeholders, so append order
			// must match the placeholder numbers.
			if cfg.opts.Delimiter != '\t' {
				opts = append(opts, string(cfg.opts.Delimiter))
				flags = append(flags, fmt.Sprintf("delimiter = $%d", len(opts)))
			}
			if cfg.opts.Null != `\N` {
				opts = append(opts, cfg.opts.Null)
				flags = append(flags, fmt.Sprintf("nullif = $%d", len(opts)))
			}
			if len(flags) > 0 {
				cmd += " WITH " + strings.Join(flags, ", ")
			}
			t.Log(cmd, opts)
			sqlDB.Exec(t, cmd, opts...)
			// Verify the imported string and bytes columns row by row.
			for idx, row := range sqlDB.QueryStr(t, fmt.Sprintf("SELECT * FROM test%d ORDER BY i", i)) {
				{
					expected, actual := testRows[idx].s, row[1]
					if expected == injectNull {
						expected = "NULL"
					}
					if expected != actual {
						t.Fatalf("expected row i=%s string to be %q, got %q", row[0], expected, actual)
					}
				}
				{
					expected, actual := testRows[idx].b, row[2]
					if expected == nil {
						expected = []byte("NULL")
					}
					if !bytes.Equal(expected, []byte(actual)) {
						t.Fatalf("expected row i=%s bytes to be %q, got %q", row[0], expected, actual)
					}
				}
			}
			// Test if IMPORT INTO works here by testing that they produce the same
			// results as IMPORT TABLE.
			t.Run("import-into", func(t *testing.T) {
				defer sqlDB.Exec(t, fmt.Sprintf(`DROP TABLE into%d`, i))
				sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE into%d (i INT8 PRIMARY KEY, s text, b bytea)`, i))
				intoCmd := fmt.Sprintf(`IMPORT INTO into%d (i, s, b) PGCOPY DATA ($1)`, i)
				if len(flags) > 0 {
					intoCmd += " WITH " + strings.Join(flags, ", ")
				}
				sqlDB.Exec(t, intoCmd, opts...)
				importStr := sqlDB.QueryStr(t, fmt.Sprintf("SELECT * FROM test%d ORDER BY i", i))
				intoStr := sqlDB.QueryStr(t, fmt.Sprintf("SELECT * FROM into%d ORDER BY i", i))
				require.Equal(t, importStr, intoStr)
			})
			// Same check, but with the target table's columns declared in a
			// different order than the IMPORT INTO column list.
			t.Run("import-into-target-cols-reordered", func(t *testing.T) {
				defer sqlDB.Exec(t, fmt.Sprintf(`DROP TABLE into%d`, i))
				sqlDB.Exec(t, fmt.Sprintf(`CREATE TABLE into%d (b bytea, s text, i INT8 PRIMARY KEY)`, i))
				intoCmd := fmt.Sprintf(`IMPORT INTO into%d (i, s, b) PGCOPY DATA ($1)`, i)
				if len(flags) > 0 {
					intoCmd += " WITH " + strings.Join(flags, ", ")
				}
				sqlDB.Exec(t, intoCmd, opts...)
				colNames := []string{"i", "s", "b"}
				for _, colName := range colNames {
					importStr := sqlDB.QueryStr(t, fmt.Sprintf("SELECT (%s) FROM test%d ORDER BY i", colName, i))
					intoStr := sqlDB.QueryStr(t, fmt.Sprintf("SELECT (%s) FROM into%d ORDER BY i", colName, i))
					require.Equal(t, importStr, intoStr)
				}
			})
		})
	}
}
// TestImportPgDump exercises IMPORT of pg_dump files: single-table and
// whole-database dumps, schema verification (PKs/indexes appear at the
// bottom of pg_dump output), sequences, and unsupported-usage errors.
func TestImportPgDump(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const (
		nodes = 3
	)
	ctx := context.Background()
	baseDir := filepath.Join("testdata")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
	sqlDB.Exec(t, `CREATE DATABASE foo; SET DATABASE = foo`)
	// nodelocal URIs are resolved relative to the server's ExternalIODir.
	simplePgTestRows, simpleFile := getSimplePostgresDumpTestdata(t)
	simple := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(simpleFile, baseDir))}
	secondTableRowCount, secondFile := getSecondPostgresDumpTestdata(t)
	second := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(secondFile, baseDir))}
	multitableFile := getMultiTablePostgresDumpTestdata(t)
	multitable := []interface{}{fmt.Sprintf("nodelocal://0%s", strings.TrimPrefix(multitableFile, baseDir))}
	const expectAll, expectSimple, expectSecond = 1, 2, 3
	for _, c := range []struct {
		name     string
		expected int
		query    string
		args     []interface{}
	}{
		{
			`read data only`,
			expectSimple,
			`IMPORT TABLE simple (
i INT8,
s text,
b bytea,
CONSTRAINT simple_pkey PRIMARY KEY (i),
UNIQUE INDEX simple_b_s_idx (b, s),
INDEX simple_s_idx (s)
) PGDUMP DATA ($1) WITH ignore_unsupported_statements`,
			simple,
		},
		{`single table dump`, expectSimple, `IMPORT TABLE simple FROM PGDUMP ($1) WITH ignore_unsupported_statements`, simple},
		{`second table dump`, expectSecond, `IMPORT TABLE second FROM PGDUMP ($1) WITH ignore_unsupported_statements`, second},
		{`simple from multi`, expectSimple, `IMPORT TABLE simple FROM PGDUMP ($1) WITH ignore_unsupported_statements`, multitable},
		{`second from multi`, expectSecond, `IMPORT TABLE second FROM PGDUMP ($1) WITH ignore_unsupported_statements`, multitable},
		{`all from multi`, expectAll, `IMPORT PGDUMP ($1) WITH ignore_unsupported_statements`, multitable},
	} {
		t.Run(c.name, func(t *testing.T) {
			// Each case starts from a clean slate.
			sqlDB.Exec(t, `DROP TABLE IF EXISTS simple, second`)
			sqlDB.Exec(t, c.query, c.args...)
			if c.expected == expectSimple || c.expected == expectAll {
				// Verify table schema because PKs and indexes are at the bottom of pg_dump.
				sqlDB.CheckQueryResults(t, `SHOW CREATE TABLE simple`, [][]string{{
					"simple", `CREATE TABLE public.simple (
i INT8 NOT NULL,
s STRING NULL,
b BYTES NULL,
CONSTRAINT simple_pkey PRIMARY KEY (i ASC),
UNIQUE INDEX simple_b_s_idx (b ASC, s ASC),
INDEX simple_s_idx (s ASC),
FAMILY "primary" (i, s, b)
)`,
				}})
				rows := sqlDB.QueryStr(t, "SELECT * FROM simple ORDER BY i")
				if a, e := len(rows), len(simplePostgresTestRows); a != e {
					t.Fatalf("got %d rows, expected %d", a, e)
				}
				for idx, row := range rows {
					{
						expected, actual := simplePostgresTestRows[idx].s, row[1]
						if expected == injectNull {
							expected = "NULL"
						}
						if expected != actual {
							t.Fatalf("expected row i=%s string to be %q, got %q", row[0], expected, actual)
						}
					}
					{
						expected, actual := simplePgTestRows[idx].b, row[2]
						if expected == nil {
							expected = []byte("NULL")
						}
						if !bytes.Equal(expected, []byte(actual)) {
							t.Fatalf("expected row i=%s bytes to be %q, got %q", row[0], expected, actual)
						}
					}
				}
			}
			if c.expected == expectSecond || c.expected == expectAll {
				// Verify table schema because PKs and indexes are at the bottom of pg_dump.
				sqlDB.CheckQueryResults(t, `SHOW CREATE TABLE second`, [][]string{{
					"second", `CREATE TABLE public.second (
i INT8 NOT NULL,
s STRING NULL,
CONSTRAINT second_pkey PRIMARY KEY (i ASC),
FAMILY "primary" (i, s)
)`,
				}})
				res := sqlDB.QueryStr(t, "SELECT * FROM second ORDER BY i")
				if expected, actual := secondTableRowCount, len(res); expected != actual {
					t.Fatalf("expected %d, got %d", expected, actual)
				}
				for _, row := range res {
					if i, s := row[0], row[1]; i != s {
						t.Fatalf("expected %s = %s", i, s)
					}
				}
			}
			// Tables not targeted by the import must not have been created.
			if c.expected == expectSecond {
				sqlDB.ExpectErr(t, "does not exist", `SELECT 1 FROM simple LIMIT 1`)
			}
			if c.expected == expectSimple {
				sqlDB.ExpectErr(t, "does not exist", `SELECT 1 FROM second LIMIT 1`)
			}
			if c.expected == expectAll {
				// The whole-DB dump also carries a sequence-backed table.
				sqlDB.CheckQueryResults(t, `SHOW CREATE TABLE seqtable`, [][]string{{
					"seqtable", `CREATE TABLE public.seqtable (
a INT8 NULL DEFAULT nextval('public.a_seq'::REGCLASS),
b INT8 NULL,
rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(),
CONSTRAINT "primary" PRIMARY KEY (rowid ASC),
FAMILY "primary" (a, b, rowid)
)`,
				}})
				sqlDB.CheckQueryResults(t, `SHOW CREATE SEQUENCE a_seq`, [][]string{{
					"a_seq", `CREATE SEQUENCE public.a_seq MINVALUE 1 MAXVALUE 9223372036854775807 INCREMENT 1 START 1`,
				}})
				sqlDB.CheckQueryResults(t, `select last_value from a_seq`, [][]string{{"7"}})
				sqlDB.CheckQueryResults(t,
					`SELECT * FROM seqtable ORDER BY a`,
					sqlDB.QueryStr(t, `select a+1, a*10 from generate_series(0, 6) a`),
				)
				sqlDB.CheckQueryResults(t, `select last_value from a_seq`, [][]string{{"7"}})
				// This can sometimes retry, so the next value might not be 8.
				sqlDB.Exec(t, `INSERT INTO seqtable (b) VALUES (70)`)
				sqlDB.CheckQueryResults(t, `select last_value >= 8 from a_seq`, [][]string{{"true"}})
				sqlDB.CheckQueryResults(t,
					`SELECT b FROM seqtable WHERE a = (SELECT last_value FROM a_seq)`,
					[][]string{{"70"}},
				)
			}
		})
	}
	t.Run("glob-multi", func(t *testing.T) {
		sqlDB.ExpectErr(t, "SQL dump files must be imported individually", `IMPORT PGDUMP 'nodelocal://0/*'`)
	})
	t.Run("target-cols-reordered", func(t *testing.T) {
		data := `
CREATE TABLE "t" ("a" INT, "b" INT DEFAULT 42, "c" INT);
INSERT INTO "t" ("c", "a") VALUES ('1', '2'), ('3', '4');
`
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.Method == "GET" {
				_, _ = w.Write([]byte(data))
			}
		}))
		defer srv.Close()
		defer sqlDB.Exec(t, "DROP TABLE t")
		sqlDB.Exec(t, "IMPORT PGDUMP ($1)", srv.URL)
		// Unmentioned column b takes its default; c and a follow the INSERT order.
		sqlDB.CheckQueryResults(t, `SELECT * from t`, [][]string{{"2", "42", "1"}, {"4", "42", "3"}})
	})
	t.Run("import-into-not-supported", func(t *testing.T) {
		data := `INSERT INTO t VALUES (1, 2), (3, 4)`
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.Method == "GET" {
				_, _ = w.Write([]byte(data))
			}
		}))
		defer srv.Close()
		defer sqlDB.Exec(t, "DROP TABLE t")
		sqlDB.Exec(t, "CREATE TABLE t (a INT, b INT)")
		sqlDB.ExpectErr(t,
			"PGDUMP file format is currently unsupported by IMPORT INTO",
			fmt.Sprintf(`IMPORT INTO t (a, b) PGDUMP DATA (%q)`, srv.URL))
	})
}
// TestImportPgDumpIgnoredStmts checks IMPORT PGDUMP handling of
// statements it cannot parse or does not support: they fail the import
// by default, are skipped with ignore_unsupported_statements, and are
// written to external-storage log files when log_ignored_statements is
// also supplied.
func TestImportPgDumpIgnoredStmts(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	tc := testcluster.StartTestCluster(t, 1 /* nodes */, base.TestClusterArgs{})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	data := `
-- Statements that CRDB cannot parse.
CREATE TRIGGER conditions_set_updated_at BEFORE UPDATE ON conditions FOR EACH ROW EXECUTE PROCEDURE set_updated_at();
REVOKE ALL ON SEQUENCE knex_migrations_id_seq FROM PUBLIC;
REVOKE ALL ON SEQUENCE knex_migrations_id_seq FROM database;
GRANT ALL ON SEQUENCE knex_migrations_id_seq TO database;
GRANT SELECT ON SEQUENCE knex_migrations_id_seq TO opentrials_readonly;
COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
ALTER AGGREGATE myavg(integer) RENAME TO my_average;
ALTER DOMAIN zipcode SET NOT NULL;
-- Valid statement.
CREATE TABLE foo (id INT);
CREATE FUNCTION public.isnumeric(text) RETURNS boolean
LANGUAGE sql
AS $_$
SELECT $1 ~ '^[0-9]+$'
$_$;
ALTER FUNCTION public.isnumeric(text) OWNER TO roland;
ALTER DEFAULT PRIVILEGES FOR ROLE rolename IN SCHEMA "schemaname" REVOKE ALL ON TABLES FROM rolename;
ALTER TABLE "database"."table" ALTER COLUMN "Id" ADD GENERATED BY DEFAULT AS IDENTITY (
SEQUENCE NAME "database"."sequencename"
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1
);
COPY db.table (col1, col2, col3, col4) FROM '$$PATH$$/3057.dat';
GRANT USAGE ON SCHEMA "schemaname" TO davidt WITH GRANT OPTION;
-- Valid statements.
INSERT INTO foo VALUES (1), (2), (3);
CREATE TABLE t (i INT8);
-- Statements that CRDB can parse, but IMPORT does not support.
-- These are processed during the schema pass of IMPORT.
COMMENT ON TABLE t IS 'This should be skipped';
COMMENT ON DATABASE t IS 'This should be skipped';
COMMENT ON COLUMN t IS 'This should be skipped';
-- Statements that CRDB can parse, but IMPORT does not support.
-- These are processed during the data ingestion pass of IMPORT.
SELECT pg_catalog.set_config('search_path', '', false);
DELETE FROM geometry_columns WHERE f_table_name = 'nyc_census_blocks' AND f_table_schema = 'public';
`
	// Serve the dump above over HTTP for the IMPORT statements to consume.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			_, _ = w.Write([]byte(data))
		}
	}))
	defer srv.Close()
	t.Run("ignore-unsupported", func(t *testing.T) {
		sqlDB.Exec(t, "CREATE DATABASE foo; USE foo;")
		sqlDB.Exec(t, "IMPORT PGDUMP ($1) WITH ignore_unsupported_statements", srv.URL)
		// Check that statements that are not expected to be ignored, are still
		// processed.
		sqlDB.CheckQueryResults(t, "SELECT * FROM foo", [][]string{{"1"}, {"2"}, {"3"}})
	})
	t.Run("dont-ignore-unsupported", func(t *testing.T) {
		sqlDB.Exec(t, "CREATE DATABASE foo1; USE foo1;")
		sqlDB.ExpectErr(t, "syntax error", "IMPORT PGDUMP ($1)", srv.URL)
	})
	t.Run("require-both-unsupported-options", func(t *testing.T) {
		// log_ignored_statements is invalid without
		// ignore_unsupported_statements.
		sqlDB.Exec(t, "CREATE DATABASE foo2; USE foo2;")
		ignoredLog := `userfile:///ignore`
		sqlDB.ExpectErr(t, "cannot log unsupported PGDUMP stmts without `ignore_unsupported_statements` option",
			"IMPORT PGDUMP ($1) WITH log_ignored_statements=$2", srv.URL, ignoredLog)
	})
	t.Run("log-unsupported-stmts", func(t *testing.T) {
		sqlDB.Exec(t, "CREATE DATABASE foo3; USE foo3;")
		ignoredLog := `userfile:///ignore`
		// Cap each log file at 10 statements so the output below is split
		// across multiple files.
		defer testingSetMaxLogIgnoredImportStatements(10 /* maxLogSize */)()
		var importJobID int
		var unused interface{}
		sqlDB.QueryRow(t, "IMPORT PGDUMP ($1) WITH ignore_unsupported_statements, "+
			"log_ignored_statements=$2", srv.URL, ignoredLog).Scan(&importJobID, &unused, &unused,
			&unused, &unused, &unused)
		// Check that statements which are not expected to be ignored, are still
		// processed.
		sqlDB.CheckQueryResults(t, "SELECT * FROM foo", [][]string{{"1"}, {"2"}, {"3"}})
		// Read the unsupported log and verify its contents.
		store, err := cloudimpl.ExternalStorageFromURI(ctx, ignoredLog,
			base.ExternalIODirConfig{},
			tc.Servers[0].ClusterSettings(),
			blobs.TestEmptyBlobClientFactory,
			security.RootUserName(),
			tc.Servers[0].InternalExecutor().(*sql.InternalExecutor), tc.Servers[0].DB())
		require.NoError(t, err)
		defer store.Close()
		// We expect there to be two log files since we have 13 unsupported statements.
		dirName := fmt.Sprintf("import%d", importJobID)
		// checkFiles asserts that each numbered log file under logSubdir has
		// exactly the expected contents, in order.
		checkFiles := func(expectedFileContent []string, logSubdir string) {
			files, err := store.ListFiles(ctx, fmt.Sprintf("*/%s/*", logSubdir))
			require.NoError(t, err)
			for i, file := range files {
				require.Equal(t, file, path.Join(dirName, logSubdir, fmt.Sprintf("%d.log", i)))
				content, err := store.ReadFile(ctx, file)
				require.NoError(t, err)
				descBytes, err := ioutil.ReadAll(content)
				require.NoError(t, err)
				require.Equal(t, []byte(expectedFileContent[i]), descBytes)
			}
		}
		schemaFileContents := []string{
			`create trigger: could not be parsed
revoke privileges on sequence: could not be parsed
revoke privileges on sequence: could not be parsed
grant privileges on sequence: could not be parsed
grant privileges on sequence: could not be parsed
comment on extension: could not be parsed
create extension if not exists with: could not be parsed
alter aggregate: could not be parsed
alter domain: could not be parsed
create function: could not be parsed
`,
			`alter function: could not be parsed
alter default privileges: could not be parsed
alter table alter column add: could not be parsed
copy from unsupported format: could not be parsed
grant privileges on schema with: could not be parsed
COMMENT ON TABLE t IS 'This should be skipped': unsupported by IMPORT
COMMENT ON DATABASE t IS 'This should be skipped': unsupported by IMPORT
COMMENT ON COLUMN t IS 'This should be skipped': unsupported by IMPORT
unsupported function call: set_config in stmt: SELECT set_config('search_path', '', false): unsupported by IMPORT
`,
		}
		checkFiles(schemaFileContents, pgDumpUnsupportedSchemaStmtLog)
		ingestionFileContents := []string{
			`unsupported 3 fn args in select: ['search_path' '' false]: unsupported by IMPORT
unsupported *tree.Delete statement: DELETE FROM geometry_columns WHERE (f_table_name = 'nyc_census_blocks') AND (f_table_schema = 'public'): unsupported by IMPORT
`,
		}
		checkFiles(ingestionFileContents, pgDumpUnsupportedDataStmtLog)
	})
}
// TestImportPgDumpGeo tests that a file with SQLFn classes can be
// imported. These are functions like AddGeometryColumn which create and
// execute SQL when called (!). They are, for example, used by shp2pgsql
// (https://manpages.debian.org/stretch/postgis/shp2pgsql.1.en.html).
func TestImportPgDumpGeo(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const nodes = 1
	ctx := context.Background()
	baseDir := filepath.Join("testdata", "pgdump")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	t.Run("geo_shp2pgsql.sql", func(t *testing.T) {
		tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
		defer tc.Stopper().Stop(ctx)
		conn := tc.Conns[0]
		sqlDB := sqlutils.MakeSQLRunner(conn)
		// Import the dump into one database, then execute the same dump
		// directly in a second database, and compare the two.
		sqlDB.Exec(t, `CREATE DATABASE importdb; SET DATABASE = importdb`)
		sqlDB.Exec(t, "IMPORT PGDUMP 'nodelocal://0/geo_shp2pgsql.sql' WITH ignore_unsupported_statements")
		sqlDB.Exec(t, `CREATE DATABASE execdb; SET DATABASE = execdb`)
		geoSQL, err := ioutil.ReadFile(filepath.Join(baseDir, "geo_shp2pgsql.sql"))
		if err != nil {
			t.Fatal(err)
		}
		sqlDB.Exec(t, string(geoSQL))
		// Verify both created tables are identical.
		importCreate := sqlDB.QueryStr(t, "SELECT create_statement FROM [SHOW CREATE importdb.nyc_census_blocks]")
		// Families are slightly different due to rowid showing up in exec but
		// not import (possibly due to the ALTER TABLE statement that makes
		// gid a primary key), so add that into import to match exec.
		importCreate[0][0] = strings.Replace(importCreate[0][0], "boroname, geom", "boroname, rowid, geom", 1)
		// The rowid column is implicitly created as ALTER PRIMARY KEY only comes into effect later.
		// As such, insert the line.
		importCreate[0][0] = strings.Replace(
			importCreate[0][0],
			"boroname VARCHAR(32) NULL",
			"boroname VARCHAR(32) NULL,\n\trowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid()",
			1,
		)
		sqlDB.CheckQueryResults(t, "SELECT create_statement FROM [SHOW CREATE execdb.nyc_census_blocks]", importCreate)
		// Compare the row data between the imported and executed versions.
		importCols := "blkid, popn_total, popn_white, popn_black, popn_nativ, popn_asian, popn_other, boroname"
		importSelect := sqlDB.QueryStr(t, fmt.Sprintf(
			"SELECT (%s) FROM importdb.nyc_census_blocks ORDER BY PRIMARY KEY importdb.nyc_census_blocks",
			importCols,
		))
		sqlDB.CheckQueryResults(t, fmt.Sprintf(
			"SELECT (%s) FROM execdb.nyc_census_blocks ORDER BY PRIMARY KEY execdb.nyc_census_blocks",
			importCols,
		), importSelect)
	})
	t.Run("geo_ogr2ogr.sql", func(t *testing.T) {
		tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
		defer tc.Stopper().Stop(ctx)
		conn := tc.Conns[0]
		sqlDB := sqlutils.MakeSQLRunner(conn)
		// Same import-vs-execute comparison for an ogr2ogr-produced dump.
		sqlDB.Exec(t, `CREATE DATABASE importdb; SET DATABASE = importdb`)
		sqlDB.Exec(t, "IMPORT PGDUMP 'nodelocal://0/geo_ogr2ogr.sql' WITH ignore_unsupported_statements")
		sqlDB.Exec(t, `CREATE DATABASE execdb; SET DATABASE = execdb`)
		geoSQL, err := ioutil.ReadFile(filepath.Join(baseDir, "geo_ogr2ogr.sql"))
		if err != nil {
			t.Fatal(err)
		}
		// We cannot process DELETE FROM geometry_columns statement, so ignore it.
		replacedSQL := regexp.MustCompile("DELETE FROM[^;]*").ReplaceAll(geoSQL, []byte(""))
		sqlDB.Exec(t, string(replacedSQL))
		// Verify both created tables are identical.
		importCreate := sqlDB.QueryStr(t, `SELECT create_statement FROM [SHOW CREATE importdb."HydroNode"]`)
		sqlDB.CheckQueryResults(t, `SELECT create_statement FROM [SHOW CREATE execdb."HydroNode"]`, importCreate)
	})
}
// TestImportPgDumpDropTable checks IMPORT PGDUMP handling of DROP TABLE
// statements in the dump: an error if the target table already exists in
// the target database (with or without IF EXISTS), and a no-op if it
// does not.
func TestImportPgDumpDropTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	baseDir := filepath.Join("testdata")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	// The served dump is whatever `data` holds at request time; each
	// subtest mutates it before issuing IMPORT.
	var data string
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			_, _ = w.Write([]byte(data))
		}
	}))
	defer srv.Close()
	// If the target table for a DROP exists, we throw an error.
	t.Run("table exists", func(t *testing.T) {
		// Set up table `t` exists for testing.
		sqlDB.Exec(t, `DROP TABLE IF EXISTS t; CREATE TABLE t (a INT);`)
		// Import PGDump data which includes DROP TABLE.
		data = `DROP TABLE t; CREATE TABLE t (a INT); INSERT INTO t VALUES (4);`
		sqlDB.ExpectErr(t, `drop table "t" and then retry the import`, `IMPORT PGDUMP ($1)`, srv.URL)
		// Also expect error on existing table with IF EXISTS.
		data = `DROP TABLE IF EXISTS t; CREATE TABLE t (a INT); INSERT INTO t VALUES (4);`
		sqlDB.ExpectErr(t, `drop table "t" and then retry the import`, `IMPORT PGDUMP ($1)`, srv.URL)
		// Cleanup.
		sqlDB.Exec(t, `DROP TABLE t`)
	})
	// If the target table for a DROP does not exist, we ignore the statement.
	t.Run("table does not exist", func(t *testing.T) {
		// Set up table `t` does not exist for testing.
		sqlDB.Exec(t, `DROP TABLE IF EXISTS t;`)
		// No error should be thrown with DROP statement.
		data = `DROP TABLE t; CREATE TABLE t (a INT); INSERT INTO t VALUES (4);`
		expected := [][]string{{"4"}}
		sqlDB.Exec(t, `IMPORT PGDUMP ($1)`, srv.URL)
		sqlDB.CheckQueryResults(t, `SELECT * FROM t`, expected)
		// Drop the table `t` that pgdump imported.
		// Now table `t` does not exist for the IF EXISTS example.
		sqlDB.Exec(t, `DROP TABLE t;`)
		// Also expect no errors and successful import with IF EXISTS.
		data = `DROP TABLE IF EXISTS t; CREATE TABLE t (a INT); INSERT INTO t VALUES (4);`
		sqlDB.Exec(t, `IMPORT PGDUMP ($1)`, srv.URL)
		sqlDB.CheckQueryResults(t, `SELECT * FROM t`, expected)
		// Cleanup.
		sqlDB.Exec(t, `DROP TABLE t`)
	})
	t.Run("multiple tables and drops", func(t *testing.T) {
		// Set up.
		sqlDB.Exec(t, `DROP TABLE IF EXISTS t, u;`)
		// Import table `t` successfully.
		data = `DROP TABLE t; CREATE TABLE t (a INT)`
		sqlDB.Exec(t, `IMPORT PGDUMP ($1)`, srv.URL)
		// Table `u` does not exist, so create it successfully.
		// Table `t` exists, so an error is thrown for table `t`.
		data = `DROP TABLE u;
CREATE TABLE u (a INT);
INSERT INTO u VALUES (55);
DROP TABLE t;`
		sqlDB.ExpectErr(t, `drop table "t" and then retry the import`, `IMPORT PGDUMP ($1)`, srv.URL)
		// Since the PGDump failed on error, table `u` should not exist.
		sqlDB.ExpectErr(t, `does not exist`, `SELECT * FROM u`)
	})
}
// TestImportPgDumpSchemas exercises IMPORT PGDUMP support for user-defined
// schemas (UDS): importing a dump that creates several schemas, the rules for
// importing a target table inside a schema, and cleanup of schemas/tables
// when an import job fails partway through.
func TestImportPgDumpSchemas(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const nodes = 1
	ctx := context.Background()
	baseDir := filepath.Join("testdata", "pgdump")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	// Simple schema test which creates 3 schemas with a single `test` table in
	// each schema.
	t.Run("schema.sql", func(t *testing.T) {
		tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
		defer tc.Stopper().Stop(ctx)
		conn := tc.Conns[0]
		sqlDB := sqlutils.MakeSQLRunner(conn)
		sqlDB.Exec(t, `CREATE DATABASE schemadb; SET DATABASE = schemadb`)
		sqlDB.Exec(t, "IMPORT PGDUMP 'nodelocal://0/schema.sql' WITH ignore_unsupported_statements")
		// Check that we have imported 4 schemas.
		expectedSchemaNames := [][]string{{"bar"}, {"baz"}, {"foo"}, {"public"}}
		sqlDB.CheckQueryResults(t,
			`SELECT schema_name FROM [SHOW SCHEMAS] WHERE owner IS NOT NULL ORDER BY schema_name`,
			expectedSchemaNames)
		// Check that we have a test table in each schema with the expected content.
		expectedContent := [][]string{{"1", "abc"}, {"2", "def"}}
		expectedTableName := "test"
		expectedTableName2 := "test2"
		expectedSeqName := "testseq"
		sqlDB.CheckQueryResults(t, `SELECT schema_name,
table_name FROM [SHOW TABLES] ORDER BY (schema_name, table_name)`,
			[][]string{{"bar", expectedTableName}, {"bar", expectedTableName2}, {"bar", expectedSeqName},
				{"baz", expectedTableName}, {"foo", expectedTableName}, {"public", expectedTableName}})
		for _, schemaCollection := range expectedSchemaNames {
			for _, schema := range schemaCollection {
				sqlDB.CheckQueryResults(t, fmt.Sprintf(`SELECT * FROM %s.%s`, schema, expectedTableName),
					expectedContent)
			}
		}
		// There should be two jobs, the import and a job updating the parent
		// database descriptor.
		sqlDB.CheckQueryResults(t, `SELECT job_type, status FROM [SHOW JOBS] ORDER BY job_type`,
			[][]string{{"IMPORT", "succeeded"}, {"SCHEMA CHANGE", "succeeded"}})
		// Attempt to rename one of the imported schema's so as to verify that
		// parent database descriptor has been updated with information about the
		// imported schemas.
		sqlDB.Exec(t, `ALTER SCHEMA foo RENAME TO biz`)
		// Ensure that FK relationship works fine with UDS.
		sqlDB.Exec(t, `INSERT INTO bar.test VALUES (100, 'a')`)
		sqlDB.ExpectErr(t, "violates foreign key constraint \"testfk\"", `INSERT INTO bar.test2 VALUES (101, 'a')`)
	})
	t.Run("target-table-schema.sql", func(t *testing.T) {
		tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
		defer tc.Stopper().Stop(ctx)
		conn := tc.Conns[0]
		sqlDB := sqlutils.MakeSQLRunner(conn)
		sqlDB.Exec(t, `CREATE DATABASE schemadb; SET DATABASE = schemadb`)
		// Importing into a table in a schema that does not exist yet fails.
		sqlDB.ExpectErr(t, "does not exist: \"schemadb.bar.test\"",
			"IMPORT TABLE schemadb.bar.test FROM PGDUMP ('nodelocal://0/schema."+
				"sql') WITH ignore_unsupported_statements")
		// Create the user defined schema so that we can get past the "not found"
		// error.
		// We still expect an error as we do not support importing a target table in
		// a UDS.
		sqlDB.Exec(t, `CREATE SCHEMA bar`)
		sqlDB.ExpectErr(t, "cannot use IMPORT with a user defined schema",
			"IMPORT TABLE schemadb.bar.test FROM PGDUMP ('nodelocal://0/schema."+
				"sql') WITH ignore_unsupported_statements")
		// We expect the import of a target table in the public schema to work.
		// All three ways of naming the target table should resolve to the same
		// public-schema table.
		for _, target := range []string{"schemadb.public.test", "schemadb.test", "test"} {
			sqlDB.Exec(t, fmt.Sprintf("IMPORT TABLE %s FROM PGDUMP ('nodelocal://0/schema."+
				"sql') WITH ignore_unsupported_statements", target))
			// Check that we have a test table in each schema with the expected content.
			expectedContent := [][]string{{"1", "abc"}, {"2", "def"}}
			expectedTableName := "test"
			sqlDB.CheckQueryResults(t, `SELECT schema_name,
table_name FROM [SHOW TABLES] ORDER BY (schema_name, table_name)`,
				[][]string{{"public", expectedTableName}})
			// Check that the target table in the public schema was imported correctly.
			sqlDB.CheckQueryResults(t, fmt.Sprintf(`SELECT * FROM %s`, expectedTableName), expectedContent)
			sqlDB.Exec(t, `DROP TABLE schemadb.public.test`)
		}
		sqlDB.CheckQueryResults(t,
			`SELECT schema_name FROM [SHOW SCHEMAS] WHERE owner <> 'NULL' ORDER BY schema_name`,
			[][]string{{"bar"}, {"public"}})
	})
	t.Run("inject-error-ensure-cleanup", func(t *testing.T) {
		defer gcjob.SetSmallMaxGCIntervalForTest()()
		tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
		defer tc.Stopper().Stop(ctx)
		conn := tc.Conns[0]
		sqlDB := sqlutils.MakeSQLRunner(conn)
		kvDB := tc.Server(0).DB()
		// Timestamp taken before the import so cleanup jobs can be filtered by
		// creation time below.
		beforeImport, err := tree.MakeDTimestampTZ(tc.Server(0).Clock().Now().GoTime(), time.Millisecond)
		if err != nil {
			t.Fatal(err)
		}
		// Install a resumer knob that forces every import job to fail right
		// after ingestion, so we can observe the cleanup path.
		for i := range tc.Servers {
			tc.Servers[i].JobRegistry().(*jobs.Registry).TestingResumerCreationKnobs =
				map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
					jobspb.TypeImport: func(raw jobs.Resumer) jobs.Resumer {
						r := raw.(*importResumer)
						r.testingKnobs.afterImport = func(_ backupccl.RowCount) error {
							return errors.New("testing injected failure")
						}
						return r
					},
				}
		}
		sqlDB.Exec(t, `CREATE DATABASE failedimportpgdump; SET DATABASE = failedimportpgdump`)
		// Hit a failure during import.
		sqlDB.ExpectErr(
			t, `testing injected failure`, `IMPORT PGDUMP 'nodelocal://0/schema.sql' WITH ignore_unsupported_statements`,
		)
		// Nudge the registry to quickly adopt the job.
		tc.Server(0).JobRegistry().(*jobs.Registry).TestingNudgeAdoptionQueue()
		dbID := sqlutils.QueryDatabaseID(t, sqlDB.DB, "failedimportpgdump")
		// In the case of the test, the ID of the 3 schemas that will be cleaned up
		// due to the failed import will be consecutive IDs after the ID of the
		// empty database it was created in.
		schemaIDs := []descpb.ID{descpb.ID(dbID + 1), descpb.ID(dbID + 2), descpb.ID(dbID + 3)}
		// The table IDs are allocated after the schemas are created. There is one
		// extra table in the "public" schema.
		tableIDs := []descpb.ID{descpb.ID(dbID + 4), descpb.ID(dbID + 5), descpb.ID(dbID + 6),
			descpb.ID(dbID + 7)}
		// At this point we expect to see three jobs related to the cleanup.
		// - SCHEMA CHANGE GC job for the table cleanup.
		// - SCHEMA CHANGE job to drop the schemas.
		// - SCHEMA CHANGE job to update the database descriptor with dropped
		// schemas.
		// Ensure that a GC job was created, and wait for it to finish.
		doneGCQuery := fmt.Sprintf(
			"SELECT count(*) FROM [SHOW JOBS] WHERE job_type = '%s' AND status = '%s' AND created > %s",
			"SCHEMA CHANGE GC", jobs.StatusSucceeded, beforeImport.String(),
		)
		doneSchemaDropQuery := fmt.Sprintf(
			"SELECT count(*) FROM [SHOW JOBS] WHERE job_type = '%s' AND status = '%s' AND description"+
				" LIKE '%s'", "SCHEMA CHANGE", jobs.StatusSucceeded, "dropping schemas%")
		doneDatabaseUpdateQuery := fmt.Sprintf(
			"SELECT count(*) FROM [SHOW JOBS] WHERE job_type = '%s' AND status = '%s' AND description"+
				" LIKE '%s'", "SCHEMA CHANGE", jobs.StatusSucceeded, "updating parent database%")
		sqlDB.CheckQueryResultsRetry(t, doneGCQuery, [][]string{{"1"}})
		sqlDB.CheckQueryResultsRetry(t, doneSchemaDropQuery, [][]string{{"1"}})
		sqlDB.CheckQueryResultsRetry(t, doneDatabaseUpdateQuery, [][]string{{"1"}})
		for _, schemaID := range schemaIDs {
			// Expect that the schema descriptor is deleted.
			if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
				_, err := catalogkv.MustGetTableDescByID(ctx, txn, keys.SystemSQLCodec, schemaID)
				if !testutils.IsError(err, "descriptor not found") {
					return err
				}
				return nil
			}); err != nil {
				t.Fatal(err)
			}
		}
		for _, tableID := range tableIDs {
			// Expect that the table descriptor is deleted.
			if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
				_, err := catalogkv.MustGetTableDescByID(ctx, txn, keys.SystemSQLCodec, tableID)
				if !testutils.IsError(err, "descriptor not found") {
					return err
				}
				return nil
			}); err != nil {
				t.Fatal(err)
			}
		}
		// As a final sanity check that the schemas have been removed.
		sqlDB.CheckQueryResults(t, `SELECT schema_name FROM [SHOW SCHEMAS] WHERE owner IS NOT NULL`,
			[][]string{{"public"}})
		// Check that the database descriptor has been updated with the removed schemas.
		sqlDB.ExpectErr(t, "unknown schema \"foo\"", `ALTER SCHEMA foo RENAME TO biz`)
	})
}
// TestImportCockroachDump imports a dump produced by cockroach dump via
// IMPORT PGDUMP and verifies row contents, table fingerprints, and the
// resulting SHOW CREATE output (including the NOT VALID foreign key).
func TestImportCockroachDump(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const (
		nodes = 3
	)
	ctx := context.Background()
	baseDir := filepath.Join("testdata")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	sqlDB.Exec(t, "IMPORT PGDUMP ($1) WITH ignore_unsupported_statements", "nodelocal://0/cockroachdump/dump.sql")
	// Row contents of both imported tables.
	sqlDB.CheckQueryResults(t, "SELECT * FROM t ORDER BY i", [][]string{
		{"1", "test"},
		{"2", "other"},
	})
	sqlDB.CheckQueryResults(t, "SELECT * FROM a", [][]string{
		{"2"},
	})
	// Fingerprints pin the exact imported KV contents per index.
	sqlDB.CheckQueryResults(t, "SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE t", [][]string{
		{"primary", "-6413178410144704641"},
		{"t_t_idx", "-4841734847805280813"},
	})
	sqlDB.CheckQueryResults(t, "SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE a", [][]string{
		{"primary", "-5808590958014384147"},
	})
	// The expected strings below must match SHOW CREATE output byte-for-byte.
	sqlDB.CheckQueryResults(t, "SHOW CREATE TABLE t", [][]string{
		{"t", `CREATE TABLE public.t (
	i INT8 NOT NULL,
	t STRING NULL,
	CONSTRAINT "primary" PRIMARY KEY (i ASC),
	INDEX t_t_idx (t ASC),
	FAMILY "primary" (i, t)
)`},
	})
	sqlDB.CheckQueryResults(t, "SHOW CREATE TABLE a", [][]string{
		{"a", `CREATE TABLE public.a (
	i INT8 NOT NULL,
	CONSTRAINT "primary" PRIMARY KEY (i ASC),
	CONSTRAINT fk_i_ref_t FOREIGN KEY (i) REFERENCES public.t(i) NOT VALID,
	FAMILY "primary" (i)
)`},
	})
}
// TestCreateStatsAfterImport checks that automatic table statistics are
// collected for tables populated via IMPORT PGDUMP.
func TestCreateStatsAfterImport(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// Speed up automatic stats collection for the test, restoring the
	// package defaults when we are done.
	oldRefreshInterval := stats.DefaultRefreshInterval
	oldAsOf := stats.DefaultAsOfTime
	defer func() {
		stats.DefaultRefreshInterval = oldRefreshInterval
		stats.DefaultAsOfTime = oldAsOf
	}()
	stats.DefaultRefreshInterval = time.Millisecond
	stats.DefaultAsOfTime = time.Microsecond
	ctx := context.Background()
	serverArgs := base.TestServerArgs{ExternalIODir: filepath.Join("testdata")}
	tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: serverArgs})
	defer tc.Stopper().Stop(ctx)
	runner := sqlutils.MakeSQLRunner(tc.Conns[0])
	runner.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=true`)
	runner.Exec(t, "IMPORT PGDUMP ($1) WITH ignore_unsupported_statements", "nodelocal://0/cockroachdump/dump.sql")
	// Verify that statistics have been created.
	runner.CheckQueryResultsRetry(t,
		`SELECT statistics_name, column_names, row_count, distinct_count, null_count
FROM [SHOW STATISTICS FOR TABLE t]`,
		[][]string{
			{"__auto__", "{i}", "2", "2", "0"},
			{"__auto__", "{t}", "2", "2", "0"},
		})
	runner.CheckQueryResultsRetry(t,
		`SELECT statistics_name, column_names, row_count, distinct_count, null_count
FROM [SHOW STATISTICS FOR TABLE a]`,
		[][]string{
			{"__auto__", "{i}", "1", "1", "0"},
		})
}
// TestImportAvro runs a table of IMPORT ... AVRO DATA scenarios covering OCF
// files, JSON-record and binary-record formats (with inline and URI schemas),
// strict vs. relaxed field validation, and importing into a table in a
// user-defined schema.
func TestImportAvro(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const (
		nodes = 3
	)
	ctx := context.Background()
	baseDir := filepath.Join("testdata", "avro")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
	sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
	sqlDB.Exec(t, `CREATE DATABASE foo; SET DATABASE = foo`)
	// nodelocal URIs for the various fixture encodings of the same data.
	simpleOcf := fmt.Sprintf("nodelocal://0/%s", "simple.ocf")
	simpleSchemaURI := fmt.Sprintf("nodelocal://0/%s", "simple-schema.json")
	simpleJSON := fmt.Sprintf("nodelocal://0/%s", "simple-sorted.json")
	simplePrettyJSON := fmt.Sprintf("nodelocal://0/%s", "simple-sorted.pjson")
	simpleBinRecords := fmt.Sprintf("nodelocal://0/%s", "simple-sorted-records.avro")
	tableSchema := fmt.Sprintf("nodelocal://0/%s", "simple-schema.sql")
	// Read the Avro schema fixture so it can also be passed inline via the
	// `schema` option.
	data, err := ioutil.ReadFile("testdata/avro/simple-schema.json")
	if err != nil {
		t.Fatal(err)
	}
	simpleSchema := string(data)
	tests := []struct {
		name   string // subtest name
		sql    string // the IMPORT statement under test
		create string // optional CREATE TABLE run before the import
		args   []interface{}
		err    bool // whether the IMPORT is expected to fail
	}{
		{
			name: "import-ocf",
			sql:  "IMPORT TABLE simple (i INT8 PRIMARY KEY, s text, b bytea) AVRO DATA ($1)",
			args: []interface{}{simpleOcf},
		},
		{
			name:   "import-ocf-into-table",
			sql:    "IMPORT INTO simple AVRO DATA ($1)",
			create: "CREATE TABLE simple (i INT8 PRIMARY KEY, s text, b bytea)",
			args:   []interface{}{simpleOcf},
		},
		{
			name:   "import-ocf-into-table-with-strict-validation",
			sql:    "IMPORT INTO simple AVRO DATA ($1) WITH strict_validation",
			create: "CREATE TABLE simple (i INT8, s text, b bytea)",
			args:   []interface{}{simpleOcf},
		},
		{
			name: "import-ocf-create-using",
			sql:  "IMPORT TABLE simple CREATE USING $1 AVRO DATA ($2)",
			args: []interface{}{tableSchema, simpleOcf},
		},
		{
			name: "import-json-records",
			sql:  "IMPORT TABLE simple CREATE USING $1 AVRO DATA ($2) WITH data_as_json_records, schema_uri=$3",
			args: []interface{}{tableSchema, simpleJSON, simpleSchemaURI},
		},
		{
			name:   "import-json-records-into-table-ignores-extra-fields",
			sql:    "IMPORT INTO simple AVRO DATA ($1) WITH data_as_json_records, schema_uri=$2",
			create: "CREATE TABLE simple (i INT8 PRIMARY KEY)",
			args:   []interface{}{simpleJSON, simpleSchemaURI},
		},
		{
			name: "import-json-records-inline-schema",
			sql:  "IMPORT TABLE simple CREATE USING $1 AVRO DATA ($2) WITH data_as_json_records, schema=$3",
			args: []interface{}{tableSchema, simpleJSON, simpleSchema},
		},
		{
			name: "import-json-pretty-printed-records",
			sql:  "IMPORT TABLE simple CREATE USING $1 AVRO DATA ($2) WITH data_as_json_records, schema_uri=$3",
			args: []interface{}{tableSchema, simplePrettyJSON, simpleSchemaURI},
		},
		{
			name: "import-avro-fragments",
			sql:  "IMPORT TABLE simple CREATE USING $1 AVRO DATA ($2) WITH data_as_binary_records, records_terminated_by='', schema_uri=$3",
			args: []interface{}{tableSchema, simpleBinRecords, simpleSchemaURI},
		},
		{
			name: "fail-import-expect-ocf-got-json",
			sql:  "IMPORT TABLE simple CREATE USING $1 AVRO DATA ($2)",
			args: []interface{}{tableSchema, simpleJSON},
			err:  true,
		},
		{
			name: "relaxed-import-sets-missing-fields",
			sql:  "IMPORT TABLE simple (i INT8 PRIMARY KEY, s text, b bytea, z int) AVRO DATA ($1)",
			args: []interface{}{simpleOcf},
		},
		{
			name: "relaxed-import-ignores-extra-fields",
			sql:  "IMPORT TABLE simple (i INT8 PRIMARY KEY) AVRO DATA ($1)",
			args: []interface{}{simpleOcf},
		},
		{
			name: "strict-import-errors-missing-fields",
			sql:  "IMPORT TABLE simple (i INT8 PRIMARY KEY, s text, b bytea, z int) AVRO DATA ($1) WITH strict_validation",
			args: []interface{}{simpleOcf},
			err:  true,
		},
		{
			name: "strict-import-errors-extra-fields",
			sql:  "IMPORT TABLE simple (i INT8 PRIMARY KEY) AVRO DATA ($1) WITH strict_validation",
			args: []interface{}{simpleOcf},
			err:  true,
		},
	}
	for i, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Play a bit with producer/consumer batch sizes.
			defer TestingSetParallelImporterReaderBatchSize(13 * i)()
			// Start from a clean slate; each case re-creates `simple`.
			_, err := sqlDB.DB.ExecContext(context.Background(), `DROP TABLE IF EXISTS simple CASCADE`)
			require.NoError(t, err)
			if len(test.create) > 0 {
				_, err := sqlDB.DB.ExecContext(context.Background(), test.create)
				require.NoError(t, err)
			}
			_, err = sqlDB.DB.ExecContext(context.Background(), test.sql, test.args...)
			if test.err {
				if err == nil {
					t.Error("expected error, but alas")
				}
				return
			}
			require.NoError(t, err)
			// Sanity check: a successful import must have ingested rows.
			var numRows int
			sqlDB.QueryRow(t, `SELECT count(*) FROM simple`).Scan(&numRows)
			if numRows == 0 {
				t.Error("expected some rows after import")
			}
		})
	}
	t.Run("user-defined-schemas", func(t *testing.T) {
		sqlDB.Exec(t, `CREATE SCHEMA myschema`)
		sqlDB.Exec(t, `CREATE TABLE myschema.simple (i INT8 PRIMARY KEY, s text, b bytea)`)
		sqlDB.Exec(t, `IMPORT INTO myschema.simple (i, s, b) AVRO DATA ($1)`, simpleOcf)
		var numRows int
		sqlDB.QueryRow(t, `SELECT count(*) FROM myschema.simple`).Scan(&numRows)
		require.True(t, numRows > 0)
	})
}
// TestImportMultiRegion tests IMPORT behavior on a multi-region cluster:
// pgdump/mysqldump imports into a multi-region database should yield
// REGIONAL BY TABLE objects, and AVRO imports involving REGIONAL BY ROW
// tables (or multi-region schemas in a non-multi-region database) should
// fail with descriptive errors.
func TestImportMultiRegion(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	baseDir := filepath.Join("testdata")
	_, sqlDB, cleanup := multiregionccltestutils.TestingCreateMultiRegionCluster(
		t, 1 /* numServers */, base.TestingKnobs{}, &baseDir,
	)
	defer cleanup()
	_, err := sqlDB.Exec(`SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
	require.NoError(t, err)
	// Create the databases: `foo` is a plain database, `multi_region` has a
	// primary region.
	_, err = sqlDB.Exec(`CREATE DATABASE foo`)
	require.NoError(t, err)
	_, err = sqlDB.Exec(`CREATE DATABASE multi_region PRIMARY REGION "us-east1"`)
	require.NoError(t, err)
	simpleOcf := fmt.Sprintf("nodelocal://0/avro/%s", "simple.ocf")
	// Table schemas for USING.
	tableSchemaMR := fmt.Sprintf("nodelocal://0/avro/%s", "simple-schema-multi-region.sql")
	tableSchemaMRRegionalByRow := fmt.Sprintf("nodelocal://0/avro/%s",
		"simple-schema-multi-region-regional-by-row.sql")
	viewsAndSequencesTestCases := []struct {
		desc      string
		importSQL string
		expected  map[string]string // object name -> expected locality
	}{
		{
			desc:      "pgdump",
			importSQL: `IMPORT PGDUMP 'nodelocal://0/pgdump/views_and_sequences.sql' WITH ignore_unsupported_statements`,
			expected: map[string]string{
				"tbl": "REGIONAL BY TABLE IN PRIMARY REGION",
				"s":   "REGIONAL BY TABLE IN PRIMARY REGION",
				// views are ignored.
			},
		},
		{
			desc:      "mysqldump",
			importSQL: `IMPORT MYSQLDUMP 'nodelocal://0/mysqldump/views_and_sequences.sql'`,
			expected: map[string]string{
				"tbl":          "REGIONAL BY TABLE IN PRIMARY REGION",
				"tbl_auto_inc": "REGIONAL BY TABLE IN PRIMARY REGION",
				// views are ignored.
			},
		},
	}
	for _, tc := range viewsAndSequencesTestCases {
		t.Run(tc.desc, func(t *testing.T) {
			_, err = sqlDB.Exec(`USE multi_region`)
			require.NoError(t, err)
			defer func() {
				// NOTE(review): this drops a sequence named "table_auto_inc",
				// but the mysqldump case above creates "tbl_auto_inc" — confirm
				// the intended cleanup target (IF EXISTS makes this a no-op
				// either way).
				_, err := sqlDB.Exec(`
DROP TABLE IF EXISTS tbl;
DROP SEQUENCE IF EXISTS s;
DROP SEQUENCE IF EXISTS table_auto_inc;
DROP VIEW IF EXISTS v`,
				)
				require.NoError(t, err)
			}()
			_, err = sqlDB.Exec(tc.importSQL)
			require.NoError(t, err)
			// Collect the locality of every imported object and compare it
			// against the expected map.
			rows, err := sqlDB.Query("SELECT table_name, locality FROM [SHOW TABLES] ORDER BY table_name")
			require.NoError(t, err)
			results := make(map[string]string)
			for rows.Next() {
				require.NoError(t, rows.Err())
				var tableName, locality string
				require.NoError(t, rows.Scan(&tableName, &locality))
				results[tableName] = locality
			}
			require.Equal(t, tc.expected, results)
		})
	}
	t.Run("avro", func(t *testing.T) {
		tests := []struct {
			name      string
			db        string // database to run the import in
			table     string
			sql       string
			create    string // optional CREATE TABLE run before the import
			args      []interface{}
			errString string // non-empty means the import must fail with a matching error
		}{
			{
				name:      "import-create-using-multi-region-to-non-multi-region-database",
				db:        "foo",
				table:     "simple",
				sql:       "IMPORT TABLE simple CREATE USING $1 AVRO DATA ($2)",
				args:      []interface{}{tableSchemaMR, simpleOcf},
				errString: "cannot write descriptor for multi-region table",
			},
			{
				name:  "import-create-using-multi-region-regional-by-table-to-multi-region-database",
				db:    "multi_region",
				table: "simple",
				sql:   "IMPORT TABLE simple CREATE USING $1 AVRO DATA ($2)",
				args:  []interface{}{tableSchemaMR, simpleOcf},
			},
			{
				name:      "import-create-using-multi-region-regional-by-row-to-multi-region-database",
				db:        "multi_region",
				table:     "simple",
				sql:       "IMPORT TABLE simple CREATE USING $1 AVRO DATA ($2)",
				args:      []interface{}{tableSchemaMRRegionalByRow, simpleOcf},
				errString: "IMPORT to REGIONAL BY ROW table not supported",
			},
			{
				name:      "import-into-multi-region-regional-by-row-to-multi-region-database",
				db:        "multi_region",
				table:     "mr_regional_by_row",
				create:    "CREATE TABLE mr_regional_by_row (i INT8 PRIMARY KEY, s text, b bytea) LOCALITY REGIONAL BY ROW",
				sql:       "IMPORT INTO mr_regional_by_row AVRO DATA ($1)",
				args:      []interface{}{simpleOcf},
				errString: "IMPORT into REGIONAL BY ROW table not supported",
			},
			{
				name:   "import-into-using-multi-region-global-to-multi-region-database",
				db:     "multi_region",
				table:  "mr_global",
				create: "CREATE TABLE mr_global (i INT8 PRIMARY KEY, s text, b bytea) LOCALITY GLOBAL",
				sql:    "IMPORT INTO mr_global AVRO DATA ($1)",
				args:   []interface{}{simpleOcf},
			},
		}
		for _, test := range tests {
			t.Run(test.name, func(t *testing.T) {
				_, err = sqlDB.Exec(fmt.Sprintf(`SET DATABASE = %q`, test.db))
				require.NoError(t, err)
				_, err = sqlDB.Exec(fmt.Sprintf("DROP TABLE IF EXISTS %q CASCADE", test.table))
				require.NoError(t, err)
				if test.create != "" {
					_, err = sqlDB.Exec(test.create)
					require.NoError(t, err)
				}
				_, err = sqlDB.ExecContext(context.Background(), test.sql, test.args...)
				if test.errString != "" {
					// Previously the boolean result of testutils.IsError was
					// discarded, so a missing or non-matching error went
					// unnoticed. Assert on it so error cases actually fail.
					require.True(t, testutils.IsError(err, test.errString),
						"expected error matching %q, got %v", test.errString, err)
				} else {
					require.NoError(t, err)
					// Sanity check: a successful import must have ingested rows.
					res := sqlDB.QueryRow(fmt.Sprintf("SELECT count(*) FROM %q", test.table))
					require.NoError(t, res.Err())
					var numRows int
					err = res.Scan(&numRows)
					require.NoError(t, err)
					if numRows == 0 {
						t.Error("expected some rows after import")
					}
				}
			})
		}
	})
}
// There are two goals of this testcase:
//
// 1) Ensure that we can properly export from REGIONAL BY ROW tables (that the
// hidden row stays hidden, unless explicitly requested).
// 2) That we can import the exported data both into a non-RBR table, as well
// as a table which we can later convert to RBR, while preserving the
// crdb_region column data.
func TestMultiRegionExportImportRoundTrip(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// validateNumRows asserts tableName holds exactly `expected` rows.
	validateNumRows := func(sqlDB *gosql.DB, tableName string, expected int) {
		res := sqlDB.QueryRow(fmt.Sprintf(`SELECT count(*) FROM %s`, tableName))
		require.NoError(t, res.Err())
		var numRows int
		err := res.Scan(&numRows)
		require.NoError(t, err)
		if numRows != expected {
			t.Errorf("expected %d rows after import, found %d", expected, numRows)
		}
	}
	baseDir, dirCleanup := testutils.TempDir(t)
	defer dirCleanup()
	_, sqlDB, clusterCleanup := multiregionccltestutils.TestingCreateMultiRegionCluster(
		t, 3 /* numServers */, base.TestingKnobs{}, &baseDir)
	defer clusterCleanup()
	_, err := sqlDB.Exec(`SET CLUSTER SETTING kv.bulk_ingest.batch_size = '10KB'`)
	require.NoError(t, err)
	// Create the database.
	_, err = sqlDB.Exec(`CREATE DATABASE multi_region PRIMARY REGION "us-east1" REGIONS "us-east2", "us-east3";
USE multi_region;`)
	require.NoError(t, err)
	// Create the tables: the RBR source, a plain destination, and a
	// destination with an explicit crdb_region column that mimics RBR.
	_, err = sqlDB.Exec(`CREATE TABLE original_rbr (i int) LOCALITY REGIONAL BY ROW;
CREATE TABLE destination (i int);
CREATE TABLE destination_fake_rbr (crdb_region public.crdb_internal_region NOT NULL, i int);`)
	require.NoError(t, err)
	// Insert some data to the original table.
	_, err = sqlDB.Exec(`INSERT INTO original_rbr values (1),(2),(3),(4),(5)`)
	require.NoError(t, err)
	// Export the data: once explicitly including the hidden crdb_region
	// column, and once with the default (hidden column omitted).
	_, err = sqlDB.Exec(`EXPORT INTO CSV 'nodelocal://0/original_rbr_full'
FROM SELECT crdb_region, i from original_rbr;`)
	require.NoError(t, err)
	_, err = sqlDB.Exec(`EXPORT INTO CSV 'nodelocal://0/original_rbr_default'
FROM TABLE original_rbr;`)
	require.NoError(t, err)
	// Import the data back into the destination table.
	_, err = sqlDB.Exec(`IMPORT into destination (i) CSV DATA
('nodelocal://0/original_rbr_default/export*.csv')`)
	require.NoError(t, err)
	validateNumRows(sqlDB, `destination`, 5)
	// Import the full export to the fake RBR table.
	_, err = sqlDB.Exec(`IMPORT into destination_fake_rbr (crdb_region, i) CSV DATA
('nodelocal://0/original_rbr_full/export*.csv')`)
	require.NoError(t, err)
	validateNumRows(sqlDB, `destination_fake_rbr`, 5)
	// Convert fake table to full RBR table. This test is only required until we
	// support IMPORT directly to RBR tables. The thinking behind this test is
	// that this _could_ be one way that customers work-around the limitation of
	// not supporting IMPORT to RBR tables in 21.1. Note that right now we can't
	// make this column hidden (#62892).
	_, err = sqlDB.Exec(`ALTER TABLE destination_fake_rbr ALTER COLUMN
crdb_region SET DEFAULT default_to_database_primary_region(gateway_region())::public.crdb_internal_region;`)
	require.NoError(t, err)
	_, err = sqlDB.Exec(`ALTER TABLE destination_fake_rbr SET LOCALITY
REGIONAL BY ROW AS crdb_region;`)
	require.NoError(t, err)
	// Insert some more rows and ensure that the default values get generated.
	_, err = sqlDB.Exec(`INSERT INTO destination_fake_rbr (i) values (6),(7),(3),(9),(10)`)
	require.NoError(t, err)
	validateNumRows(sqlDB, `destination_fake_rbr`, 10)
}
// TestImportClientDisconnect ensures that an import job can complete even if
// the client connection which started it closes. This test uses a helper
// subprocess to force a closed client connection without needing to rely
// on the driver to close a TCP connection. See TestImportClientDisconnectHelper
// for the subprocess.
func TestImportClientDisconnect(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	args := base.TestClusterArgs{}
	tc := testcluster.StartTestCluster(t, 1, args)
	defer tc.Stopper().Stop(ctx)
	tc.WaitForNodeLiveness(t)
	require.NoError(t, tc.WaitForFullReplication())
	conn := tc.ServerConn(0)
	runner := sqlutils.MakeSQLRunner(conn)
	runner.Exec(t, "SET CLUSTER SETTING kv.protectedts.poll_interval = '100ms';")
	// Make a server that will tell us when somebody has sent a request, wait to
	// be signaled, and then serve a CSV row for our table.
	allowResponse := make(chan struct{})
	gotRequest := make(chan struct{}, 1)
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			return
		}
		// Non-blocking signal: only the first request needs to be observed.
		select {
		case gotRequest <- struct{}{}:
		default:
		}
		// Block until the test allows the response (or the test is torn down).
		select {
		case <-allowResponse:
		case <-ctx.Done(): // Deal with test failures.
		}
		_, _ = w.Write([]byte("1,asdfasdfasdfasdf"))
	}))
	defer srv.Close()
	// Make credentials for the new connection.
	runner.Exec(t, `CREATE USER testuser`)
	runner.Exec(t, `GRANT admin TO testuser`)
	pgURL, cleanup := sqlutils.PGUrl(t, tc.Server(0).ServingSQLAddr(),
		"TestImportClientDisconnect-testuser", url.User("testuser"))
	defer cleanup()
	// Kick off the import on a new connection which we're going to close.
	done := make(chan struct{})
	// Note: this `:=` reuses the outer `cancel` variable (ctxToCancel is the
	// only new name); the earlier `defer cancel()` already captured the outer
	// context's cancel func, so both contexts get cancelled on exit.
	ctxToCancel, cancel := context.WithCancel(ctx)
	defer cancel()
	go func() {
		defer close(done)
		connCfg, err := pgx.ParseConnectionString(pgURL.String())
		assert.NoError(t, err)
		db, err := pgx.Connect(connCfg)
		assert.NoError(t, err)
		defer func() { _ = db.Close() }()
		// The import should observe the context cancellation on its client
		// connection, not complete through this session.
		_, err = db.ExecEx(ctxToCancel, `IMPORT TABLE foo (k INT PRIMARY KEY, v STRING) CSV DATA ($1)`,
			nil /* options */, srv.URL)
		assert.Equal(t, context.Canceled, err)
	}()
	// Wait for the import job to start.
	var jobID string
	testutils.SucceedsSoon(t, func() error {
		row := conn.QueryRow("SELECT job_id FROM [SHOW JOBS] WHERE job_type = 'IMPORT' ORDER BY created DESC LIMIT 1")
		return row.Scan(&jobID)
	})
	// Wait for it to actually start.
	<-gotRequest
	// Cancel the import context and wait for the goroutine to exit.
	cancel()
	<-done
	// Allow the import to proceed.
	close(allowResponse)
	// Wait for the job to get marked as succeeded.
	testutils.SucceedsSoon(t, func() error {
		var status string
		if err := conn.QueryRow("SELECT status FROM [SHOW JOB " + jobID + "]").Scan(&status); err != nil {
			return err
		}
		const succeeded = "succeeded"
		if status != succeeded {
			return errors.Errorf("expected %s, got %v", succeeded, status)
		}
		return nil
	})
}
// TestDisallowsInvalidFormatOptions verifies that validateFormatOptions
// accepts any combination of common and format-specific options, and rejects
// any option set containing an option valid only for a different format.
func TestDisallowsInvalidFormatOptions(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// Union of every known option across all formats; used to generate option
	// sets that may include options disallowed for a particular format.
	allOpts := make(map[string]struct{})
	addOpts := func(opts map[string]struct{}) {
		for opt := range opts {
			allOpts[opt] = struct{}{}
		}
	}
	addOpts(allowedCommonOptions)
	addOpts(avroAllowedOptions)
	addOpts(csvAllowedOptions)
	addOpts(mysqlDumpAllowedOptions)
	addOpts(mysqlOutAllowedOptions)
	addOpts(pgDumpAllowedOptions)
	addOpts(pgCopyAllowedOptions)
	// Helper to pick num options from the set of allowed and the set
	// of all other options. Returns generated options plus a flag indicating
	// if the generated options contain disallowed ones.
	pickOpts := func(num int, allowed map[string]struct{}) (map[string]string, bool) {
		opts := make(map[string]string, num)
		haveDisallowed := false
		var picks []string
		// Half the time draw candidates from every known option (which may
		// include disallowed ones); otherwise only from the allowed set.
		if rand.Intn(10) > 5 {
			for opt := range allOpts {
				picks = append(picks, opt)
			}
		} else {
			for opt := range allowed {
				picks = append(picks, opt)
			}
		}
		require.NotNil(t, picks)
		for i := 0; i < num; i++ {
			pick := picks[rand.Intn(len(picks))]
			// An option is valid when it is format-specific or common.
			// (Renamed from `allowed` to avoid shadowing the map parameter.)
			_, isAllowed := allowed[pick]
			if !isAllowed {
				_, isAllowed = allowedCommonOptions[pick]
			}
			if isAllowed {
				opts[pick] = "ok"
			} else {
				opts[pick] = "bad"
				haveDisallowed = true
			}
		}
		return opts, haveDisallowed
	}
	tests := []struct {
		format  string
		allowed map[string]struct{}
	}{
		{"avro", avroAllowedOptions},
		{"csv", csvAllowedOptions},
		// Fixed typo: was "mysqouout".
		{"mysqlout", mysqlOutAllowedOptions},
		{"mysqldump", mysqlDumpAllowedOptions},
		{"pgdump", pgDumpAllowedOptions},
		{"pgcopy", pgCopyAllowedOptions},
	}
	for _, tc := range tests {
		// Try option sets of size 0 through 4 for each format.
		for i := 0; i < 5; i++ {
			opts, haveBadOptions := pickOpts(i, tc.allowed)
			t.Run(fmt.Sprintf("validate-%s-%d/badOpts=%t", tc.format, i, haveBadOptions),
				func(t *testing.T) {
					err := validateFormatOptions(tc.format, opts, tc.allowed)
					if haveBadOptions {
						require.Error(t, err, opts)
					} else {
						require.NoError(t, err, opts)
					}
				})
		}
	}
}
// TestImportInTenant verifies that userfile storage is isolated per tenant:
// the same userfile URI holds different content on the host cluster and on
// each tenant, and IMPORT on each reads only its own copy.
func TestImportInTenant(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	baseDir := filepath.Join("testdata")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	// Setup a few tenants, each with a different table.
	_, conn10 := serverutils.StartTenant(t, tc.Server(0), base.TestTenantArgs{TenantID: roachpb.MakeTenantID(10)})
	defer conn10.Close()
	t10 := sqlutils.MakeSQLRunner(conn10)
	// Prevent a logging assertion that the server ID is initialized multiple times.
	log.TestingClearServerIdentifiers()
	// Setup a few tenants, each with a different table.
	_, conn11 := serverutils.StartTenant(t, tc.Server(0), base.TestTenantArgs{TenantID: roachpb.MakeTenantID(11)})
	defer conn11.Close()
	t11 := sqlutils.MakeSQLRunner(conn11)
	const userfileURI = "userfile://defaultdb.public.root/test.csv"
	const importStmt = "IMPORT TABLE foo (k INT PRIMARY KEY, v INT) CSV DATA ($1)"
	// Upload different files to same userfile name on each of host and tenants.
	require.NoError(t, putUserfile(ctx, conn, security.RootUserName(), userfileURI, []byte("1,2\n3,4")))
	require.NoError(t, putUserfile(ctx, conn10, security.RootUserName(), userfileURI, []byte("10,2")))
	require.NoError(t, putUserfile(ctx, conn11, security.RootUserName(), userfileURI, []byte("11,22\n33,44\n55,66")))
	// Each importer must see only the file uploaded on its own connection.
	sqlDB.Exec(t, importStmt, userfileURI)
	sqlDB.CheckQueryResults(t, "SELECT * FROM foo", [][]string{{"1", "2"}, {"3", "4"}})
	t10.Exec(t, importStmt, userfileURI)
	t10.CheckQueryResults(t, "SELECT * FROM foo", [][]string{{"10", "2"}})
	t11.Exec(t, importStmt, userfileURI)
	t11.CheckQueryResults(t, "SELECT * FROM foo", [][]string{{"11", "22"}, {"33", "44"}, {"55", "66"}})
}
// putUserfile uploads content to the given userfile URI over conn using the
// COPY-based userfile upload statement, sending the payload in chunks of at
// most 1024 bytes. The upload runs in a single transaction, committed on
// success and rolled back on any failure. The user parameter is currently
// unused (the session user of conn performs the upload).
func putUserfile(
	ctx context.Context, conn *gosql.DB, user security.SQLUsername, uri string, content []byte,
) error {
	tx, err := conn.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// BUG FIX: error paths previously returned without ending the
	// transaction, leaking it. Rollback after a successful Commit is a
	// harmless no-op (sql.ErrTxDone), so an unconditional defer is safe.
	defer tx.Rollback()
	stmt, err := tx.Prepare(sql.CopyInFileStmt(uri, sql.CrdbInternalName, sql.UserFileUploadTable))
	if err != nil {
		return err
	}
	var sent int
	for sent < len(content) {
		// Clamp the final chunk to the remaining bytes.
		chunk := 1024
		if sent+chunk >= len(content) {
			chunk = len(content) - sent
		}
		if _, err := stmt.Exec(string(content[sent : sent+chunk])); err != nil {
			return err
		}
		sent += chunk
	}
	if err := stmt.Close(); err != nil {
		return err
	}
	return tx.Commit()
}
// waitForJobResult nudges the job adoption queue so the given job is picked
// up promptly, then blocks until the job reaches the expected status (or the
// SucceedsSoon timeout elapses and fails the test).
func waitForJobResult(
	t *testing.T, tc *testcluster.TestCluster, id jobspb.JobID, expected jobs.Status,
) {
	// Force newly created job to be adopted and verify its result.
	tc.Server(0).JobRegistry().(*jobs.Registry).TestingNudgeAdoptionQueue()
	testutils.SucceedsSoon(t, func() error {
		var unused int64
		// The row is absent (Scan returns sql.ErrNoRows) until the job
		// reaches the expected status, which makes SucceedsSoon retry.
		return tc.ServerConn(0).QueryRow(
			"SELECT job_id FROM [SHOW JOBS] WHERE job_id = $1 AND status = $2",
			id, expected).Scan(&unused)
	})
}
// TestDetachedImport covers IMPORT ... WITH DETACHED: detached imports work
// both outside and inside explicit transactions (non-detached imports are
// rejected inside one), and detached jobs that hit an existing table or key
// collisions fail as jobs rather than as statement errors.
func TestDetachedImport(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const (
		nodes = 3
	)
	ctx := context.Background()
	baseDir := filepath.Join("testdata", "avro")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
	defer tc.Stopper().Stop(ctx)
	connDB := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(connDB)
	sqlDB.Exec(t, `CREATE DATABASE foo; SET DATABASE = foo`)
	simpleOcf := fmt.Sprintf("nodelocal://0/%s", "simple.ocf")
	importQuery := `IMPORT TABLE simple (i INT8 PRIMARY KEY, s text, b bytea) AVRO DATA ($1)`
	importQueryDetached := importQuery + " WITH DETACHED"
	importIntoQuery := `IMPORT INTO simple AVRO DATA ($1)`
	importIntoQueryDetached := importIntoQuery + " WITH DETACHED"
	// DETACHED import w/out transaction is okay.
	var jobID jobspb.JobID
	sqlDB.QueryRow(t, importQueryDetached, simpleOcf).Scan(&jobID)
	waitForJobResult(t, tc, jobID, jobs.StatusSucceeded)
	sqlDB.Exec(t, "DROP table simple")
	// Running import under transaction requires DETACHED option.
	importWithoutDetached := func(txn *gosql.Tx) error {
		return txn.QueryRow(importQuery, simpleOcf).Scan(&jobID)
	}
	err := crdb.ExecuteTx(ctx, connDB, nil, importWithoutDetached)
	require.True(t,
		testutils.IsError(err, "IMPORT cannot be used inside a transaction without DETACHED option"))
	// We can execute IMPORT under transaction with detached option.
	importWithDetached := func(txn *gosql.Tx) error {
		return txn.QueryRow(importQueryDetached, simpleOcf).Scan(&jobID)
	}
	err = crdb.ExecuteTx(ctx, connDB, nil, importWithDetached)
	require.NoError(t, err)
	waitForJobResult(t, tc, jobID, jobs.StatusSucceeded)
	sqlDB.Exec(t, "DROP table simple")
	// Detached import should fail when the table already exists; the failure
	// surfaces as a failed job, not a statement error.
	sqlDB.QueryRow(t, importQueryDetached, simpleOcf).Scan(&jobID)
	waitForJobResult(t, tc, jobID, jobs.StatusSucceeded)
	sqlDB.QueryRow(t, importQueryDetached, simpleOcf).Scan(&jobID)
	waitForJobResult(t, tc, jobID, jobs.StatusFailed)
	sqlDB.Exec(t, "DROP table simple")
	// Detached import into should fail when there are key collisions.
	sqlDB.QueryRow(t, importQueryDetached, simpleOcf).Scan(&jobID)
	waitForJobResult(t, tc, jobID, jobs.StatusSucceeded)
	sqlDB.QueryRow(t, importIntoQueryDetached, simpleOcf).Scan(&jobID)
	waitForJobResult(t, tc, jobID, jobs.StatusFailed)
}
// TestImportJobEventLogging checks the structured event-log entries emitted
// for IMPORT jobs: a successful job logs running then succeeded, and an
// injected post-ingestion failure logs running, reverting, then failed.
func TestImportJobEventLogging(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.ScopeWithoutShowLogs(t).Close(t)
	defer jobs.TestingSetProgressThresholds()()
	defer jobs.TestingSetAdoptAndCancelIntervals(100*time.Millisecond, 100*time.Millisecond)()
	const (
		nodes = 3
	)
	ctx := context.Background()
	baseDir := filepath.Join("testdata", "avro")
	args := base.TestServerArgs{ExternalIODir: baseDir}
	params := base.TestClusterArgs{ServerArgs: args}
	tc := testcluster.StartTestCluster(t, nodes, params)
	defer tc.Stopper().Stop(ctx)
	var forceFailure bool
	// Install a resumer-creation knob on every node so the second import can
	// be made to fail right after data ingestion.
	for i := range tc.Servers {
		tc.Servers[i].JobRegistry().(*jobs.Registry).TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
			jobspb.TypeImport: func(raw jobs.Resumer) jobs.Resumer {
				r := raw.(*importResumer)
				r.testingKnobs.afterImport = func(_ backupccl.RowCount) error {
					if forceFailure {
						return errors.New("testing injected failure")
					}
					return nil
				}
				return r
			},
		}
	}
	connDB := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(connDB)
	simpleOcf := fmt.Sprintf("nodelocal://0/%s", "simple.ocf")
	// First, let's test the happy path. Start a job, allow it to succeed and check
	// the event log for the entries.
	sqlDB.Exec(t, `CREATE DATABASE foo; SET DATABASE = foo`)
	beforeImport := timeutil.Now()
	importQuery := `IMPORT TABLE simple (i INT8 PRIMARY KEY, s text, b bytea) AVRO DATA ($1)`
	var jobID int64
	var unused interface{}
	sqlDB.QueryRow(t, importQuery, simpleOcf).Scan(&jobID, &unused, &unused, &unused, &unused,
		&unused)
	expectedStatus := []string{string(jobs.StatusSucceeded), string(jobs.StatusRunning)}
	backupccl.CheckEmittedEvents(t, expectedStatus, beforeImport.UnixNano(), jobID, "import", "IMPORT")
	sqlDB.Exec(t, `DROP TABLE simple`)
	// Now let's test the events that are emitted when a job fails.
	forceFailure = true
	beforeSecondImport := timeutil.Now()
	secondImport := `IMPORT TABLE simple (i INT8 PRIMARY KEY, s text, b bytea) AVRO DATA ($1)`
	sqlDB.ExpectErrSucceedsSoon(t, "testing injected failure", secondImport, simpleOcf)
	row := sqlDB.QueryRow(t, "SELECT job_id FROM [SHOW JOBS] WHERE status = 'failed'")
	row.Scan(&jobID)
	expectedStatus = []string{string(jobs.StatusFailed), string(jobs.StatusReverting),
		string(jobs.StatusRunning)}
	backupccl.CheckEmittedEvents(t, expectedStatus, beforeSecondImport.UnixNano(), jobID, "import", "IMPORT")
}
|
package utils
//package main
import (
"crypto/sha1"
"fmt"
"os"
// "github.com/op/go-logging"
"io"
"log"
"math/rand"
"strconv"
"sync"
"time"
)
// LogLevel enumerates the severity levels understood by LogControl.
type LogLevel uint32
const (
	// NOTE(review): this numeric ordering places Fatal below Warning and
	// Debug; Init/Write activate a logger only when its level is
	// <= GlobalLogLevel, so e.g. GlobalLogLevel=FatalLevel silences Warning
	// and Debug but keeps Notice — confirm this ordering is intentional.
	NoticeLevel LogLevel = 1
	FatalLevel LogLevel = 2
	WarningLevel LogLevel = 3
	DebugLevel LogLevel = 4
)
// LogControl is a time-rotated file logger. Each instance owns one log file
// at FilePath/FileName and rotates it every TimeGap seconds (see LogCut).
type LogControl struct {
	TimeGap int64 // rotation interval in seconds (Init receives minutes and converts)
	FileName string // log file name
	FilePath string // directory containing the log file
	FileOut *os.File // currently open log file handle
	FileMutex sync.Mutex // serializes Write with LogCut's rotation
	LogLevel LogLevel // severity of this logger instance
	LogFormat string // line prefix derived from LogLevel, e.g. "DEBUG: "
}
// Package-level logger instances, one per severity. NOTE(review): these are
// nil until the application constructs and Init's them — confirm
// initialization happens before any Write (see GenSearchid).
var DebugLog *LogControl
var FatalLog *LogControl
var WarningLog *LogControl
var NoticeLog *LogControl
// GlobalLogLevel gates all loggers: a LogControl is active only while its
// LogLevel is <= GlobalLogLevel.
var GlobalLogLevel LogLevel
// Init configures the logger. timegap is the rotation interval in minutes
// (stored internally in seconds); filename/filepath locate the log file;
// loglevel selects the line prefix. If the logger's level exceeds
// GlobalLogLevel the logger stays dormant: no file is opened and no rotation
// goroutine is started. Otherwise the file is opened (created/appended) and
// the LogCut rotation goroutine is launched.
func (this *LogControl) Init(timegap int64, filename string, filepath string, loglevel LogLevel) (err error) {
	// Convert minutes to seconds.
	this.TimeGap = timegap * 60
	this.FileName = filename
	this.FilePath = filepath
	this.LogLevel = loglevel
	switch loglevel {
	case NoticeLevel:
		this.LogFormat = "NOTICE: "
	case FatalLevel:
		this.LogFormat = "FATAL: "
	case WarningLevel:
		this.LogFormat = "WARNING: "
	case DebugLevel:
		this.LogFormat = "DEBUG: "
	}
	// Dormant logger: this severity is filtered out by the global level.
	if this.LogLevel > GlobalLogLevel {
		return
	}
	err = this.open_file()
	if err != nil {
		return
	}
	// Background goroutine that rotates the file every TimeGap seconds.
	go this.LogCut()
	return
}
// Write formats and appends one log line of the form
// "<LEVEL>:  <timestamp> * <body>\n". It is a no-op for loggers whose level
// exceeds GlobalLogLevel. The file mutex serializes writes with LogCut's
// rotation, and check_valid reopens the file if it was removed externally.
func (this *LogControl) Write(format string, args ...interface{}) (err error) {
	if this.LogLevel > GlobalLogLevel {
		return
	}
	this.FileMutex.Lock()
	defer this.FileMutex.Unlock()
	err = this.check_valid()
	if err != nil {
		return err
	}
	var body string
	head := fmt.Sprintf("%s %s * ", this.LogFormat, time.Now().Format("2006-01-02 15:04:05"))
	// NOTE(review): args == nil distinguishes "no varargs passed" from an
	// explicitly passed empty slice; only the former skips Sprintf (and thus
	// avoids %!-style errors when format contains verbs).
	if args != nil {
		body = fmt.Sprintf(format, args...)
	} else {
		body = format
	}
	_, err = this.FileOut.Write([]byte(head + body + "\n"))
	return
}
// open_file (re)opens the log file at FilePath/FileName in append mode,
// creating it if necessary, and stores the handle in this.FileOut.
func (this *LogControl) open_file() (err error) {
	this.FileOut, err = os.OpenFile(this.FilePath+"/"+this.FileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return
	}
	return
}
// check_valid verifies the log file still exists on disk and reopens it if
// it was removed (e.g. deleted by an operator). Original author note: the
// per-write Stat cost here is debatable.
func (this *LogControl) check_valid() (err error) {
	_, err = os.Stat(this.FilePath + "/" + this.FileName)
	if err != nil {
		// File is missing — recreate it. NOTE(review): the stale FileOut
		// handle is not closed before reopening; confirm this is acceptable.
		err = this.open_file()
		if err != nil {
			return
		}
	}
	return
}
// LogCut runs in its own goroutine and rotates the log file on a fixed
// schedule: it sleeps until the next TimeGap boundary, renames the current
// file to "<name>.<yyyymmddhhmm>00" (stamped with the start of the interval
// that just ended), and reopens a fresh file. The file mutex keeps rotation
// atomic with respect to Write. Unrecoverable file errors terminate the
// process, matching the original behavior.
func (this *LogControl) LogCut() {
	for {
		// Sleep until the next multiple of TimeGap seconds.
		nowtime := time.Now().Unix()
		nexttime := int64(nowtime/this.TimeGap+1) * this.TimeGap
		time.Sleep(time.Second * time.Duration(nexttime-nowtime))
		this.FileMutex.Lock()
		// Stamp the rotated file with the start of the finished interval.
		date_now := time.Now().Unix() - this.TimeGap
		date_format := time.Unix(date_now, 0).Format("200601021504")
		if err := this.check_valid(); err != nil {
			log.Printf("check log file fail. err[%s]", err.Error())
			os.Exit(-1)
		}
		this.FileOut.Close()
		// BUG FIX: the rename previously concatenated FilePath+FileName
		// without the "/" separator, so it pointed at a path that open_file
		// never creates (unless FilePath ended in a slash) and rotation
		// silently failed. Build the path exactly as open_file does.
		oldPath := this.FilePath + "/" + this.FileName
		os.Rename(oldPath, oldPath+"."+date_format+"00")
		if err := this.open_file(); err != nil {
			// Log the cause before exiting instead of dying silently.
			log.Printf("reopen log file fail. err[%s]", err.Error())
			os.Exit(-1)
		}
		this.FileMutex.Unlock()
	}
}
// GenSearchid derives a hex-encoded SHA-1 search ID from the device IMEI,
// the current time, and a random number, so repeated calls for the same
// IMEI yield distinct IDs.
func GenSearchid(imei string) (searchid string) {
	tmp := imei + time.Now().String() + strconv.Itoa(rand.Int())
	// BUG FIX: DebugLog is a package-level pointer that is nil until the
	// application Init's it; writing through it unconditionally panicked in
	// that case. The debug line is best-effort, so guard it.
	if DebugLog != nil {
		DebugLog.Write("searchid is %s", tmp)
	}
	sha1_t := sha1.New()
	io.WriteString(sha1_t, tmp)
	searchid = fmt.Sprintf("%x", sha1_t.Sum(nil))
	return
}
|
// @APIVersion 1.0.0
// @Title beego Test API
// @Description beego has a very cool tools to autogenerate documents for your API
// @Contact astaxie@gmail.com
// @TermsOfServiceUrl http://beego.me/
// @License Apache 2.0
// @LicenseUrl http://www.apache.org/licenses/LICENSE-2.0.html
package routers
import (
"Android-api/controllers"
"github.com/astaxie/beego"
)
// init registers all API routes: every entity controller is mounted under
// the versioned "/v1" namespace via NSInclude (which picks up the
// controllers' annotated routes), while the user_basic endpoints are wired
// manually with explicit method mappings below.
func init() {
	ns := beego.NewNamespace("/v1",
		beego.NSNamespace("/barcode2",
			beego.NSInclude(
				&controllers.Barcode2Controller{},
			),
		),
		beego.NSNamespace("/ch_dietnutri",
			beego.NSInclude(
				&controllers.ChDietnutriController{},
			),
		),
		beego.NSNamespace("/ch_dietnutri_meterial",
			beego.NSInclude(
				&controllers.ChDietnutriMeterialController{},
			),
		),
		beego.NSNamespace("/ch_dietnutricrawl",
			beego.NSInclude(
				&controllers.ChDietnutricrawlController{},
			),
		),
		beego.NSNamespace("/ch_dris",
			beego.NSInclude(
				&controllers.ChDrisController{},
			),
		),
		beego.NSNamespace("/chineseFoodGroup",
			beego.NSInclude(
				&controllers.ChineseFoodGroupController{},
			),
		),
		beego.NSNamespace("/chinesefoodgoodsnutri",
			beego.NSInclude(
				&controllers.ChinesefoodgoodsnutriController{},
			),
		),
		beego.NSNamespace("/chinesefoodnutri",
			beego.NSInclude(
				&controllers.ChinesefoodnutriController{},
			),
		),
		beego.NSNamespace("/chinesefoodnutrifromhk",
			beego.NSInclude(
				&controllers.ChinesefoodnutrifromhkController{},
			),
		),
		beego.NSNamespace("/chinesevegetable",
			beego.NSInclude(
				&controllers.ChinesevegetableController{},
			),
		),
		beego.NSNamespace("/cookbook",
			beego.NSInclude(
				&controllers.CookbookController{},
			),
		),
		beego.NSNamespace("/deliciousFoodInfo",
			beego.NSInclude(
				&controllers.DeliciousFoodInfoController{},
			),
		),
		beego.NSNamespace("/food_data",
			beego.NSInclude(
				&controllers.FoodDataController{},
			),
		),
		beego.NSNamespace("/foodfrombhcp",
			beego.NSInclude(
				&controllers.FoodfrombhcpController{},
			),
		),
		beego.NSNamespace("/foodsafenews",
			beego.NSInclude(
				&controllers.FoodsafenewsController{},
			),
		),
		beego.NSNamespace("/foodsecurityfromzccw",
			beego.NSInclude(
				&controllers.FoodsecurityfromzccwController{},
			),
		),
		beego.NSNamespace("/foodvideo",
			beego.NSInclude(
				&controllers.FoodvideoController{},
			),
		),
		beego.NSNamespace("/kitchenstrories",
			beego.NSInclude(
				&controllers.KitchenstroriesController{},
			),
		),
		beego.NSNamespace("/media2_web",
			beego.NSInclude(
				&controllers.Media2WebController{},
			),
		),
		beego.NSNamespace("/media_video",
			beego.NSInclude(
				&controllers.MediaVideoController{},
			),
		),
		beego.NSNamespace("/news",
			beego.NSInclude(
				&controllers.NewsController{},
			),
		),
		beego.NSNamespace("/sports_data",
			beego.NSInclude(
				&controllers.SportsDataController{},
			),
		),
		beego.NSNamespace("/sporttype",
			beego.NSInclude(
				&controllers.SporttypeController{},
			),
		),
		// user_basic is intentionally NOT auto-included here; its routes are
		// registered manually below with explicit handler mappings.
		/*
			beego.NSNamespace("/user_basic",
				beego.NSInclude(
					&controllers.UserBasicController{},
				),
			),
		*/
		beego.NSNamespace("/user_goal",
			beego.NSInclude(
				&controllers.UserGoalController{},
			),
		),
		beego.NSNamespace("/user_monitor",
			beego.NSInclude(
				&controllers.UserMonitorController{},
			),
		),
	)
	beego.AddNamespace(ns)
	// Manual user_basic routes (outside /v1): lookup by id, query by mobile
	// number, and password update. NOTE(review): these live at the root
	// path, not under /v1 — confirm that is intended.
	beego.Router("/user_basic/:id", &controllers.UserBasicController{}, "get:GetOne;post:Post")
	beego.Router("/user_basic/query/:Mobile", &controllers.UserBasicController{}, "get:Get;post:Post")
	beego.Router("/user_basic/update/:Mobile/:oldpwd/:newpwd", &controllers.UserBasicController{}, "put:Update;post:Post")
}
|
package stack
import (
"fmt"
"testing"
)
// init is intentionally empty; kept as a placeholder for future test setup.
func init() {
}
// TestSimpleInsertSort exercises basic ArrayStack behavior: emptiness before
// and after pushes, Peek, and Pop ordering. NOTE(review): the name suggests
// an insertion-sort test but the body only drives the stack — consider
// renaming to TestArrayStack. The test also only prints; it makes no
// assertions through t.
func TestSimpleInsertSort(t *testing.T) {
	var s Stack = NewArrayStack()
	fmt.Println("1.", s.IsEmpty())
	for i := 0; i < 10; i++ {
		s.Push(i)
	}
	fmt.Println("2.", s.IsEmpty())
	// Peek does not remove, so this prints the top element ten times.
	for i := 0; i < 10; i++ {
		item, _ := s.Peek()
		fmt.Print(item)
	}
	fmt.Println()
	fmt.Println("3.", s.IsEmpty())
	// Pop drains the stack in LIFO order (9 down to 0).
	for i := 0; i < 10; i++ {
		item, _ := s.Pop()
		fmt.Print(item)
	}
	fmt.Println()
	fmt.Println("4.", s.IsEmpty())
}
|
package csblob
import (
"crypto/x509"
"encoding/asn1"
)
// Extensions for specific types of key usage, all under Apple's OID arc
// 1.2.840.113635.100.6.1 (CodeSign).
// These endorse a leaf certificate to create signatures with the named capability.
// https://images.apple.com/certificateauthority/pdf/Apple_WWDR_CPS_v1.22.pdf
var (
	CodeSign = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1}
	CodeSignApple = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 1}
	CodeSignIphoneDev = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 2}
	CodeSignIphoneApple = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 3}
	CodeSignIphoneSubmit = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 4}
	CodeSignSafariExtension = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 5}
	CodeSignMacAppSubmit = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 7}
	CodeSignMacInstallerSubmit = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 8}
	CodeSignMacAppStore = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 9}
	CodeSignMacAppStoreInstaller = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 10}
	CodeSignMacDev = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 12}
	CodeSignDevIDExecute = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 13}
	CodeSignDevIDInstall = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 14}
	CodeSignDevIDKernel = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 1, 18}
)
// These endorse an intermediate certificate to sign a certain type of leaf
// (arc 1.2.840.113635.100.6.2).
var (
	Intermediate = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 2}
	IntermediateWWDR = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 2, 1}
	IntermediateITMS = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 2, 2}
	IntermediateAAI = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 2, 3}
	IntermediateDevID = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 6, 2, 6}
)
// Authenticated attributes found in a signature (arc 1.2.840.113635.100.9).
var (
	// AttrCodeDirHashPlist holds a plist with (truncated) hashes of each code
	// directory found in the signature
	AttrCodeDirHashPlist = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 9, 1}
	// AttrCodeDirHashes is a set of code directory digests identified by ASN.1
	// algorithm
	AttrCodeDirHashes = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 9, 2}
)
func hasPrefix(id, prefix asn1.ObjectIdentifier) bool {
if len(id) < len(prefix) {
return false
}
return id[:len(prefix)].Equal(prefix)
}
// MarkHandledExtensions marks proprietary critical extensions as handled so
// that chain verification can proceed: every unhandled critical extension
// under the Apple CodeSign arc is dropped from the certificate's list.
func MarkHandledExtensions(cert *x509.Certificate) {
	var remaining []asn1.ObjectIdentifier
	for _, oid := range cert.UnhandledCriticalExtensions {
		if hasPrefix(oid, CodeSign) {
			continue
		}
		remaining = append(remaining, oid)
	}
	cert.UnhandledCriticalExtensions = remaining
}
// TeamID returns the team identifier found in an apple-issued leaf
// certificate, or "" if none was found. For certificates carrying an Apple
// CodeSign extension, the team ID is expected in the subject OU field.
func TeamID(cert *x509.Certificate) string {
	for _, extension := range cert.Extensions {
		if !hasPrefix(extension.Id, CodeSign) {
			continue
		}
		// team id should be in the OU field
		if ou := cert.Subject.OrganizationalUnit; len(ou) == 1 {
			return ou[0]
		}
	}
	return ""
}
// RootCA is the PEM-encoded Apple Root CA certificate, the proprietary trust
// anchor for the certificate chains handled by this package.
const RootCA = `-----BEGIN CERTIFICATE-----
MIIEuzCCA6OgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQGEwJVUzET
MBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlv
biBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwHhcNMDYwNDI1MjE0
MDM2WhcNMzUwMjA5MjE0MDM2WjBiMQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBw
bGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkx
FjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQDkkakJH5HbHkdQ6wXtXnmELes2oldMVeyLGYne+Uts9QerIjAC6Bg+
+FAJ039BqJj50cpmnCRrEdCju+QbKsMflZ56DKRHi1vUFjczy8QPTc4UadHJGXL1
XQ7Vf1+b8iUDulWPTV0N8WQ1IxVLFVkds5T39pyez1C6wVhQZ48ItCD3y6wsIG9w
tj8BMIy3Q88PnT3zK0koGsj+zrW5DtleHNbLPbU6rfQPDgCSC7EhFi501TwN22IW
q6NxkkdTVcGvL0Gz+PvjcM3mo0xFfh9Ma1CWQYnEdGILEINBhzOKgbEwWOxaBDKM
aLOPHd5lc/9nXmW8Sdh2nzMUZaF3lMktAgMBAAGjggF6MIIBdjAOBgNVHQ8BAf8E
BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUK9BpR5R2Cf70a40uQKb3
R01/CF4wHwYDVR0jBBgwFoAUK9BpR5R2Cf70a40uQKb3R01/CF4wggERBgNVHSAE
ggEIMIIBBDCCAQAGCSqGSIb3Y2QFATCB8jAqBggrBgEFBQcCARYeaHR0cHM6Ly93
d3cuYXBwbGUuY29tL2FwcGxlY2EvMIHDBggrBgEFBQcCAjCBthqBs1JlbGlhbmNl
IG9uIHRoaXMgY2VydGlmaWNhdGUgYnkgYW55IHBhcnR5IGFzc3VtZXMgYWNjZXB0
YW5jZSBvZiB0aGUgdGhlbiBhcHBsaWNhYmxlIHN0YW5kYXJkIHRlcm1zIGFuZCBj
b25kaXRpb25zIG9mIHVzZSwgY2VydGlmaWNhdGUgcG9saWN5IGFuZCBjZXJ0aWZp
Y2F0aW9uIHByYWN0aWNlIHN0YXRlbWVudHMuMA0GCSqGSIb3DQEBBQUAA4IBAQBc
NplMLXi37Yyb3PN3m/J20ncwT8EfhYOFG5k9RzfyqZtAjizUsZAS2L70c5vu0mQP
y3lPNNiiPvl4/2vIB+x9OYOLUyDTOMSxv5pPCmv/K/xZpwUJfBdAVhEedNO3iyM7
R6PVbyTi69G3cN8PReEnyvFteO3ntRcXqNx+IjXKJdXZD9Zr1KIkIxH3oayPc4Fg
xhtbCS+SsvhESPBgOJ4V9T0mZyCKM2r3DYLP3uujL/lTaltkwGMzd/c6ByxW69oP
IQ7aunMZT7XZNn/Bh1XZp5m5MkL72NVxnn6hUrcbvZNCJBIqxw8dtk2cXmPIS4AX
UKqK1drk/NAJBzewdXUh
-----END CERTIFICATE-----
`
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// MemcachedSpec defines the desired state of Memcached
type MemcachedSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file
	// Size is the desired number of Memcached instances.
	Size int32 `json:"size"`
	// Schedule is a Cron schedule associated with this resource; its exact
	// semantics are defined by the controller (see CronSchedule).
	Schedule CronSchedule `json:"schedule"`
}
// CronSchedule describes a Cron schedule, one optional field per Cron slot.
type CronSchedule struct {
	// specifies the minute during which the job executes.
	// +optional
	Minute *CronField `json:"minute,omitempty"`
	// specifies the hour during which the job executes.
	// +optional
	Hour *CronField `json:"hour,omitempty"`
	// specifies the day of the month during which the job executes.
	// +optional
	DayOfMonth *CronField `json:"dayOfMonth,omitempty"`
	// specifies the month during which the job executes.
	// +optional
	Month *CronField `json:"month,omitempty"`
	// specifies the day of the week during which the job executes.
	// +optional
	DayOfWeek *CronField `json:"dayOfWeek,omitempty"`
}
/*
Finally, we'll define a wrapper type to represent a field.
We could attach additional validation to this field,
but for now we'll just use it for documentation purposes.
*/
// CronField represents a single Cron field specifier (e.g. "5" or "*").
type CronField string
// MemcachedStatus defines the observed state of Memcached
type MemcachedStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
	// Nodes lists the names of the observed Memcached nodes (presumably pod
	// names populated by the controller — confirm there).
	Nodes []string `json:"nodes"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// Memcached is the Schema for the memcacheds API
type Memcached struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec is the desired state; Status is the state observed by the controller.
	Spec MemcachedSpec `json:"spec,omitempty"`
	Status MemcachedStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// MemcachedList contains a list of Memcached
type MemcachedList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []Memcached `json:"items"`
}
// init registers the Memcached kinds with the scheme builder so clients can
// encode and decode them.
func init() {
	SchemeBuilder.Register(&Memcached{}, &MemcachedList{})
}
|
package main
import (
"fmt"
"github.com/silenceshell/algorithms-in-golang/utils"
)
// SortedBinaryNode is one node of a binary search tree of ints.
type SortedBinaryNode struct {
	left *SortedBinaryNode // subtree holding values <= data
	right *SortedBinaryNode // subtree holding values > data
	data int
}
// SortedBinaryTree is a binary search tree; the zero value is an empty tree.
type SortedBinaryTree struct {
	root *SortedBinaryNode
}
// insert adds num to the tree, creating the root on first insertion, and
// returns the tree to allow chaining.
func (tree *SortedBinaryTree) insert(num int) *SortedBinaryTree {
	if tree.root != nil {
		tree.root.insert(num)
		return tree
	}
	tree.root = &SortedBinaryNode{data: num}
	return tree
}
// insert places num into the subtree rooted at node: values <= node.data go
// left, larger values go right. It returns an error only when called on a
// nil node. BUG FIX: errors from the recursive inserts were previously
// discarded; they are now propagated to the caller.
func (node *SortedBinaryNode) insert(num int) error {
	if node == nil {
		return fmt.Errorf("node is nil, insert %d failed", num)
	}
	if num <= node.data {
		if node.left == nil {
			node.left = &SortedBinaryNode{data: num}
			return nil
		}
		return node.left.insert(num)
	}
	if node.right == nil {
		node.right = &SortedBinaryNode{data: num}
		return nil
	}
	return node.right.insert(num)
}
// print writes the tree's values to stdout in in-order (ascending) order.
// BUG FIX: calling print on an empty tree previously panicked with a nil
// pointer dereference inside the node traversal; an empty tree now prints
// nothing.
func (tree *SortedBinaryTree) print() {
	if tree.root != nil {
		tree.root.print()
	}
}
// print performs an in-order traversal (left, self, right), writing each
// value followed by a space to stdout; on a binary search tree this emits
// the values in ascending order.
func (node *SortedBinaryNode) print() {
	if node.left != nil {
		node.left.print()
	}
	fmt.Printf("%d ", node.data)
	if node.right != nil {
		node.right.print()
	}
}
// main builds a binary search tree from a generated slice of 13 ints
// (presumably random — see utils.GenerateSlice) and prints the raw slice
// followed by the tree's sorted in-order traversal.
func main() {
	tree := &SortedBinaryTree{}
	data := utils.GenerateSlice(13)
	fmt.Println(data)
	for _, v := range data {
		tree.insert(v)
	}
	tree.print()
}
|
package ghq
import (
"encoding/json"
"io/ioutil"
"os"
)
// Config holds the key/value pairs loaded from config.json by LoadConfig.
// NOTE(review): package-level and written without synchronization — confirm
// LoadConfig completes before concurrent GetConfig callers start.
var Config map[string]string
// LoadConfig reads config.json from the working directory and unmarshals it
// into the package-level Config map, replacing any previous contents.
func (r *Router) LoadConfig() (err error) {
	f, err := os.Open("config.json")
	if err != nil {
		return
	}
	defer f.Close()
	raw, err := ioutil.ReadAll(f)
	if err != nil {
		return
	}
	Config = make(map[string]string)
	if err = json.Unmarshal(raw, &Config); err != nil {
		return
	}
	return
}
// GetConfig looks up configName in the loaded Config map; ok reports
// whether the key was present.
func GetConfig(configName string) (config string, ok bool) {
	value, found := Config[configName]
	return value, found
}
|
package controller
import (
"errors"
"fmt"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/luxingwen/secret-game/dao"
"github.com/luxingwen/secret-game/model"
"github.com/luxingwen/secret-game/tools"
)
// TeamController groups the gin handlers for team management: creation,
// listing, membership, chat, and avatar upload.
type TeamController struct {
}
// Create binds a Team from the request body, stamps the creation time and
// the current wx user as leader, and persists it. A duplicate team name
// yields CodeExist; other storage failures yield CodeDBErr.
func (ctl *TeamController) Create(c *gin.Context) {
	team := new(model.Team)
	if err := c.ShouldBind(&team); err != nil {
		handleErr(c, CodeReqErr, err)
		return
	}
	team.Created = time.Now()
	team.LeaderId = int64(c.GetInt("wxUserId"))
	err := dao.GetDao().AddTeam(team)
	switch {
	case err != nil && strings.Contains(err.Error(), "Duplicate entry"):
		// MySQL duplicate-key error text means the team name is taken.
		handleErr(c, CodeExist, errors.New("队伍名称已经存在"))
	case err != nil:
		handleErr(c, CodeDBErr, err)
	default:
		handleOk(c, "ok")
	}
}
// List returns the teams matching the bound search filter, scoped to the
// current wx user.
func (ctl *TeamController) List(c *gin.Context) {
	search := new(model.TeamListSearch)
	err := c.ShouldBind(&search)
	if err != nil {
		// BUG FIX: previously this printed the error and returned without
		// writing a response, leaving the client with an empty 200; report a
		// request error like every other handler in this controller does.
		handleErr(c, CodeReqErr, err)
		return
	}
	search.UserId = int64(c.GetInt("wxUserId"))
	res, err := dao.GetDao().List(search)
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	handleOk(c, res)
}
// ReqJoinTeam is the JSON request body for JoinTeam.
type ReqJoinTeam struct {
	TeamId int `json:"team_id"`
}
// JoinTeam moves the current wx user into the requested team: the old team
// is left (BeforeJoinTeamQuitTeam) and the new one joined, with "quit_team"
// and "join_team" notifications carrying the user's profile.
func (ctl *TeamController) JoinTeam(c *gin.Context) {
	uid := c.GetInt("wxUserId")
	var req ReqJoinTeam
	err := c.ShouldBind(&req)
	if err != nil {
		handleErr(c, CodeReqErr, err)
		return
	}
	wxUser, err := dao.GetDao().GetWxUser(uid)
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	// Notification payload shared by both events.
	mdata := make(map[string]interface{}, 0)
	mdata["nickname"] = wxUser.NickName
	mdata["uid"] = wxUser.ID
	mdata["avatar_url"] = wxUser.AvatarUrl
	// NOTE(review): the quit notification is sent before the user actually
	// leaves the old team — this ordering is load-bearing if NotifyTeams
	// resolves the target team from current membership; confirm.
	NotifyTeams(uid, "quit_team", mdata)
	err = dao.GetDao().BeforeJoinTeamQuitTeam(uid)
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	err = dao.GetDao().JoinTeam(uid, req.TeamId)
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	NotifyTeams(uid, "join_team", mdata)
	handleOk(c, "ok")
}
// ReqQuitTeam is the JSON request body for QuiteTeam.
type ReqQuitTeam struct {
	TeamId int `json:"team_id"`
}
// QuiteTeam removes the current wx user from the given team, sending a
// "quit_team" notification (before the membership change) with the user's
// profile. NOTE(review): the method name misspells "Quit"; renaming would
// change the routing surface, so it is only flagged here.
func (ctl *TeamController) QuiteTeam(c *gin.Context) {
	uid := c.GetInt("wxUserId")
	var req ReqQuitTeam
	err := c.ShouldBind(&req)
	if err != nil {
		handleErr(c, CodeReqErr, err)
		return
	}
	wxUser, err := dao.GetDao().GetWxUser(uid)
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	mdata := make(map[string]interface{}, 0)
	mdata["nickname"] = wxUser.NickName
	mdata["uid"] = wxUser.ID
	mdata["avatar_url"] = wxUser.AvatarUrl
	// Notify while the user is still a member, then perform the removal.
	NotifyTeams(uid, "quit_team", mdata)
	err = dao.GetDao().QuitTeam(uid, req.TeamId)
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	handleOk(c, "ok")
}
// TeamInfo returns the current wx user's team details; a missing record is
// treated as success with an empty result rather than as an error.
func (ctl *TeamController) TeamInfo(c *gin.Context) {
	userID := c.GetInt("wxUserId")
	info, err := dao.GetDao().GetTeamInfo(userID)
	if err != nil {
		// "record not found" just means the user has no team yet; anything
		// else is a genuine database error.
		if err.Error() != "record not found" {
			handleErr(c, CodeDBErr, err)
			return
		}
	}
	handleOk(c, info)
}
// TeamChatList returns the chat history of the current wx user's team.
func (ctl *TeamController) TeamChatList(c *gin.Context) {
	userID := c.GetInt("wxUserId")
	team, err := dao.GetDao().GetTeamInfo(userID)
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	history, err := tools.TeamChatList(int(team.Id))
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	handleOk(c, history)
}
// Chat is the JSON request body for TeamChat.
type Chat struct {
	Content string `json:"content"`
}
// TeamChat records a chat message from the current wx user in the team's
// history and broadcasts a "team_chat" notification carrying the sender's
// profile and the message content.
func (ctl *TeamController) TeamChat(c *gin.Context) {
	uid := c.GetInt("wxUserId")
	var chat Chat
	err := c.ShouldBind(&chat)
	if err != nil {
		handleErr(c, CodeReqErr, err)
		return
	}
	res, err := dao.GetDao().GetTeamInfo(uid)
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	// NOTE(review): if tools.TeamChat can fail, its result is discarded
	// here — verify it cannot persist partially.
	tools.TeamChat(uid, int(res.Id), chat.Content)
	wxUser, err := dao.GetDao().GetWxUser(uid)
	if err != nil {
		handleErr(c, CodeDBErr, err)
		return
	}
	mdata := make(map[string]interface{}, 0)
	mdata["nickname"] = wxUser.NickName
	mdata["uid"] = wxUser.ID
	mdata["avatar_url"] = wxUser.AvatarUrl
	mdata["content"] = chat.Content
	NotifyTeams(uid, "team_chat", mdata)
	handleOk(c, "ok")
}
// HeaderImg accepts an avatar upload via multipart field "file" and saves it
// under the avatar directory, responding with the saved path (empty if no
// file was provided — a missing file is tolerated).
func (ctl *TeamController) HeaderImg(c *gin.Context) {
	var saveUrl string
	file, err := c.FormFile("file")
	if err != nil {
		// Best-effort: log and continue; the response carries an empty URL.
		fmt.Println(err)
	}
	if file != nil {
		saveUrl = tools.GetHeadImgUrl(file.Filename)
		// BUG FIX: the save error was silently discarded, so the handler
		// could report success with a URL pointing at a file that was never
		// written.
		if err := c.SaveUploadedFile(file, saveUrl); err != nil {
			handleErr(c, CodeDBErr, err)
			return
		}
	}
	handleOk(c, saveUrl)
}
|
package url
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestFnHostname verifies that fnHostname.Eval extracts the host portion of
// a URL even when the path and fragment contain spaces.
func TestFnHostname(t *testing.T) {
	fn := &fnHostname{}
	const rawURL = "https://subdomain.example.com/path?q=hello world#fragment with space"
	got, err := fn.Eval(rawURL)
	assert.Nil(t, err)
	assert.Equal(t, "subdomain.example.com", got)
}
|
package ServerManager
import (
"golang.org/x/net/context"
"net"
"log"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc"
pb "MarketServer"
"database/sql"
_ "github.com/lib/pq"
)
// s is the process-wide gRPC server and db the shared Postgres pool; both
// are created in init below.
var s *grpc.Server
var db *sql.DB
// init opens the Postgres pool, builds the gRPC server, registers all
// services, and serves on port 50051.
//
// NOTE(review): doing this in init() is problematic — s.Serve blocks, so
// importing this package never returns; the DB password is hardcoded in the
// DSN; sql.Open does not validate the connection (no Ping); and Run() below
// is empty. Consider moving this body into Run().
func init() {
	var err error
	db, err = sql.Open("postgres", "host=localhost user=postgres password=hespw123 dbname=content sslmode=require")
	if err != nil {
		log.Fatal(err)
	}
	lis, err := net.Listen("tcp", port)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s = grpc.NewServer()
	pb.RegisterGreeterServer(s, &GreetingServer{})
	runLoginServer()
	runUserDownloadServer()
	runAvailableGameServer()
	// Register reflection service on gRPC GreetingServer.
	reflection.Register(s)
	// Blocks for the lifetime of the process.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
const (
	// port is the TCP address the gRPC server listens on.
	port = ":50051"
)
// GreetingServer implements pb.GreeterServer (see SayHello).
type GreetingServer struct{}
// SayHello implements helloworld.GreeterServer, greeting the caller by name.
func (s *GreetingServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
	greeting := "Hello " + in.Name
	return &pb.HelloReply{Message: greeting}, nil
}
// Run is intentionally empty: importing this package triggers init, which
// performs all setup and blocks serving; callers invoke Run only to force
// the import. NOTE(review): consider moving init's body here instead.
func Run() {
}
|
package main
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
"io"
"os"
"reflect"
"strings"
)
// prefix marks functions to be wrapped: every top-level function whose name
// starts with "server_" gets RPC wrappers generated.
const prefix = "server_"
func pretty_print_type_expr(out io.Writer, e ast.Expr) {
ty := reflect.TypeOf(e)
switch t := e.(type) {
case *ast.StarExpr:
fmt.Fprintf(out, "*")
pretty_print_type_expr(out, t.X)
case *ast.Ident:
fmt.Fprintf(out, t.Name)
case *ast.ArrayType:
fmt.Fprintf(out, "[]")
pretty_print_type_expr(out, t.Elt)
case *ast.SelectorExpr:
pretty_print_type_expr(out, t.X)
fmt.Fprintf(out, ".%s", t.Sel.Name)
case *ast.FuncType:
fmt.Fprintf(out, "func(")
pretty_print_func_field_list(out, t.Params)
fmt.Fprintf(out, ")")
buf := bytes.NewBuffer(make([]byte, 0, 256))
nresults := pretty_print_func_field_list(buf, t.Results)
if nresults > 0 {
results := buf.String()
if strings.Index(results, " ") != -1 {
results = "(" + results + ")"
}
fmt.Fprintf(out, " %s", results)
}
case *ast.MapType:
fmt.Fprintf(out, "map[")
pretty_print_type_expr(out, t.Key)
fmt.Fprintf(out, "]")
pretty_print_type_expr(out, t.Value)
case *ast.InterfaceType:
fmt.Fprintf(out, "interface{}")
case *ast.Ellipsis:
fmt.Fprintf(out, "...")
pretty_print_type_expr(out, t.Elt)
default:
fmt.Fprintf(out, "\n[!!] unknown type: %s\n", ty.String())
}
}
func pretty_print_func_field_list(out io.Writer, f *ast.FieldList) int {
count := 0
if f == nil {
return count
}
for i, field := range f.List {
// names
if field.Names != nil {
for j, name := range field.Names {
fmt.Fprintf(out, "%s", name.Name)
if j != len(field.Names)-1 {
fmt.Fprintf(out, ", ")
}
count++
}
fmt.Fprintf(out, " ")
} else {
count++
}
// type
pretty_print_type_expr(out, field.Type)
// ,
if i != len(f.List)-1 {
fmt.Fprintf(out, ", ")
}
}
return count
}
// pretty_print_func_field_list_using_args mirrors
// pretty_print_func_field_list but replaces every parameter name with a
// positional "ArgN" name (anonymous fields still consume a position without
// printing a name). Returns the number of fields counted. Used to build the
// client stub's parameter list so it matches the Args_ struct fields.
func pretty_print_func_field_list_using_args(out io.Writer, f *ast.FieldList) int {
	count := 0
	if f == nil {
		return count
	}
	for i, field := range f.List {
		// names
		if field.Names != nil {
			for j := range field.Names {
				fmt.Fprintf(out, "Arg%d", count)
				if j != len(field.Names)-1 {
					fmt.Fprintf(out, ", ")
				}
				count++
			}
			fmt.Fprintf(out, " ")
		} else {
			count++
		}
		// type
		pretty_print_type_expr(out, field.Type)
		// ,
		if i != len(f.List)-1 {
			fmt.Fprintf(out, ", ")
		}
	}
	return count
}
// generate_struct_wrapper emits "type <structname>_<name> struct { ... }"
// with one positional ArgN field per parameter/result in fun (grouped names
// share one type line, matching Go field syntax). Returns the number of
// fields emitted, which the RPC wrappers use as arg/reply counts.
func generate_struct_wrapper(out io.Writer, fun *ast.FieldList, structname, name string) int {
	fmt.Fprintf(out, "type %s_%s struct {\n", structname, name)
	argn := 0
	for _, field := range fun.List {
		fmt.Fprintf(out, "\t")
		// names
		if field.Names != nil {
			for j := range field.Names {
				fmt.Fprintf(out, "Arg%d", argn)
				if j != len(field.Names)-1 {
					fmt.Fprintf(out, ", ")
				}
				argn++
			}
			fmt.Fprintf(out, " ")
		} else {
			fmt.Fprintf(out, "Arg%d ", argn)
			argn++
		}
		// type
		pretty_print_type_expr(out, field.Type)
		// \n
		fmt.Fprintf(out, "\n")
	}
	fmt.Fprintf(out, "}\n")
	return argn
}
// function that is being exposed to an RPC API, but calls simple "Server_" one
func generate_server_rpc_wrapper(out io.Writer, fun *ast.FuncDecl, name string, argcnt, replycnt int) {
fmt.Fprintf(out, "func (r *RPC) RPC_%s(args *Args_%s, reply *Reply_%s) error {\n",
name, name, name)
fmt.Fprintf(out, "\t")
for i := 0; i < replycnt; i++ {
fmt.Fprintf(out, "reply.Arg%d", i)
if i != replycnt-1 {
fmt.Fprintf(out, ", ")
}
}
fmt.Fprintf(out, " = %s(", fun.Name.Name)
for i := 0; i < argcnt; i++ {
fmt.Fprintf(out, "args.Arg%d", i)
if i != argcnt-1 {
fmt.Fprintf(out, ", ")
}
}
fmt.Fprintf(out, ")\n")
fmt.Fprintf(out, "\treturn nil\n}\n")
}
// generate_client_rpc_wrapper emits the client-side stub: a function that
// packs its ArgN parameters into Args_<name>, performs
// cli.Call("RPC.RPC_<name>"), and returns the Reply_<name> fields. RPC
// failures panic in the generated code.
func generate_client_rpc_wrapper(out io.Writer, fun *ast.FuncDecl, name string, argcnt, replycnt int) {
	fmt.Fprintf(out, "func client_%s(cli *rpc.Client, ", name)
	pretty_print_func_field_list_using_args(out, fun.Type.Params)
	fmt.Fprintf(out, ")")
	buf := bytes.NewBuffer(make([]byte, 0, 256))
	nresults := pretty_print_func_field_list(buf, fun.Type.Results)
	if nresults > 0 {
		results := buf.String()
		// Multiple or named results need surrounding parentheses; use the
		// idiomatic strings.Contains instead of Index != -1.
		if strings.Contains(results, " ") {
			results = "(" + results + ")"
		}
		fmt.Fprintf(out, " %s", results)
	}
	fmt.Fprintf(out, " {\n")
	fmt.Fprintf(out, "\tvar args Args_%s\n", name)
	fmt.Fprintf(out, "\tvar reply Reply_%s\n", name)
	for i := 0; i < argcnt; i++ {
		fmt.Fprintf(out, "\targs.Arg%d = Arg%d\n", i, i)
	}
	fmt.Fprintf(out, "\terr := cli.Call(\"RPC.RPC_%s\", &args, &reply)\n", name)
	fmt.Fprintf(out, "\tif err != nil {\n")
	fmt.Fprintf(out, "\t\tpanic(err)\n\t}\n")
	fmt.Fprintf(out, "\treturn ")
	for i := 0; i < replycnt; i++ {
		fmt.Fprintf(out, "reply.Arg%d", i)
		if i != replycnt-1 {
			fmt.Fprintf(out, ", ")
		}
	}
	fmt.Fprintf(out, "\n}\n")
}
// wrap_function emits everything needed to expose one server_-prefixed
// function over RPC: the Args_/Reply_ structs, the server-side RPC method,
// and the client stub. The generated names drop the "server_" prefix.
func wrap_function(out io.Writer, fun *ast.FuncDecl) {
	name := fun.Name.Name[len(prefix):]
	fmt.Fprintf(out, "// wrapper for: %s\n\n", fun.Name.Name)
	argcnt := generate_struct_wrapper(out, fun.Type.Params, "Args", name)
	replycnt := generate_struct_wrapper(out, fun.Type.Results, "Reply", name)
	generate_server_rpc_wrapper(out, fun, name, argcnt, replycnt)
	generate_client_rpc_wrapper(out, fun, name, argcnt, replycnt)
	fmt.Fprintf(out, "\n")
}
// process_file parses filename and generates RPC wrappers for every
// top-level function whose name starts with the "server_" prefix. Parse
// failures abort the code generator via panic.
func process_file(out io.Writer, filename string) {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, filename, nil, 0)
	if err != nil {
		panic(err)
	}
	for _, decl := range file.Decls {
		if fdecl, ok := decl.(*ast.FuncDecl); ok {
			// strings.HasPrefix replaces the manual length check plus slice
			// comparison; semantics are identical.
			if strings.HasPrefix(fdecl.Name.Name, prefix) {
				wrap_function(out, fdecl)
			}
		}
	}
}
// head is the fixed preamble written once before any generated wrappers.
const head = `// WARNING! Autogenerated by goremote, don't touch.
package main
import (
	"net/rpc"
)
type RPC struct {
}
`
// main writes the generated source to stdout: the fixed preamble followed
// by wrappers for every server_-prefixed function in the files named on the
// command line.
func main() {
	flag.Parse()
	// Fprint, not Fprintf: head is data, not a format string (avoids
	// mangling should it ever contain a '%', and satisfies `go vet`).
	fmt.Fprint(os.Stdout, head)
	for _, file := range flag.Args() {
		process_file(os.Stdout, file)
	}
}
|
package slack
import (
"github.com/shiv3/slackube/app/controller/slackcontoller"
"github.com/slack-go/slack"
"github.com/shiv3/slackube/app/usecase"
"github.com/labstack/echo/v4"
)
type (
	// Handler exposes the HTTP endpoints for Slack event and interactive
	// action callbacks.
	Handler interface {
		SlackEvents(c echo.Context) error
		SlackActions(c echo.Context) error
	}
	// handlerImpl wires the Slack credentials to the router that dispatches
	// incoming payloads.
	handlerImpl struct {
		signingSecret string
		slackBotToken string
		slackRouter *slackcontoller.SlackRouter
	}
)
// NewHandlerImpl constructs the Slack handler: it builds the use-case layer
// and a SlackRouter backed by a slack client authenticated with
// slackBotToken. NOTE(review): an exported constructor returning the
// unexported *handlerImpl trips golint; consider returning Handler.
func NewHandlerImpl(signingSecret string, slackBotToken string) (*handlerImpl, error) {
	u, err := usecase.NewUsecasesImpl()
	if err != nil {
		return nil, err
	}
	return &handlerImpl{
		signingSecret: signingSecret,
		slackBotToken: slackBotToken,
		slackRouter: slackcontoller.NewSlackRouter(u, slack.New(slackBotToken)),
	}, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.