text
stringlengths 11
4.05M
|
|---|
/*
DEPRECATED: use go.pedge.io/pkg/yaml instead!
https://go.pedge.io/pkg
*/
package yaml2json
import "errors"
// TransformOptions carries formatting options for Transform. The fields are
// presumably JSON output settings; they are unused by this deprecated stub.
type TransformOptions struct {
	Pretty bool
	Indent string
}

// Transform is retained only for backward compatibility. The whole package is
// deprecated, so it unconditionally returns an error pointing callers at the
// replacement package.
func Transform(p []byte, options TransformOptions) ([]byte, error) {
	const deprecationMsg = "yaml2json is deprecated! Use go.pedge.io/pkg/yaml instead! https://go.pedge.io/pkg"
	return nil, errors.New(deprecationMsg)
}
|
package main
import (
"fmt"
"strings"
postgresv1 "github.com/cloud-ark/kubeplus/postgres-crd-v2/pkg/apis/postgrescontroller/v1"
)
// getCreateUserCommands builds one "create user ... with password '...';"
// statement per desired user.
//
// Fixes over the original: single quotes in the password are doubled so the
// SQL literal cannot be terminated early; the password is no longer run
// through strings.Fields/Join (which collapsed consecutive spaces inside the
// password); and the plaintext password is no longer printed to stdout.
// NOTE(review): usernames are still interpolated unescaped — prefer
// parameterized execution if the driver supports it for DDL.
func getCreateUserCommands(desiredList []postgresv1.UserSpec) []string {
	cmdList := make([]string, 0, len(desiredList))
	for _, user := range desiredList {
		// Double single quotes per SQL string-literal quoting rules.
		password := strings.Replace(user.Password, "'", "''", -1)
		cmdString := "create user " + user.User + " with password '" + password + "';"
		fmt.Printf("CreateUserCmd: create user %s with password '<redacted>';\n", user.User)
		cmdList = append(cmdList, cmdString)
	}
	return cmdList
}
// getDropUserCommands builds one "drop user ...;" statement per user in
// desiredList, logging each command as it is produced.
func getDropUserCommands(desiredList []postgresv1.UserSpec) []string {
	var cmdList []string
	for _, u := range desiredList {
		// Fields+Join normalizes any internal whitespace to single spaces.
		cmd := strings.Join(strings.Fields("drop user "+u.User+";"), " ")
		fmt.Printf("DropUserCmd: %v\n", cmd)
		cmdList = append(cmdList, cmd)
	}
	return cmdList
}
// getAlterUserCommands builds one "alter user ... with password '...';"
// statement per user whose password must change.
//
// Fixes over the original: single quotes in the password are doubled so the
// SQL literal cannot be terminated early; the password is no longer run
// through strings.Fields/Join (which collapsed consecutive spaces inside the
// password); the plaintext password is no longer printed to stdout; and the
// misleading local name `dropUserCmd` is gone.
func getAlterUserCommands(desiredList []postgresv1.UserSpec) []string {
	cmdList := make([]string, 0, len(desiredList))
	for _, user := range desiredList {
		// Double single quotes per SQL string-literal quoting rules.
		password := strings.Replace(user.Password, "'", "''", -1)
		cmdString := "alter user " + user.User + " with password '" + password + "';"
		fmt.Printf("AlterUserCmd: alter user %s with password '<redacted>';\n", user.User)
		cmdList = append(cmdList, cmdString)
	}
	return cmdList
}
// getUserDiffList returns the users present in desired (matched by User name)
// that do not appear in current.
func getUserDiffList(desired []postgresv1.UserSpec, current []postgresv1.UserSpec) []postgresv1.UserSpec {
	var diffList []postgresv1.UserSpec
	for _, want := range desired {
		exists := false
		for _, have := range current {
			if want.User == have.User {
				exists = true
				break
			}
		}
		if !exists {
			diffList = append(diffList, want)
		}
	}
	return diffList
}
// getUserCommonList returns the desired users that also exist in current (by
// User name) but whose password differs and therefore needs updating.
// Note: if current contains duplicate user names, a desired user may be
// appended once per differing duplicate, matching the original behavior.
func getUserCommonList(desired []postgresv1.UserSpec, current []postgresv1.UserSpec) []postgresv1.UserSpec {
	var modifyList []postgresv1.UserSpec
	for _, want := range desired {
		for _, have := range current {
			if want.User == have.User && want.Password != have.Password {
				modifyList = append(modifyList, want)
			}
		}
	}
	return modifyList
}
// getUserCommands diffs the desired user list against the current one and
// returns the SQL statements required to reconcile them: creates for new
// users, drops for removed users, and alters for changed passwords.
func getUserCommands(desiredList []postgresv1.UserSpec, currentList []postgresv1.UserSpec) ([]string, []string, []string) {
	var createCmds, dropCmds, alterCmds []string
	if len(currentList) == 0 {
		// Nothing exists yet, so every desired user must be created.
		createCmds = getCreateUserCommands(desiredList)
		return createCmds, dropCmds, alterCmds
	}
	createCmds = getCreateUserCommands(getUserDiffList(desiredList, currentList))
	dropCmds = getDropUserCommands(getUserDiffList(currentList, desiredList))
	alterCmds = getAlterUserCommands(getUserCommonList(desiredList, currentList))
	return createCmds, dropCmds, alterCmds
}
|
package lib
import (
"regexp"
"strings"
)
// tokenSeparatorPatternSource matches runs of characters that are neither
// Latin/Cyrillic letters, digits, nor underscores; such runs act as token
// separators.
var tokenSeparatorPatternSource = `[^A-Za-zА-Яа-я0-9_]+`
// tokenSeparatorRegExp is compiled once at package init (MustCompile panics
// on an invalid pattern, which is acceptable for a constant pattern).
var tokenSeparatorRegExp = regexp.MustCompile(tokenSeparatorPatternSource)
// Tokenize normalizes every separator run in content to a single space,
// sanitizes the result with SanitizeToken, and splits it into tokens.
func Tokenize(content string) []string {
	const delim = " "
	normalized := tokenSeparatorRegExp.ReplaceAllString(content, delim)
	return strings.Split(SanitizeToken(normalized), delim)
}
// Token is one sanitized substring of the scanned content together with its
// absolute position in the original text.
type Token struct {
	value string // sanitized token text
	start int    // start offset of the raw token (workOffset-adjusted)
	end   int    // offset just past the raw token (workOffset-adjusted)
}
// TokenizeWithIndex splits *contentP on the separator pattern and returns the
// sanitized tokens along with their start/end offsets, each shifted by
// workOffset. A trailing (possibly empty) token is always emitted for the
// text after the last separator.
func TokenizeWithIndex(workOffset int, contentP *string) *[]Token {
	content := *contentP
	tokens := []Token{}
	separators := tokenSeparatorRegExp.FindAllStringIndex(content, -1)
	prev := 0
	for _, sep := range separators {
		tokens = append(tokens, Token{
			value: SanitizeToken(content[prev:sep[0]]),
			start: workOffset + prev,
			end:   workOffset + sep[0],
		})
		prev = sep[1]
	}
	// Final token: everything after the last separator (or the whole string
	// when no separator matched at all).
	tokens = append(tokens, Token{
		value: SanitizeToken(content[prev:]),
		start: workOffset + prev,
		end:   workOffset + len(content),
	})
	return &tokens
}
// CreateNGrams returns every contiguous n-gram of tokens, joined with single
// spaces, in order of appearance.
//
// Fixes over the original: `for ti, _ := range` is replaced by the idiomatic
// `for ti := range`; the inner copy loop is replaced by strings.Join over a
// subslice; the result is preallocated; and n <= 0 now yields an empty list
// instead of one empty string per token.
func CreateNGrams(tokens []string, n int) []string {
	rv := []string{}
	if n <= 0 {
		return rv
	}
	if len(tokens) >= n {
		rv = make([]string, 0, len(tokens)-n+1)
	}
	for ti := range tokens {
		if ti+n <= len(tokens) {
			rv = append(rv, strings.Join(tokens[ti:ti+n], " "))
		}
	}
	return rv
}
|
package v1beta1
import (
conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
// HyperConvergedName is the name of the HyperConverged resource that will be reconciled
const HyperConvergedName = "kubevirt-hyperconverged"
// HyperConvergedSpec defines the desired state of HyperConverged
// +k8s:openapi-gen=true
type HyperConvergedSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
// Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
// LocalStorageClassName is the name of the local storage class.
LocalStorageClassName string `json:"localStorageClassName,omitempty"`
// infra HyperConvergedConfig influences the pod configuration (currently only placement)
// for all the infra components needed on the virtualization enabled cluster
// but not necessarily directly on each node running VMs/VMIs.
// +optional
Infra HyperConvergedConfig `json:"infra,omitempty"`
// workloads HyperConvergedConfig influences the pod configuration (currently only placement) of components
// which need to be running on a node where virtualization workloads should be able to run.
// Changes to Workloads HyperConvergedConfig can be applied only without existing workload.
// +optional
Workloads HyperConvergedConfig `json:"workloads,omitempty"`
// featureGates is a map of feature gate flags. Setting a flag to `true` will enable
// the feature. Setting `false` or removing the feature gate, disables the feature.
// +kubebuilder:default={"withHostPassthroughCPU": false, "sriovLiveMigration": false}
// +optional
FeatureGates HyperConvergedFeatureGates `json:"featureGates,omitempty"`
// Live migration limits and timeouts are applied so that migration processes do not
// overwhelm the cluster.
// +kubebuilder:default={"bandwidthPerMigration": "64Mi", "completionTimeoutPerGiB": 800, "parallelMigrationsPerCluster": 5, "parallelOutboundMigrationsPerNode": 2, "progressTimeout": 150}
// +optional
LiveMigrationConfig LiveMigrationConfigurations `json:"liveMigrationConfig,omitempty"`
// PermittedHostDevices holds information about devices allowed for passthrough
// +optional
PermittedHostDevices *PermittedHostDevices `json:"permittedHostDevices,omitempty"`
// certConfig holds the rotation policy for internal, self-signed certificates
// +kubebuilder:default={"ca": {"duration": "48h0m0s", "renewBefore": "24h0m0s"}, "server": {"duration": "24h0m0s", "renewBefore": "12h0m0s"}}
// +optional
CertConfig HyperConvergedCertConfig `json:"certConfig,omitempty"`
// ResourceRequirements describes the resource requirements for the operand workloads.
// +optional
ResourceRequirements *OperandResourceRequirements `json:"resourceRequirements,omitempty"`
// Override the storage class used for scratch space during transfer operations. The scratch space storage class
// is determined in the following order:
// value of scratchSpaceStorageClass, if that doesn't exist, use the default storage class, if there is no default
// storage class, use the storage class of the DataVolume, if no storage class specified, use no storage class for
// scratch space
// +optional
ScratchSpaceStorageClass *string `json:"scratchSpaceStorageClass,omitempty"`
// VDDK Init Image eventually used to import VMs from external providers
// +optional
VddkInitImage *string `json:"vddkInitImage,omitempty"`
// ObsoleteCPUs allows avoiding scheduling of VMs for obsolete CPU models
// +optional
ObsoleteCPUs *HyperConvergedObsoleteCPUs `json:"obsoleteCPUs,omitempty"`
// StorageImport contains configuration for importing containerized data
// +optional
StorageImport *StorageImportConfig `json:"storageImport,omitempty"`
// WorkloadUpdateStrategy defines at the cluster level how to handle automated workload updates
// +kubebuilder:default={"workloadUpdateMethods": {"LiveMigrate", "Evict"}, "batchEvictionSize": 10, "batchEvictionInterval": "1m"}
// +optional
WorkloadUpdateStrategy *HyperConvergedWorkloadUpdateStrategy `json:"workloadUpdateStrategy,omitempty"`
// operator version
// +optional
Version string `json:"version,omitempty"`
}
// CertRotateConfigCA contains the tunables for the CA TLS certificates.
// +k8s:openapi-gen=true
type CertRotateConfigCA struct {
// The requested 'duration' (i.e. lifetime) of the Certificate.
// This should comply with golang's ParseDuration format (https://golang.org/pkg/time/#ParseDuration)
// +kubebuilder:default="48h0m0s"
// +optional
Duration metav1.Duration `json:"duration,omitempty"`
// The amount of time before the currently issued certificate's `notAfter`
// time that we will begin to attempt to renew the certificate.
// This should comply with golang's ParseDuration format (https://golang.org/pkg/time/#ParseDuration)
// +kubebuilder:default="24h0m0s"
// +optional
RenewBefore metav1.Duration `json:"renewBefore,omitempty"`
}
// CertRotateConfigServer contains the tunables for the server TLS certificates.
// Note the shorter defaults compared to CertRotateConfigCA (24h/12h vs 48h/24h).
// +k8s:openapi-gen=true
type CertRotateConfigServer struct {
// The requested 'duration' (i.e. lifetime) of the Certificate.
// This should comply with golang's ParseDuration format (https://golang.org/pkg/time/#ParseDuration)
// +kubebuilder:default="24h0m0s"
// +optional
Duration metav1.Duration `json:"duration,omitempty"`
// The amount of time before the currently issued certificate's `notAfter`
// time that we will begin to attempt to renew the certificate.
// This should comply with golang's ParseDuration format (https://golang.org/pkg/time/#ParseDuration)
// +kubebuilder:default="12h0m0s"
// +optional
RenewBefore metav1.Duration `json:"renewBefore,omitempty"`
}
// HyperConvergedCertConfig holds the CertConfig entries for the HCO operands
// +k8s:openapi-gen=true
type HyperConvergedCertConfig struct {
// CA configuration -
// CA certs are kept in the CA bundle as long as they are valid
// +kubebuilder:default={"duration": "48h0m0s", "renewBefore": "24h0m0s"}
// +optional
CA CertRotateConfigCA `json:"ca,omitempty"`
// Server configuration -
// Certs are rotated and discarded
// +kubebuilder:default={"duration": "24h0m0s", "renewBefore": "12h0m0s"}
// +optional
Server CertRotateConfigServer `json:"server,omitempty"`
}
// HyperConvergedConfig defines a set of configurations to pass to components
type HyperConvergedConfig struct {
// NodePlacement describes node scheduling configuration.
// +optional
NodePlacement *sdkapi.NodePlacement `json:"nodePlacement,omitempty"`
}
// LiveMigrationConfigurations - Live migration limits and timeouts are applied so that migration processes do not
// overwhelm the cluster.
// +k8s:openapi-gen=true
type LiveMigrationConfigurations struct {
// Number of migrations running in parallel in the cluster.
// +optional
// +kubebuilder:default=5
ParallelMigrationsPerCluster *uint32 `json:"parallelMigrationsPerCluster,omitempty"`
// Maximum number of outbound migrations per node.
// +optional
// +kubebuilder:default=2
ParallelOutboundMigrationsPerNode *uint32 `json:"parallelOutboundMigrationsPerNode,omitempty"`
// Bandwidth limit of each migration, in MiB/s.
// +optional
// +kubebuilder:default="64Mi"
// +kubebuilder:validation:Pattern=^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
BandwidthPerMigration *string `json:"bandwidthPerMigration,omitempty"`
// The migration will be canceled if it has not completed in this time, in seconds per GiB
// of memory. For example, a virtual machine instance with 6GiB memory will timeout if it has not completed
// migration in 4800 seconds. If the Migration Method is BlockMigration, the size of the migrating disks is included
// in the calculation.
// +kubebuilder:default=800
// +optional
CompletionTimeoutPerGiB *int64 `json:"completionTimeoutPerGiB,omitempty"`
// The migration will be canceled if memory copy fails to make progress in this time, in seconds.
// +kubebuilder:default=150
// +optional
ProgressTimeout *int64 `json:"progressTimeout,omitempty"`
}
// HyperConvergedFeatureGates is a set of optional feature gates to enable or disable new features that are not enabled
// by default yet.
// +k8s:openapi-gen=true
type HyperConvergedFeatureGates struct {
// Allow migrating a virtual machine with CPU host-passthrough mode. This should be
// enabled only when the Cluster is homogeneous from CPU HW perspective doc here
// +optional
// +kubebuilder:default=false
WithHostPassthroughCPU bool `json:"withHostPassthroughCPU"`
// Allow migrating a virtual machine with SRIOV interfaces.
// When enabled virt-launcher pods of virtual machines with SRIOV
// interfaces run with CAP_SYS_RESOURCE capability.
// This may degrade virt-launcher security.
// +optional
// +kubebuilder:default=false
SRIOVLiveMigration bool `json:"sriovLiveMigration"`
}
// PermittedHostDevices holds information about devices allowed for passthrough
// +k8s:openapi-gen=true
type PermittedHostDevices struct {
// +listType=map
// +listMapKey=pciDeviceSelector
PciHostDevices []PciHostDevice `json:"pciHostDevices,omitempty"`
// +listType=map
// +listMapKey=mdevNameSelector
MediatedDevices []MediatedHostDevice `json:"mediatedDevices,omitempty"`
}
// PciHostDevice represents a host PCI device allowed for passthrough
// +k8s:openapi-gen=true
type PciHostDevice struct {
// a combination of a vendor_id:product_id required to identify a PCI device on a host.
PCIDeviceSelector string `json:"pciDeviceSelector"`
// name by which a device is advertised and being requested
ResourceName string `json:"resourceName"`
// indicates that this resource is being provided by an external device plugin
// +optional
ExternalResourceProvider bool `json:"externalResourceProvider,omitempty"`
// HCO enforces the existence of several PciHostDevice objects. Set disabled field to true instead of removing
// these objects.
// +optional
Disabled bool `json:"disabled,omitempty"`
}
// MediatedHostDevice represents a host mediated device allowed for passthrough
// +k8s:openapi-gen=true
type MediatedHostDevice struct {
// name of a mediated device type required to identify a mediated device on a host
MDEVNameSelector string `json:"mdevNameSelector"`
// name by which a device is advertised and being requested
ResourceName string `json:"resourceName"`
// indicates that this resource is being provided by an external device plugin
// +optional
ExternalResourceProvider bool `json:"externalResourceProvider,omitempty"`
// HCO enforces the existence of several MediatedHostDevice objects. Set disabled field to true instead of removing
// these objects.
// +optional
Disabled bool `json:"disabled,omitempty"`
}
// OperandResourceRequirements is a list of resource requirements for the operand workloads pods
// +k8s:openapi-gen=true
type OperandResourceRequirements struct {
// StorageWorkloads defines the resources requirements for storage workloads. It will propagate to the CDI custom
// resource
// +optional
StorageWorkloads *corev1.ResourceRequirements `json:"storageWorkloads,omitempty"`
}
// HyperConvergedObsoleteCPUs allows avoiding scheduling of VMs for obsolete CPU models
// +k8s:openapi-gen=true
type HyperConvergedObsoleteCPUs struct {
// MinCPUModel is the Minimum CPU model that is used for basic CPU features; e.g. Penryn or Haswell.
// The default value for this field is nil, but in KubeVirt, the default value is "Penryn", if nothing else is set.
// Use this field to override KubeVirt default value.
// +optional
MinCPUModel string `json:"minCPUModel,omitempty"`
// CPUModels is a list of obsolete CPU models. When the node-labeller obtains the list of obsolete CPU models, it
// eliminates those CPU models and creates labels for valid CPU models.
// The default value for this field is nil, however, HCO uses opinionated values, and adding values to this list
// will add them to the opinionated values.
// +optional
CPUModels []string `json:"cpuModels,omitempty"`
}
// StorageImportConfig contains configuration for importing containerized data
// +k8s:openapi-gen=true
type StorageImportConfig struct {
// InsecureRegistries is a list of image registries URLs that are not secured. Setting an insecure registry URL
// in this list allows pulling images from this registry.
// +optional
InsecureRegistries []string `json:"insecureRegistries,omitempty"`
}
//
// HyperConvergedWorkloadUpdateStrategy defines options related to updating a KubeVirt install
//
// +k8s:openapi-gen=true
type HyperConvergedWorkloadUpdateStrategy struct {
// WorkloadUpdateMethods defines the methods that can be used to disrupt workloads
// during automated workload updates.
// When multiple methods are present, the least disruptive method takes
// precedence over more disruptive methods. For example if both LiveMigrate and Shutdown
// methods are listed, only VMs which are not live migratable will be restarted/shutdown.
// An empty list defaults to no automated workload updating.
//
// +listType=atomic
// +kubebuilder:default={"LiveMigrate", "Evict"}
// +optional
WorkloadUpdateMethods []string `json:"workloadUpdateMethods,omitempty"`
// BatchEvictionSize Represents the number of VMIs that can be forced updated per
// the BatchEvictionInterval interval
//
// +kubebuilder:default=10
// +optional
BatchEvictionSize *int `json:"batchEvictionSize,omitempty"`
// BatchEvictionInterval Represents the interval to wait before issuing the next
// batch of shutdowns
//
// +kubebuilder:default="1m"
// +optional
BatchEvictionInterval *metav1.Duration `json:"batchEvictionInterval,omitempty"`
}
// HyperConvergedStatus defines the observed state of HyperConverged
// +k8s:openapi-gen=true
type HyperConvergedStatus struct {
// Conditions describes the state of the HyperConverged resource.
// +patchMergeKey=type
// +patchStrategy=merge
// +optional
Conditions []conditionsv1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
// RelatedObjects is a list of objects created and maintained by this
// operator. Object references will be added to this list after they have
// been created AND found in the cluster.
// +optional
RelatedObjects []corev1.ObjectReference `json:"relatedObjects,omitempty"`
// Versions is a list of HCO component versions, as name/version pairs. The version with a name of "operator"
// is the HCO version itself, as described here:
// https://github.com/openshift/cluster-version-operator/blob/master/docs/dev/clusteroperator.md#version
// +optional
Versions Versions `json:"versions,omitempty"`
}
// UpdateVersion records the version of the named component in the status,
// lazily allocating the Versions list on first use.
func (hcs *HyperConvergedStatus) UpdateVersion(name, version string) {
	if hcs.Versions == nil {
		hcs.Versions = make(Versions, 0)
	}
	hcs.Versions.updateVersion(name, version)
}
// GetVersion looks up the version recorded for the named component; the
// boolean reports whether an entry was found.
func (hcs *HyperConvergedStatus) GetVersion(name string) (string, bool) {
	version, found := hcs.Versions.getVersion(name)
	return version, found
}
// Version is one name/version pair describing an HCO component.
type Version struct {
Name string `json:"name,omitempty"`
Version string `json:"version,omitempty"`
}
// newVersion builds a Version from its two fields.
func newVersion(name, version string) Version {
	v := Version{}
	v.Name = name
	v.Version = version
	return v
}
// Versions is the list of component versions kept in the status.
type Versions []Version
// updateVersion overwrites the entry whose Name matches, or appends a new
// entry when none matches.
func (vs *Versions) updateVersion(name, version string) {
	for idx := range *vs {
		if (*vs)[idx].Name == name {
			(*vs)[idx].Version = version
			return
		}
	}
	*vs = append(*vs, newVersion(name, version))
}
// getVersion reports the version stored under name and whether it was found.
func (vs *Versions) getVersion(name string) (string, bool) {
	for idx := range *vs {
		if (*vs)[idx].Name == name {
			return (*vs)[idx].Version, true
		}
	}
	return "", false
}
const (
// ConditionReconcileComplete communicates the status of the HyperConverged resource's
// reconcile functionality. Basically, is the Reconcile function running to completion.
ConditionReconcileComplete conditionsv1.ConditionType = "ReconcileComplete"
// ConditionTaintedConfiguration indicates that a hidden/debug configuration
// has been applied to the HyperConverged resource via a specialized annotation.
// This condition is exposed only when its value is True, and is otherwise hidden.
ConditionTaintedConfiguration conditionsv1.ConditionType = "TaintedConfiguration"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HyperConverged is the Schema for the hyperconvergeds API
// +k8s:openapi-gen=true
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
// +kubebuilder:resource:scope=Namespaced,categories={all},shortName={hco,hcos}
// +kubebuilder:subresource:status
type HyperConverged struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// +kubebuilder:default={"certConfig": {"ca": {"duration": "48h0m0s", "renewBefore": "24h0m0s"}, "server": {"duration": "24h0m0s", "renewBefore": "12h0m0s"}}, "featureGates": {"withHostPassthroughCPU": false, "sriovLiveMigration": false}, "liveMigrationConfig": {"bandwidthPerMigration": "64Mi", "completionTimeoutPerGiB": 800, "parallelMigrationsPerCluster": 5, "parallelOutboundMigrationsPerNode": 2, "progressTimeout": 150}}
// +optional
Spec HyperConvergedSpec `json:"spec,omitempty"`
Status HyperConvergedStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HyperConvergedList contains a list of HyperConverged
type HyperConvergedList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []HyperConverged `json:"items"`
}
// init registers the HyperConverged types with the scheme builder so they can
// be serialized/deserialized by the API machinery.
func init() {
SchemeBuilder.Register(&HyperConverged{}, &HyperConvergedList{})
}
|
package command
import (
"fmt"
"strconv"
"github.com/spf13/cobra"
"go.mercari.io/hcledit"
)
// CreateOptions holds the flag values for the "create" subcommand.
type CreateOptions struct {
Type string // target type for the value: string, int, bool or raw
After string // field key relative to which the new field is positioned — see hcledit.WithAfter
Comment string // comment text inserted before the new field (comment symbols required)
}
// NewCmdCreate returns the "create" cobra subcommand, which runs an address
// query on an HCL file and inserts a new field with the given value.
func NewCmdCreate() *cobra.Command {
	options := &CreateOptions{}
	cmd := &cobra.Command{
		Use:   "create <query> <value> <file>",
		Short: "Create a new field",
		Long:  `Runs an address query on a hcl file and create new field with given value.`,
		Args:  cobra.ExactArgs(3),
		RunE: func(_ *cobra.Command, args []string) error {
			return runCreate(options, args)
		},
	}
	flags := cmd.Flags()
	flags.StringVarP(&options.Type, "type", "t", "string", "Type of the value")
	flags.StringVarP(&options.After, "after", "a", "", "Field key which before the value will be created")
	flags.StringVarP(&options.Comment, "comment", "c", "", "Comment to be inserted before the field added. Comment symbols like // are required")
	return cmd
}
// runCreate reads the HCL file, converts the raw value string to the type
// requested via --type, creates the field at the queried address, and writes
// the file back in place.
//
// Errors are now wrapped with %w (instead of %s) so callers can inspect the
// underlying cause with errors.Is/errors.As.
func runCreate(opts *CreateOptions, args []string) error {
	query, valueStr, filePath := args[0], args[1], args[2]
	editor, err := hcledit.ReadFile(filePath)
	if err != nil {
		return fmt.Errorf("failed to read file: %w", err)
	}
	value, err := convert(valueStr, opts.Type)
	if err != nil {
		return fmt.Errorf("failed to convert input to specific type: %w", err)
	}
	if err := editor.Create(query, value, hcledit.WithAfter(opts.After), hcledit.WithComment(opts.Comment)); err != nil {
		return fmt.Errorf("failed to create: %w", err)
	}
	return editor.OverWriteFile()
}
// convert parses inputStr according to typeStr ("string", "int", "bool" or
// "raw") and returns the correspondingly typed value.
func convert(inputStr, typeStr string) (interface{}, error) {
	switch typeStr {
	case "raw":
		return hcledit.RawVal(inputStr), nil
	case "bool":
		return strconv.ParseBool(inputStr)
	case "int":
		return strconv.Atoi(inputStr)
	case "string":
		return inputStr, nil
	}
	return nil, fmt.Errorf("unsupported type: %s", typeStr)
}
|
package logger
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
"path/filepath"
"strings"
)
// logger is the process-wide sugared logger; it is nil until StartLogger runs.
var logger *zap.SugaredLogger
// levelMap translates textual level names into zap logging levels.
var levelMap = map[string]zapcore.Level{
"debug": zapcore.DebugLevel,
"info": zapcore.InfoLevel,
"warn": zapcore.WarnLevel,
"error": zapcore.ErrorLevel,
"dpanic": zapcore.DPanicLevel,
"panic": zapcore.PanicLevel,
"fatal": zapcore.FatalLevel,
}
// getLoggerLevel maps a level name to its zap level, defaulting to info for
// unknown names.
func getLoggerLevel(lvl string) zapcore.Level {
	level, ok := levelMap[lvl]
	if !ok {
		return zapcore.InfoLevel
	}
	return level
}
func StartLogger(logName string, logLevel string) {
appRootPath := "/var/log"
fileName := strings.Join([]string{appRootPath, "mysql-agent", logName}, string(filepath.Separator))
level := getLoggerLevel(logLevel)
syncWriter := zapcore.AddSync(&lumberjack.Logger{
Filename: fileName,
MaxSize: 100,
MaxAge: 7,
LocalTime: true,
Compress: true,
})
encoder := zap.NewProductionEncoderConfig()
encoder.EncodeTime = zapcore.ISO8601TimeEncoder
core := zapcore.NewCore(zapcore.NewConsoleEncoder(encoder), syncWriter, zap.NewAtomicLevelAt(level))
zapLogger := zap.New(core, zap.AddCaller(), zap.AddCallerSkip(1))
logger = zapLogger.Sugar()
}
// Info logs at info level with Printf-style formatting.
// NOTE: all of these helpers panic if StartLogger has not been called
// (logger is nil until then).
func Info(msg string, args ...interface{}) {
logger.Infof(msg, args...)
}
// Warn logs at warn level with Printf-style formatting.
func Warn(msg string, args ...interface{}) {
logger.Warnf(msg, args...)
}
// Error logs at error level with Printf-style formatting.
func Error(msg string, args ...interface{}) {
logger.Errorf(msg, args...)
}
// Fatal logs at fatal level and then exits the process (zap Fatalf semantics).
func Fatal(msg string, args ...interface{}) {
logger.Fatalf(msg, args...)
}
// Panic logs at panic level and then panics (zap Panicf semantics).
func Panic(msg string, args ...interface{}) {
logger.Panicf(msg, args...)
}
// DPanic logs at dpanic level; zap panics in development mode only.
func DPanic(msg string, args ...interface{}) {
logger.DPanicf(msg, args...)
}
|
package controllers
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"user-auth/model"
"user-auth/rand"
)
// jsonData is the expected JSON request payload for account creation.
type jsonData struct {
Email string `json:"email"`
Password string `json:"password"`
}
// Users bundles the HTTP handlers for user signup, login, and session lookup.
type Users struct {
us *model.UserService // backing user store/service
}
// NewUsers constructs a Users controller backed by the given UserService.
func NewUsers(us *model.UserService) *Users {
return &Users{us: us}
}
// Create handles account signup: it decodes the email/password payload,
// creates the user record, and signs the new user in via a remember-token
// cookie.
//
// Fix: the json.Unmarshal error was previously ignored, so malformed JSON
// silently created a user with empty credentials; it now returns 400.
func (u *Users) Create(w http.ResponseWriter, r *http.Request) {
	var u1 jsonData
	b, err := ioutil.ReadAll(r.Body)
	defer r.Body.Close()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := json.Unmarshal(b, &u1); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	user := model.User{
		Email:    u1.Email,
		Password: u1.Password,
	}
	if err := u.us.Create(&user); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := u.signIn(w, &user); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, "Successfully Created the account")
}
// signIn ensures the user has a remember token (generating and persisting a
// new one when absent) and sets it as a cookie on the response.
func (u *Users) signIn(w http.ResponseWriter, user *model.User) error {
	if user.Remember == "" {
		token, err := rand.RememberToken()
		if err != nil {
			return err
		}
		user.Remember = token
		if err := u.us.Update(user); err != nil {
			return err
		}
	}
	http.SetCookie(w, &http.Cookie{
		Name:  "remember_token",
		Value: user.Remember,
	})
	return nil
}
// Login authenticates the submitted email/password and, on success, signs the
// user in via a remember-token cookie.
//
// Fix: the json.Unmarshal error was previously ignored, so malformed JSON was
// treated as empty credentials; it now returns 400.
func (u *Users) Login(w http.ResponseWriter, r *http.Request) {
	b, err := ioutil.ReadAll(r.Body)
	defer r.Body.Close()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	var UserDetail model.User
	if err := json.Unmarshal(b, &UserDetail); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	user, err := u.us.Authenticate(UserDetail.Email, UserDetail.Password)
	if err != nil {
		switch err {
		case model.ErrNotFound:
			fmt.Fprintln(w, "Invalid email address")
		case model.ErrInvalidPassword:
			fmt.Fprintln(w, "Invalid password")
		default:
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	err = u.signIn(w, user)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, "Logged in, success", user)
}
// Home renders a placeholder home page.
func (u *Users) Home(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "This is home page")
}
// SayHello greets the user identified by the remember_token cookie.
//
// Fix: both the cookie-lookup and token-lookup errors were previously
// ignored, which panicked (nil cookie / nil user) whenever the cookie was
// missing or the token unknown; both cases now return 401.
func (u *Users) SayHello(w http.ResponseWriter, r *http.Request) {
	cookie, err := r.Cookie("remember_token")
	if err != nil {
		http.Error(w, "not signed in", http.StatusUnauthorized)
		return
	}
	user, err := u.us.ByRememberToken(cookie.Value)
	if err != nil {
		http.Error(w, "not signed in", http.StatusUnauthorized)
		return
	}
	fmt.Fprintln(w, "Hello how are you", user.Email)
}
|
package ipc
import (
"errors"
"github.com/hokora/bank/util"
)
// Context carries one received IPC packet: the connection it arrived on, the
// raw payload, and the request ID (Reply refuses to answer when reqID is 0).
type Context struct {
from *Conn // originating connection, used by Reply
Packet []byte // raw packet payload
reqID uint16 // request correlation ID; 0 means not a request
}
// Reply sends a response for this request back over the originating
// connection: a uint16 request ID, a success flag, then the payload bytes.
// It fails when the packet carried no request ID (reqID == 0).
func (ctx *Context) Reply(success bool, b []byte) (int, error) {
	if ctx.reqID == 0 {
		return 0, errors.New("cannot reply")
	}
	// 2 bytes request ID + 1 byte success flag + payload.
	writer := util.NewPacketWriter(2 + 1 + len(b))
	writer.AppendUInt16(ctx.reqID)
	writer.AppendBool(success)
	writer.AppendBytes(b)
	return ctx.from.Send(writer.Pack())
}
|
package web
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestUrlFor verifies that the "login" route resolves to /login.
func TestUrlFor(t *testing.T) {
	got := UrlFor("login")
	assert.Equal(t, "/login", got)
}
|
package main
// Use `dev_appserver.py --default_gcs_bucket_name GCS_BUCKET_NAME`
// when running locally.
import (
"fmt"
"html/template"
"io"
"net/http"
"golang.org/x/net/context"
"google.golang.org/appengine"
"google.golang.org/appengine/file"
"google.golang.org/appengine/log"
)
// URL is the endpoint the upload form posts to.
const URL = "http://localhost:8080/nmg/image/team"

// serveError logs err and writes a generic plain-text 500 response.
//
// Fix: headers must be set before WriteHeader; the original set Content-Type
// after WriteHeader, so the header was silently dropped.
func serveError(ctx context.Context, w http.ResponseWriter, err error) {
	w.Header().Set("Content-Type", "text/plain")
	w.WriteHeader(http.StatusInternalServerError)
	io.WriteString(w, "Internal Server Error")
	log.Errorf(ctx, "%v", err)
}
// rootTemplate renders a minimal multipart upload form; the template argument
// ({{.}}) is the URL the form posts to.
var rootTemplate = template.Must(template.New("root").Parse(`
<html><body>
<form action="{{.}}" method="POST" enctype="multipart/form-data">
Upload File: <input type="file" name="file"><br>
<input type="submit" name="submit" value="Submit">
</form></body></html>
`))
// handleRoot looks up the app's default GCS bucket and serves the upload form
// pointing at the fixed URL constant.
func handleRoot(w http.ResponseWriter, r *http.Request) {
ctx := appengine.NewContext(r)
bucket, err := file.DefaultBucketName(ctx)
if err != nil {
// NOTE(review): execution deliberately continues with an empty bucket
// name here — the bucket is currently only printed, not used. Confirm
// this is intentional before wiring uploads to it.
log.Errorf(ctx, "failed to get default GCS bucket name: %v", err)
}
// Debug output left over from development.
fmt.Println()
fmt.Println("bucket", bucket)
fmt.Println()
// uploadURL, err := blobstore.UploadURL(ctx, "/upload", nil)
// if err != nil {
// serveError(ctx, w, err)
// return
// }
w.Header().Set("Content-Type", "text/html")
err = rootTemplate.Execute(w, URL)
if err != nil {
log.Errorf(ctx, "%v", err)
}
}
// handleServe is a stub for serving uploaded files; not yet implemented.
func handleServe(w http.ResponseWriter, r *http.Request) {
}
// handleUpload is a stub for receiving uploads; not yet implemented.
func handleUpload(w http.ResponseWriter, r *http.Request) {
// ctx := appengine.NewContext(r)
}
// main registers the HTTP routes and hands control to the App Engine runtime.
func main() {
http.HandleFunc("/", handleRoot)
http.HandleFunc("/serve/", handleServe)
http.HandleFunc("/upload", handleUpload)
appengine.Main()
}
|
package server
import (
"encoding/json"
"fmt"
"net/http"
)
// HTTPError is an error that carries everything needed to render an HTTP
// error response: a status code, a detail message, an optional wrapped
// error, and an optional custom response payload.
type HTTPError struct {
status int // HTTP status code to respond with
detail string // human-readable detail message
err error // wrapped underlying error (may be nil)
payload interface{} // custom response body (may be nil)
}
// NewHTTPError builds an HTTPError from a status code, a detail message, an
// optional underlying error, and an optional response payload.
func NewHTTPError(status int, detail string, err error, payload interface{}) error {
	e := &HTTPError{}
	e.status = status
	e.detail = detail
	e.err = err
	e.payload = payload
	return e
}
// Error implements the error interface, preferring the wrapped error over the
// detail message when one is present.
func (e *HTTPError) Error() string {
	if e.err != nil {
		return fmt.Sprintf("response with status %d: %s", e.status, e.err)
	}
	return fmt.Sprintf("response with status %d: %s", e.status, e.detail)
}
// Unwrap exposes the underlying error for errors.Is/errors.As chains.
func (e *HTTPError) Unwrap() error {
return e.err
}
// ResponseBody returns the value to serialize for this error: the explicit
// payload when present, otherwise an ErrorBody whose message comes from the
// wrapped error (when set) or the detail string.
func (e *HTTPError) ResponseBody() interface{} {
	if e.payload != nil {
		return e.payload
	}
	msg := e.detail
	if e.err != nil {
		msg = e.err.Error()
	}
	return ErrorBody{Error: msg}
}
// Response writes this error to w: raw bytes when the body is a Raw payload,
// otherwise JSON-encoded.
//
// Fix: headers must be set before WriteHeader; the original set Content-Type
// after WriteHeader, so the header was silently dropped.
func (e *HTTPError) Response(w http.ResponseWriter) error {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(e.Status())
	body := e.ResponseBody()
	var err error
	if bytes, ok := body.(Raw); ok {
		_, err = w.Write(bytes)
	} else {
		err = json.NewEncoder(w).Encode(body)
	}
	return err
}
// Status returns the HTTP status code to respond with.
func (e *HTTPError) Status() int {
return e.status
}
// Err returns the wrapped underlying error (may be nil).
func (e *HTTPError) Err() error {
return e.err
}
// Detail returns the human-readable detail message.
func (e *HTTPError) Detail() string {
return e.detail
}
// Data returns the custom response payload (may be nil).
func (e *HTTPError) Data() interface{} {
return e.payload
}
// ErrorBody is the default JSON response body used when no custom payload is
// attached to an HTTPError.
type ErrorBody struct {
Error string
}
// ErrorBadRequest - 400. Convenience constructor with no underlying error.
func ErrorBadRequest(msg string, payload interface{}) error {
return NewHTTPError(http.StatusBadRequest, msg, nil, payload)
}
// ErrorUnauthorized - 401. Convenience constructor with no underlying error.
func ErrorUnauthorized(msg string, payload interface{}) error {
return NewHTTPError(http.StatusUnauthorized, msg, nil, payload)
}
// ErrorForbidden - 403. Convenience constructor with no underlying error.
func ErrorForbidden(msg string, payload interface{}) error {
return NewHTTPError(http.StatusForbidden, msg, nil, payload)
}
// ErrorNotFound - 404. Convenience constructor with no underlying error.
func ErrorNotFound(msg string, payload interface{}) error {
return NewHTTPError(http.StatusNotFound, msg, nil, payload)
}
|
package main
import (
"bufio"
"fmt"
"log"
"io"
"os"
"strconv"
)
func IntsFrom(r io.Reader) (numbers []int) {
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
x, err := strconv.Atoi(scanner.Text())
numbers = append(numbers, x)
if err != nil {
fmt.Println(err)
return numbers
}
}
return numbers
}
// CountInversions counts the pairs (i, j) with i < j and numbers[i] >
// numbers[j] via merge sort, returning the count and the sorted slice.
//
// Bugs fixed: the original called log.Fatal when it met two EQUAL elements
// (equal elements are not inversions — take from the left to stay stable),
// and recursed forever on an empty slice (median of 0 elements is 0, so
// both halves were the empty slice again).
func CountInversions(numbers []int) (count int, merged []int) {
	if len(numbers) <= 1 {
		return 0, numbers
	}
	median := (len(numbers) + 1) / 2
	lCount, left := CountInversions(numbers[:median])
	rCount, right := CountInversions(numbers[median:])
	count = lCount + rCount

	merged = make([]int, 0, len(numbers))
	i, j := 0, 0
	for i < len(left) && j < len(right) {
		if left[i] <= right[j] {
			merged = append(merged, left[i])
			i++
		} else {
			// right[j] jumps ahead of every remaining left element:
			// each of those pairs is one inversion.
			merged = append(merged, right[j])
			j++
			count += len(left) - i
		}
	}
	// At most one of these appends anything.
	merged = append(merged, left[i:]...)
	merged = append(merged, right[j:]...)
	return count, merged
}
// main reads integers from the file named by the first CLI argument and
// prints the number of inversions in the sequence.
//
// Fixed: the file is now closed, the program stops on an open error
// instead of continuing with an unusable handle, and the dead duplicate
// err check after IntsFrom (err could not have changed) was removed.
func main() {
	filename := os.Args[1]
	file, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	ints := IntsFrom(file)
	inversions, list := CountInversions(ints)
	// Sanity check: merging must preserve length.
	if len(list) != len(ints) {
		log.Fatal("len(list) != len(ints)")
	}
	fmt.Println(inversions)
}
|
package api
// GameState is the JSON snapshot sent to API clients: a time value and the
// full game state. (The unit of Time — tick vs. seconds — is defined by
// the producer; not determinable from this file.)
type GameState struct {
	Time int  `json:"time"`
	Game Game `json:"game"`
}
|
package testdata
import (
"github.com/frk/gosql/internal/testdata/common"
)
// InsertResultErrorInfoHandlerIteratorQuery is a generator test fixture:
// an INSERT over test_user whose RETURNING rows are consumed through an
// iterator and whose errors go through an ErrorInfoHandler. Exact field
// semantics are defined by the gosql generator that reads this type.
type InsertResultErrorInfoHandlerIteratorQuery struct {
	Users []*common.User `rel:"test_user:u"`
	result common.User2Iterator
	erh common.ErrorInfoHandler
}
|
package log
import (
"io/ioutil"
"os"
"testing"
api "github.com/alexeyqian/proglog/api/v1"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
)
// fn is the signature shared by every log scenario below.
type fn func(*testing.T, *Log)

// TestLog runs each scenario as a subtest against a fresh Log backed by a
// temporary directory, so scenarios cannot interfere with each other.
func TestLog(t *testing.T) {
	scenarios := map[string]fn{
		"a": testAppendRead,
		"b": testOutOfRangeErr,
		"c": testInitExisting,
		"d": testReader,
		"e": testTrancate,
	}
	for scenario, run := range scenarios {
		t.Run(scenario, func(t *testing.T) {
			dir, err := ioutil.TempDir("", "store-test")
			require.NoError(t, err)
			defer os.RemoveAll(dir)

			c := Config{}
			c.Segment.MaxStoreBytes = 32
			log, err := NewLog(dir, c)
			require.NoError(t, err)

			run(t, log)
		})
	}
}
// testAppendRead appends one record to an empty log and verifies it comes
// back at offset 0 with the same value.
func testAppendRead(t *testing.T, log *Log) {
	rec := api.Record{
		Value: []byte("hello world"),
	}
	off, err := log.Append(&rec)
	require.NoError(t, err)
	// First append to an empty log must land at offset 0.
	require.Equal(t, uint64(0), off)
	readRec, err := log.Read(off)
	require.NoError(t, err)
	require.Equal(t, rec.Value, readRec.Value)
}
// testOutOfRangeErr verifies that reading past the highest offset fails
// with api.ErrOffsetOutOfRange carrying the requested offset.
//
// Fixed: the original's bare type assertion err.(api.ErrOffsetOutOfRange)
// PANICKED (instead of failing the test) when err was nil or of another
// type, and the require.Error check had been commented out.
func testOutOfRangeErr(t *testing.T, log *Log) {
	read, err := log.Read(1)
	require.Nil(t, read)
	require.Error(t, err)
	apiErr, ok := err.(api.ErrOffsetOutOfRange)
	require.True(t, ok, "expected api.ErrOffsetOutOfRange, got %T", err)
	require.Equal(t, uint64(1), apiErr.Offset)
}
// testInitExisting appends three records, closes the log, re-opens it from
// the same directory, and verifies the offset range survived the restart.
//
// Bug fixed: the original discarded the re-opened log (`_, err = NewLog`)
// and re-checked the offsets on the OLD, closed instance, so the
// "init from existing state" path was never actually exercised.
func testInitExisting(t *testing.T, log *Log) {
	rec := api.Record{
		Value: []byte("hello world"),
	}
	for i := 0; i < 3; i++ {
		_, err := log.Append(&rec)
		require.NoError(t, err)
	}
	require.NoError(t, log.Close())

	off, err := log.LowestOffset()
	require.NoError(t, err)
	require.Equal(t, uint64(0), off)
	off, err = log.HighestOffset()
	require.NoError(t, err)
	require.Equal(t, uint64(2), off)

	// Re-open from the same directory and verify the NEW instance.
	reopened, err := NewLog(log.Dir, log.Config)
	require.NoError(t, err)
	off, err = reopened.LowestOffset()
	require.NoError(t, err)
	require.Equal(t, uint64(0), off)
	off, err = reopened.HighestOffset()
	require.NoError(t, err)
	require.Equal(t, uint64(2), off)
}
// testReader appends one record, then reads the whole log back through the
// io.Reader returned by log.Reader() and decodes the record from the raw
// store bytes.
func testReader(t *testing.T, log *Log) {
	rec := api.Record{
		Value: []byte("HELLO WORLD"),
	}
	off, err := log.Append(&rec)
	require.NoError(t, err)
	require.Equal(t, uint64(0), off)
	reader := log.Reader()
	b, err := ioutil.ReadAll(reader)
	require.NoError(t, err)
	rec2 := api.Record{}
	// Skip the length prefix (lenWidth bytes) the store writes before
	// each record, then unmarshal the protobuf payload.
	err = proto.Unmarshal(b[lenWidth:], &rec2)
	require.NoError(t, err)
	require.Equal(t, rec.Value, rec2.Value)
}
// testTrancate (sic — name kept because TestLog registers it by this name)
// appends three records, truncates segments holding offsets below 1, and
// verifies that offset 0 is no longer readable.
func testTrancate(t *testing.T, log *Log) {
	rec := api.Record{
		Value: []byte("hello world"),
	}
	for i := 0; i < 3; i++ {
		_, err := log.Append(&rec)
		require.NoError(t, err)
	}
	// remove old segments below 1
	err := log.Truncate(1)
	require.NoError(t, err)
	_, err = log.Read(0)
	require.Error(t, err)
}
|
package leptonica
/*
#cgo LDFLAGS: -llept
#include "leptonica/allheaders.h"
#include <stdlib.h>
l_uint8* uglycast(void* value) { return (l_uint8*)value; }
*/
import "C"
import (
"errors"
"sync"
"unsafe"
"fmt"
)
// goPix wraps a Leptonica PIX pointer. Free must be called to release the
// underlying C memory; the mutex makes Free safe to call concurrently.
type goPix struct {
	cPix *C.PIX // owned C pixel buffer; freed by Free via pixDestroy
	closed bool // set once the PIX has been destroyed
	lock sync.Mutex // guards cPix/closed during Free
}
// Deletes the pic, this must be called
func (p *goPix) Free() {
p.lock.Lock()
defer p.lock.Unlock()
if !p.closed {
// LEPT_DLL extern void pixDestroy ( PIX **ppix );
C.pixDestroy(&p.cPix)
C.free(unsafe.Pointer(p.cPix))
p.closed = true
}
}
// LEPT_DLL extern PIX * pixRead ( const char *filename );
// NewPixFromFile creates a new goPix by loading an image from filename.
// Returns an error if Leptonica cannot read the file. The caller owns the
// returned pix and must call Free on it.
func NewPixFromFile(filename string) (*goPix, error) {
	cFilename := C.CString(filename)
	defer C.free(unsafe.Pointer(cFilename))
	// create new PIX
	CPIX := C.pixRead(cFilename)
	if CPIX == nil {
		return nil, errors.New("Unable to read file " + filename)
	}
	// all done
	pix := &goPix{
		cPix: CPIX,
	}
	return pix, nil
}
// NewPixReadMem creates a new goPix by decoding an in-memory image. The
// caller owns the returned pix and must call Free on it.
//
// Fixed: the original indexed (*image)[0] unconditionally and PANICKED on
// a nil or empty slice; that case now returns the same "not a valid image"
// error.
func NewPixReadMem(image *[]byte) (*goPix, error) {
	if image == nil || len(*image) == 0 {
		return nil, errors.New("Not a valid image file")
	}
	ptr := C.uglycast(unsafe.Pointer(&(*image)[0]))
	CPIX := C.pixReadMem(ptr, C.size_t(len(*image)))
	if CPIX == nil {
		return nil, errors.New("Not a valid image file")
	}
	pix := &goPix{
		cPix: CPIX,
	}
	return pix, nil
}
// PixWriteMemPnm serializes the pix to PNM and returns exactly the bytes
// Leptonica produced.
//
// Bugs fixed: pixWriteMemPnm ALLOCATES its own output buffer and reports
// its size; the original passed in a 20MB Go slice, ignored the returned
// size, returned the still-zeroed Go slice (never the real output), and
// leaked the C buffer. We now copy the C buffer out and free it.
func (p *goPix) PixWriteMemPnm() ([]byte, error) {
	var data *C.l_uint8
	var size C.size_t
	if C.pixWriteMemPnm(&data, &size, p.cPix) == 1 {
		return nil, errors.New(`Failed writing PBM to bytes`)
	}
	defer C.free(unsafe.Pointer(data))
	return C.GoBytes(unsafe.Pointer(data), C.int(size)), nil
}
// -------------- IMAGE FUNCTIONS -------------
// SkewAngle estimates the page skew via pixFindSkew and returns the angle
// and Leptonica's confidence score. Units (degrees) per Leptonica docs —
// confirm against the linked liblept version.
func (p *goPix) SkewAngle() (float32, float32) {
	var angle, conf C.l_float32
	C.pixFindSkew(p.cPix, &angle, &conf)
	return float32(angle), float32(conf)
}
// SkewAngleSlow runs the more exhaustive sweep-and-search skew detection
// (pixFindSkewSweepAndSearch) with fixed sweep parameters; slower but
// presumably more accurate than SkewAngle.
func (p *goPix) SkewAngleSlow() (float32, float32) {
	var angle, conf C.l_float32
	C.pixFindSkewSweepAndSearch(p.cPix, &angle, &conf, 1, 1, 10, 1, 0.01)
	return float32(angle), float32(conf)
}
// OrientationAngle detects the text orientation of the pix (up / left /
// upside-down / right), rotates it to upright, and returns the rotated
// pix, the rotation in radians, and Leptonica's orientation code.
// NOTE(review): the deskew step is commented out, so the returned pix
// shares p.cPix unless a rotation branch replaced it — callers should not
// Free both p and the result unless a rotation actually happened; confirm
// ownership expectations with callers.
func (p *goPix) OrientationAngle() (*goPix, float32, int, error) {
	//var a, c C.l_float32
	var a C.l_float32 = 0
	/*
	newpix := C.pixDeskewGeneral(p.cPix, 1, 7, 0.01, 1, 0, &a, &c)
	if newpix == nil {
		return p, 0, 0, errors.New(`Deskew failed`)
	}
	p.Free()
	*/
	newpix := p.cPix
	var upconf, leftconf C.l_float32
	// Detect up/left confidences; a return of 1 signals failure.
	err := C.pixOrientDetect(newpix, &upconf, &leftconf, 1, 0)
	if err == 1 {
		// pixDestroy nulls newpix, so the following C.free is a no-op.
		C.pixDestroy(&newpix)
		C.free(unsafe.Pointer(newpix))
		return nil, 0, 0, errors.New(`Orientation detection failed`)
	}
	fmt.Println(float32(upconf), float32(leftconf))
	var orient C.l_int32
	err = C.makeOrientDecision(upconf, leftconf, 0.001, 0.001, &orient, 0)
	if err == 1 {
		C.pixDestroy(&newpix)
		C.free(unsafe.Pointer(newpix))
		return nil, 0, 0, errors.New(`Orientation decision failed`)
	}
	fmt.Println(int(orient))
	radians := float32(a)
	orientation := int(orient)
	// Rotate back to upright; each branch adds the corresponding angle
	// (pi/2, pi, 3*pi/2) and replaces newpix with the rotated copy.
	switch orientation {
	case 2: radians += 1.57079633 // left-facing
		tmp := C.pixRotate90(newpix, 1)
		if tmp != newpix && tmp != nil {
			C.pixDestroy(&newpix)
			C.free(unsafe.Pointer(newpix))
			newpix = tmp
		}
	case 3: radians += 3.14159265 // upside-down
		tmp := C.pixRotate180(newpix, newpix)
		if tmp != newpix && tmp != nil {
			C.pixDestroy(&newpix)
			C.free(unsafe.Pointer(newpix))
			newpix = tmp
		}
	case 4: radians += 4.71238898 // right-facing
		tmp := C.pixRotate90(newpix, -1)
		if tmp != newpix && tmp != nil {
			C.pixDestroy(&newpix)
			C.free(unsafe.Pointer(newpix))
			newpix = tmp
		}
	}
	// Normalize to [0, 2*pi).
	for radians > 6.28318531 {
		radians -= 6.28318531
	}
	pix := &goPix{
		cPix: newpix,
	}
	return pix, radians, orientation, nil
}
|
package httpd
import (
"fmt"
"os"
"gopkg.in/yaml.v2"
)
// Buildpack mirrors the structure of a buildpack.yml file.
type Buildpack struct {
	HTTPD BuildpackHTTPD `yaml:"httpd"`
}
// BuildpackHTTPD holds the httpd section of buildpack.yml.
type BuildpackHTTPD struct {
	Version string `yaml:"version"`
}
// ParseBuildpack opens the buildpack.yml at path and decodes it into a
// Buildpack. Both open and decode failures are wrapped with the same
// context message.
func ParseBuildpack(path string) (Buildpack, error) {
	var buildpack Buildpack

	file, err := os.Open(path)
	if err != nil {
		return Buildpack{}, fmt.Errorf("failed to parse buildpack.yml: %w", err)
	}
	defer file.Close()

	if err := yaml.NewDecoder(file).Decode(&buildpack); err != nil {
		return Buildpack{}, fmt.Errorf("failed to parse buildpack.yml: %w", err)
	}
	return buildpack, nil
}
|
package passwordcombiner
import (
"crypto/sha256"
"errors"
"fmt"
"github.com/cloudfoundry-incubator/cloud-service-broker/db_service/models"
"github.com/cloudfoundry-incubator/cloud-service-broker/internal/encryption/gcmencryptor"
"github.com/cloudfoundry-incubator/cloud-service-broker/internal/encryption/passwordparser"
"golang.org/x/crypto/pbkdf2"
"gorm.io/gorm"
)
// Combine merges the configured password entries with the password
// metadata already stored in the database. Entries with stored metadata
// are validated against it; new entries get fresh metadata persisted. It
// fails when the stored primary password is absent from the configuration,
// since the database could not be decrypted without it.
func Combine(db *gorm.DB, parsed []passwordparser.PasswordEntry, storedPassMetadata []models.PasswordMetadata) (CombinedPasswords, error) {
	stored, storedPrimary, err := storedWithPrimary(storedPassMetadata)
	if err != nil {
		return nil, err
	}

	labels := make(map[string]struct{})
	var result CombinedPasswords
	for _, p := range parsed {
		labels[p.Label] = struct{}{}

		var entry CombinedPassword
		if s, ok := stored[p.Label]; ok {
			entry, err = mergeWithStoredMetadata(s, p)
		} else {
			entry, err = saveNewPasswordMetadata(db, p)
		}
		if err != nil {
			return nil, err
		}
		result = append(result, entry)
	}

	if _, ok := labels[storedPrimary]; storedPrimary != "" && !ok {
		return nil, fmt.Errorf("the password labelled %q must be supplied to decrypt the database", storedPrimary)
	}
	return result, nil
}
// saveNewPasswordMetadata creates and persists metadata (random salt plus
// an encrypted canary used later to verify the password) for a password
// not previously seen, and returns the combined entry with a ready
// encryptor.
func saveNewPasswordMetadata(db *gorm.DB, p passwordparser.PasswordEntry) (CombinedPassword, error) {
	salt, err := randomSalt()
	if err != nil {
		return CombinedPassword{}, err
	}
	e := encryptor(p.Secret, salt)
	canary, err := encryptCanary(e)
	if err != nil {
		return CombinedPassword{}, err
	}
	err = db.Create(&models.PasswordMetadata{
		Label:   p.Label,
		Salt:    salt,
		Canary:  canary,
		Primary: false, // Primary updated after successful rotation
	}).Error
	if err != nil {
		return CombinedPassword{}, err
	}
	return CombinedPassword{
		Label:             p.Label,
		Secret:            p.Secret,
		Salt:              salt,
		Encryptor:         e,
		configuredPrimary: p.Primary,
	}, nil
}
// mergeWithStoredMetadata validates a configured password against its
// stored metadata by decrypting the canary with a key derived from the
// stored salt; a decrypt failure means the supplied secret is wrong.
func mergeWithStoredMetadata(s models.PasswordMetadata, p passwordparser.PasswordEntry) (CombinedPassword, error) {
	e := encryptor(p.Secret, s.Salt)
	if err := decryptCanary(e, s.Canary, p.Label); err != nil {
		return CombinedPassword{}, err
	}
	return CombinedPassword{
		Label:             p.Label,
		Secret:            p.Secret,
		Salt:              s.Salt,
		Encryptor:         e,
		configuredPrimary: p.Primary,
		storedPrimary:     s.Primary,
	}, nil
}
// storedWithPrimary indexes the stored password metadata by label and
// identifies the single primary entry. More than one primary indicates a
// corrupt database and is reported as an error.
func storedWithPrimary(stored []models.PasswordMetadata) (map[string]models.PasswordMetadata, string, error) {
	primary := ""
	result := make(map[string]models.PasswordMetadata, len(stored))
	for _, p := range stored {
		result[p.Label] = p
		if !p.Primary {
			continue
		}
		if primary != "" {
			return nil, "", errors.New("corrupt database - more than one primary found in table password_metadata")
		}
		primary = p.Label
	}
	return result, primary, nil
}
// encryptor derives a 32-byte AES key from secret and salt via PBKDF2
// (SHA-256, 100000 iterations) and returns a GCM encryptor for it.
// NOTE(review): it panics on a short secret (<20 chars) or a salt that is
// not exactly 32 bytes — callers are expected to have validated both;
// confirm this invariant holds at every call site.
func encryptor(secret string, salt []byte) gcmencryptor.GCMEncryptor {
	switch {
	case len(secret) < 20:
		panic("invalid secret complexity for key generation")
	case len(salt) != 32:
		panic("invalid salt complexity for key generation")
	}
	var key [32]byte
	copy(key[:], pbkdf2.Key([]byte(secret), salt, 100000, 32, sha256.New))
	return gcmencryptor.New(key)
}
|
// Copyright 2017 orijtech, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis
import (
"errors"
"strings"
"sync"
"github.com/odeke-em/redtable"
"github.com/orijtech/authmid"
)
// redisConnector is an authmid.Backend that stores API key/secret pairs in
// a single Redis hash table.
type redisConnector struct {
	closeOnce sync.Once // guarantees Close releases the client only once
	c *redtable.Client // underlying Redis hash-table client
	hTableName string // name of the Redis hash holding key -> secret
}
// New connects to Redis at dbURL and returns an authmid.Backend backed by
// the named hash table. An empty (or all-whitespace) table name is
// rejected.
func New(hashTableName, dbURL string) (authmid.Backend, error) {
	if strings.TrimSpace(hashTableName) == "" {
		return nil, authmid.ErrEmptyTableName
	}
	c, err := redtable.New(dbURL)
	if err != nil {
		return nil, err
	}
	return &redisConnector{c: c, hTableName: hashTableName}, nil
}
// Compile-time check that redisConnector satisfies authmid.Backend.
var _ authmid.Backend = (*redisConnector)(nil)
// LookupSecret fetches the secret stored for apiKey. HGet may hand back
// either a string or a []byte; anything else (including a nil value) is
// treated as a missing key and reported as authmid.ErrNoSuchAPIKey.
func (rc *redisConnector) LookupSecret(apiKey string) ([]byte, error) {
	value, err := rc.c.HGet(rc.hTableName, apiKey)
	if err != nil {
		return nil, err
	}
	switch typed := value.(type) {
	case []byte:
		if typed != nil {
			return typed, nil
		}
	case string:
		return []byte(typed), nil
	}
	return nil, authmid.ErrNoSuchAPIKey
}
// UpsertSecret stores (or overwrites) the secret for apiKey in the hash
// table.
func (rc *redisConnector) UpsertSecret(apiKey, apiSecret string) error {
	_, err := rc.c.HSet(rc.hTableName, apiKey, apiSecret)
	return err
}
// DeleteAPIKey removes apiKey from the hash table, returning
// errNoEntriesMatched when no entry was deleted.
func (rc *redisConnector) DeleteAPIKey(apiKey string) error {
	n, err := rc.c.HDel(rc.hTableName, apiKey)
	if err != nil {
		return err
	}
	return errOnNoRowsAffected(n)
}
// errNoEntriesMatched is returned when an operation affected no rows.
var errNoEntriesMatched = errors.New("no entries matched")

// errOnNoRowsAffected interprets a numeric rows-affected result: it
// returns errNoEntriesMatched when the count is zero or negative, nil
// otherwise. Non-numeric values are treated as success.
func errOnNoRowsAffected(n interface{}) error {
	var count int64
	switch v := n.(type) {
	case int64:
		count = v
	case int:
		count = int64(v)
	case uint:
		count = int64(v)
	case uint64:
		count = int64(v)
	default:
		return nil
	}
	if count > 0 {
		return nil
	}
	return errNoEntriesMatched
}
// errAlreadyClosed is returned by Close on every call after the first.
var errAlreadyClosed = errors.New("already closed")
// Close releases the Redis client exactly once; subsequent calls return
// errAlreadyClosed.
func (rc *redisConnector) Close() error {
	var err error = errAlreadyClosed
	rc.closeOnce.Do(func() {
		err = rc.c.Close()
	})
	return err
}
|
package main
import (
"container/list"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
)
// GET fetches the page at urlFlag and returns all same-site links found in
// it, normalized to absolute URLs. The base is rebuilt from the FINAL
// request URL so redirects are accounted for. Exits the process on a
// network error (acceptable here because this is a CLI tool).
func GET(urlFlag string) []string {
	resp, err := http.Get(urlFlag)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	reqURL := resp.Request.URL
	baseURL := &url.URL{
		Scheme: reqURL.Scheme,
		Host:   reqURL.Host,
	}
	base := baseURL.String()
	return filter(base, buildLinks(resp.Body, base))
}
// filter keeps only the links that start with base, i.e. links that stay
// on the same site.
func filter(base string, links []string) []string {
	var sameSite []string
	for _, link := range links {
		if !strings.HasPrefix(link, base) {
			continue
		}
		sameSite = append(sameSite, link)
	}
	return sameSite
}
func buildLinks(body io.Reader, base string) []string {
data, _ := ioutil.ReadAll(body)
var res []Link
res = parse(string(data))
var hrefs []string
for _, link := range res {
switch {
case link.Href[0] == '/':
hrefs = append(hrefs, base+link.Href)
case strings.HasPrefix(link.Href, "http"):
//http is a common prefix for both http and https
hrefs = append(hrefs, link.Href)
}
}
return hrefs
}
// bfs crawls breadth-first from urlFlag, following same-site links up to
// the given depth, and returns every distinct page visited.
func bfs(urlFlag string, height int) []string {
	visited := make(map[string]bool)
	frontier := list.New()
	frontier.PushBack(urlFlag)
	for depth := 0; depth <= height; depth++ {
		current := frontier
		frontier = list.New()
		for e := current.Front(); e != nil; e = e.Next() {
			page := e.Value.(string)
			if visited[page] {
				continue
			}
			visited[page] = true
			// Everything linked from this page belongs to the next level.
			for _, link := range GET(page) {
				frontier.PushBack(link)
			}
		}
	}
	var pages []string
	for page := range visited {
		pages = append(pages, page)
	}
	return pages
}
func main() {
urlFlag := flag.String("url", "http://gophercises.com", "your choice of URL")
height := flag.Int("height", 2, "depth to which tree is traversed")
flag.Parse()
//do not name var url - leads to clash with net/url
pages := bfs(*urlFlag, *height)
for _, x := range pages {
fmt.Println(x)
}
}
|
package types
// DataplaneTokenRequest is the JSON payload for requesting a dataplane
// token scoped to a name, mesh, tags, and dataplane type.
type DataplaneTokenRequest struct {
	Name string `json:"name"`
	Mesh string `json:"mesh"`
	Tags map[string][]string `json:"tags"`
	Type string `json:"type"`
}
|
package writer
import (
"fmt"
"io"
"sort"
"strings"
"text/template"
"time"
"github.com/urfave/cli"
)
// New gathers everything needed to render a man/markdown page for app:
// the current month/year as the document date, the rendered command
// sections, and the flag lists in both detailed and synopsis form.
func New(app *cli.App) *Cli {
	now := time.Now()
	return &Cli{
		App:          app,
		Date:         fmt.Sprintf("%s %d", now.Month(), now.Year()),
		Commands:     prepareCommands(app.Commands, 0),
		GlobalArgs:   prepareArgsWithValues(app.Flags),
		SynopsisArgs: prepareArgsSynopsis(app.Flags),
	}
}
// Write renders the Cli through the package's cliTemplateString template
// into w.
func (c *Cli) Write(w io.Writer) error {
	const name = "cli"
	t, err := template.New(name).Parse(cliTemplateString)
	if err != nil {
		return err
	}
	return t.ExecuteTemplate(w, name, c)
}
// nl is the newline used between rendered markdown fragments.
const nl = "\n"
// noDescription is the placeholder for commands/flags without usage text.
const noDescription = "_no description available_"
// prepareCommands renders each command — and, recursively, its
// subcommands one heading level deeper — as a markdown section: a heading
// with the command's names, its usage line, and its flags.
func prepareCommands(commands []cli.Command, level int) []string {
	sections := []string{}
	for i := range commands {
		command := &commands[i]

		section := strings.Repeat("#", level+2) + " " +
			strings.Join(command.Names(), ", ") + nl

		usage := command.Usage
		if usage == "" {
			usage = noDescription
		}
		section += nl + usage + nl

		flags := prepareArgsWithValues(command.Flags)
		if len(flags) > 0 {
			section += nl + strings.Join(flags, nl) + nl
		}
		sections = append(sections, section)

		if len(command.Subcommands) > 0 {
			sections = append(
				sections,
				prepareCommands(command.Subcommands, level+1)...,
			)
		}
	}
	return sections
}
// prepareArgsWithValues renders flags bolded with default values and usage
// details, for the option-description section.
func prepareArgsWithValues(flags []cli.Flag) []string {
	return prepareFlags(flags, ", ", "**", "**", `""`, true)
}
// prepareArgsSynopsis renders flags bracketed and pipe-separated without
// details, for the one-line synopsis.
func prepareArgsSynopsis(flags []cli.Flag) []string {
	return prepareFlags(flags, "|", "[", "]", "[value]", false)
}
// prepareFlags renders each flag as opener + dashed-name list + closer,
// appending "=value" when the flag takes a value and the usage/default
// details when addDetails is set. The result is sorted alphabetically.
func prepareFlags(
	flags []cli.Flag,
	sep, opener, closer, value string,
	addDetails bool,
) []string {
	args := []string{}
	for _, flag := range flags {
		arg := opener
		for _, name := range strings.Split(flag.GetName(), ",") {
			name = strings.TrimSpace(name)
			if len(arg) > len(opener) {
				arg += sep
			}
			// Long names get "--", single-letter names get "-".
			if len(name) > 1 {
				arg += "--" + name
			} else {
				arg += "-" + name
			}
		}
		arg += closer
		if flagTakesValue(flag) {
			arg += "=" + value
		}
		if addDetails {
			arg += flagDetails(flag)
		}
		args = append(args, arg)
	}
	sort.Strings(args)
	return args
}
// flagTakesValue returns true if the flag takes a value, otherwise false.
// Unknown flag types are conservatively reported as not taking a value.
//
// Improvement: the original was a chain of thirteen separate type
// assertions; a single type switch expresses the same dispatch
// idiomatically and in one pass.
func flagTakesValue(flag cli.Flag) bool {
	switch flag.(type) {
	case cli.BoolFlag, cli.BoolTFlag:
		return false
	case cli.DurationFlag,
		cli.Float64Flag,
		cli.GenericFlag,
		cli.Int64Flag,
		cli.IntFlag,
		cli.IntSliceFlag,
		cli.Int64SliceFlag,
		cli.StringFlag,
		cli.StringSliceFlag,
		cli.Uint64Flag,
		cli.UintFlag:
		return true
	}
	return false
}
// flagDetails returns a ": description (default: value)" suffix for the
// flag, built from its usage text and default value. Falls back to
// noDescription when the flag has no usage text.
//
// Improvement: the original was a chain of thirteen independent type
// assertions; a type switch performs the same single-match dispatch
// idiomatically.
func flagDetails(flag cli.Flag) string {
	description := ""
	value := ""
	switch f := flag.(type) {
	case cli.BoolFlag:
		description = f.Usage
	case cli.BoolTFlag:
		description = f.Usage
	case cli.DurationFlag:
		description = f.Usage
		value = f.Value.String()
	case cli.Float64Flag:
		description = f.Usage
		value = fmt.Sprintf("%f", f.Value)
	case cli.GenericFlag:
		description = f.Usage
		if f.Value != nil {
			value = f.Value.String()
		}
	case cli.Int64Flag:
		description = f.Usage
		value = fmt.Sprintf("%d", f.Value)
	case cli.IntFlag:
		description = f.Usage
		value = fmt.Sprintf("%d", f.Value)
	case cli.IntSliceFlag:
		description = f.Usage
		if f.Value != nil {
			value = f.Value.String()
		}
	case cli.Int64SliceFlag:
		description = f.Usage
		if f.Value != nil {
			value = f.Value.String()
		}
	case cli.StringFlag:
		description = f.Usage
		value = f.Value
	case cli.StringSliceFlag:
		description = f.Usage
		if f.Value != nil {
			value = f.Value.String()
		}
	case cli.Uint64Flag:
		description = f.Usage
		value = fmt.Sprintf("%d", f.Value)
	case cli.UintFlag:
		description = f.Usage
		value = fmt.Sprintf("%d", f.Value)
	}
	if description == "" {
		description = noDescription
	}
	if value != "" {
		description += " (default: " + value + ")"
	}
	return ": " + description
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/11/18 9:19 上午
# @File : sort_test.go
# @Description :
# @Attention :
*/
package sort
import (
"fmt"
"github.com/stretchr/testify/require"
"testing"
)
var (
	// arr is the shared unsorted input. NOTE(review): if any sort under
	// test mutates its argument in place, later tests receive already
	// sorted input — confirm each sort copies, or give each test its own
	// slice.
	arr = []int{0, 3, 2, 1, 9, 8}
	// exceptedRet (sic) is arr in ascending order.
	exceptedRet = []int{0, 1, 2, 3, 8, 9}
)
// TestBuble checks bubbleSort produces ascending order.
//
// Bug fixed: the original only printed the result and asserted NOTHING,
// so the test could never fail.
func TestBuble(t *testing.T) {
	ret := bubbleSort(arr)
	fmt.Println(ret)
	require.Equal(t, ret, exceptedRet)
}
// TestSelect checks SelectionSort produces ascending order.
func TestSelect(t *testing.T) {
	ret := SelectionSort(arr)
	fmt.Println(ret)
	require.Equal(t, ret, exceptedRet)
}
// TestInsert checks InsertionSort produces ascending order.
//
// Fixed: the original printed the INPUT slice (arr) instead of the result,
// which is misleading diagnostics and inconsistent with the sibling tests.
func TestInsert(t *testing.T) {
	ret := InsertionSort(arr)
	fmt.Println(ret)
	require.Equal(t, ret, exceptedRet)
}
// TestMerge checks mergeSort produces ascending order.
func TestMerge(t *testing.T) {
	ret := mergeSort(arr)
	fmt.Println(ret)
	require.Equal(t, ret, exceptedRet)
}
// TestQSort checks quickSort on an input containing duplicates.
//
// Bug fixed: the original compared the sorted 4-element result against the
// 6-element global exceptedRet, so the assertion could never pass.
func TestQSort(t *testing.T) {
	ret := quickSort([]int{1, 2, 3, 1})
	fmt.Println(ret)
	require.Equal(t, ret, []int{1, 1, 2, 3})
}
// TestShell checks shellSort produces ascending order.
func TestShell(t *testing.T) {
	ret := shellSort(arr)
	fmt.Println(ret)
	require.Equal(t, ret, exceptedRet)
}
|
package mocks
import (
"errors"
"fmt"
"testing"
"github.com/golang/protobuf/proto"
"github.com/liquidm/llsr"
"github.com/liquidm/llsr/decoderbufs"
)
// testReporterMock records formatted error messages so tests can assert on
// what a mocked client reported instead of failing the real *testing.T.
type testReporterMock struct {
	errors []string
}

// newTestReporterMock returns a reporter with an empty error list.
func newTestReporterMock() *testReporterMock {
	return &testReporterMock{errors: []string{}}
}

// Errorf records the formatted message.
func (trm *testReporterMock) Errorf(format string, args ...interface{}) {
	trm.errors = append(trm.errors, fmt.Sprintf(format, args...))
}
// TestMockClientImplementsClientInterface asserts at runtime that the mock
// Client satisfies the llsr.Client interface.
func TestMockClientImplementsClientInterface(t *testing.T) {
	var c interface{} = &Client{}
	if _, ok := c.(llsr.Client); !ok {
		t.Error("The mock client should implement llsr.Client interface")
	}
}
// TestClientHandlesUpdateExpectations queues one expected message and one
// expected event on the mock client and verifies both are delivered on the
// Updates and Events channels. The "INSERT users" string presumably comes
// from DummyConverter rendering the RowMessage — confirm against its
// implementation.
func TestClientHandlesUpdateExpectations(t *testing.T) {
	client := NewClient(t, &DummyConverter{})
	defer func() {
		client.Close()
	}()
	client.ExpectYieldMessage(&decoderbufs.RowMessage{Table: proto.String("users"), Op: decoderbufs.Op_INSERT.Enum()})
	client.ExpectYieldEvent(&llsr.Event{Type: llsr.EventReconnect})
	msg := <-client.Updates()
	if msg != "INSERT users" {
		t.Errorf("Expected to receive foo message got %s instead", msg)
	}
	event := <-client.Events()
	if event.Type != llsr.EventReconnect {
		t.Errorf("Expected to receive llsr.EventReconnect got %s instead", event.Type)
	}
}
// TestClientMeetsNotAllMessagesConsumedError verifies that closing the
// mock client with an unconsumed expected message reports an error through
// the (mocked) test reporter.
func TestClientMeetsNotAllMessagesConsumedError(t *testing.T) {
	trm := newTestReporterMock()
	client := NewClient(trm, &DummyConverter{})
	client.ExpectYieldMessage(&decoderbufs.RowMessage{Table: proto.String("users"), Op: decoderbufs.Op_INSERT.Enum()})
	client.Close()
	if len(trm.errors) == 0 {
		t.Errorf("Expected to return error if not all messages consumed")
	}
}
// TestExpectReconnectEvent verifies the ExpectReconnectEvent helper queues
// an EventReconnect on the Events channel.
func TestExpectReconnectEvent(t *testing.T) {
	client := NewClient(t, &DummyConverter{})
	client.ExpectReconnectEvent()
	event := <-client.Events()
	if event.Type != llsr.EventReconnect {
		t.Error("Expected to receive llsr.EventReconnect event")
	}
}
// TestExpectEventBackendStdErr verifies ExpectBackendStdErrEvent queues an
// EventBackendStdErr carrying the given stderr text as its value.
func TestExpectEventBackendStdErr(t *testing.T) {
	client := NewClient(t, &DummyConverter{})
	client.ExpectBackendStdErrEvent("stderr output")
	event := <-client.Events()
	if event.Type != llsr.EventBackendStdErr {
		t.Error("Expected to receive llsr.EventBackendStdErr event")
	}
	if event.Value.(string) != "stderr output" {
		t.Errorf("Expected event to contain '%s' value, got: %v", "stderr output", event.Value)
	}
}
// TestExpectBackendInvalidExitStatusEvent verifies the
// ExpectBackendInvalidExitStatusEvent helper queues an
// EventBackendInvalidExitStatus on the Events channel.
//
// Fixed: renamed from the uninformative "TestExpect" to match the naming
// pattern of the sibling tests (test functions are discovered by the
// framework, so no caller breaks).
func TestExpectBackendInvalidExitStatusEvent(t *testing.T) {
	client := NewClient(t, &DummyConverter{})
	client.ExpectBackendInvalidExitStatusEvent(errors.New(""))
	event := <-client.Events()
	if event.Type != llsr.EventBackendInvalidExitStatus {
		t.Error("Expected to receive llsr.EventBackendInvalidExitStatus event")
	}
}
|
package v1beta3
import (
"reflect"
"testing"
next "github.com/devspace-cloud/devspace/pkg/devspace/config/versions/v1beta4"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/devspace-cloud/devspace/pkg/util/ptr"
yaml "gopkg.in/yaml.v2"
)
// testCase pairs a v1beta3 config with the v1beta4 config its Upgrade
// should produce.
type testCase struct {
	in       *Config
	expected *next.Config
}
// TestSimple table-tests Config.Upgrade: component deployments must be
// rewritten to equivalent component-chart Helm deployments (cases 1 and
// 2, the second carrying options), and an explicit component-chart Helm
// config must be normalized to ComponentChart: true (case 3). Results are
// compared with DeepEqual and dumped as YAML on mismatch.
func TestSimple(t *testing.T) {
	testCases := []*testCase{
		{
			in: &Config{
				Deployments: []*DeploymentConfig{
					{
						Name: "Test",
						Component: &ComponentConfig{
							Containers: []*ContainerConfig{
								{
									Name: "container-1",
								},
							},
						},
					},
				},
			},
			expected: &next.Config{
				Deployments: []*next.DeploymentConfig{
					{
						Name: "Test",
						Helm: &next.HelmConfig{
							ComponentChart: ptr.Bool(true),
							Values: map[interface{}]interface{}{
								"containers": []interface{}{
									map[interface{}]interface{}{
										"name": "container-1",
									},
								},
							},
						},
					},
				},
			},
		},
		{
			in: &Config{
				Deployments: []*DeploymentConfig{
					{
						Name: "Test",
						Component: &ComponentConfig{
							Containers: []*ContainerConfig{
								{
									Name: "container-1",
								},
							},
							Options: &ComponentConfigOptions{
								Force: ptr.Bool(true),
								Wait:  ptr.Bool(false),
							},
						},
					},
				},
			},
			expected: &next.Config{
				Deployments: []*next.DeploymentConfig{
					{
						Name: "Test",
						Helm: &next.HelmConfig{
							ComponentChart: ptr.Bool(true),
							Values: map[interface{}]interface{}{
								"containers": []interface{}{
									map[interface{}]interface{}{
										"name": "container-1",
									},
								},
							},
							Force: ptr.Bool(true),
							Wait:  ptr.Bool(false),
						},
					},
				},
			},
		},
		{
			in: &Config{
				Deployments: []*DeploymentConfig{
					{
						Name: "Test",
						Helm: &HelmConfig{
							Chart: &ChartConfig{
								Name:    "component-chart",
								RepoURL: "https://charts.devspace.cloud",
								Version: "v0.0.6",
							},
							Values: map[interface{}]interface{}{
								"containers": []interface{}{
									map[interface{}]interface{}{
										"name": "container-1",
									},
								},
							},
							Force: ptr.Bool(true),
							Wait:  ptr.Bool(false),
						},
					},
				},
			},
			expected: &next.Config{
				Deployments: []*next.DeploymentConfig{
					{
						Name: "Test",
						Helm: &next.HelmConfig{
							ComponentChart: ptr.Bool(true),
							Values: map[interface{}]interface{}{
								"containers": []interface{}{
									map[interface{}]interface{}{
										"name": "container-1",
									},
								},
							},
							Force: ptr.Bool(true),
							Wait:  ptr.Bool(false),
						},
					},
				},
			},
		},
	}
	// Run test cases
	for index, testCase := range testCases {
		newConfig, err := testCase.in.Upgrade(log.Discard)
		if err != nil {
			t.Fatalf("Error: %v", err)
		}
		isEqual := reflect.DeepEqual(newConfig, testCase.expected)
		if !isEqual {
			newConfigYaml, _ := yaml.Marshal(newConfig)
			expectedYaml, _ := yaml.Marshal(testCase.expected)
			t.Fatalf("TestCase %d: Got %s, but expected %s", index, newConfigYaml, expectedYaml)
		}
	}
}
// testCasePaths pairs a map of config variable paths with the paths
// UpgradeVarPaths should rewrite them to.
type testCasePaths struct {
	in       map[string]string
	expected map[string]string
}
// TestUpgradeVarPaths verifies that UpgradeVarPaths rewrites in-place only
// indexed component paths (.deployments[N].component... -> ...helm...)
// while leaving non-matching paths untouched.
func TestUpgradeVarPaths(t *testing.T) {
	config := &Config{}
	testCases := []*testCasePaths{
		{
			in: map[string]string{
				".deployments[1].abc":                   "test1",
				".deployments[1].component.abc":         "test2",
				".deployments[1].component.options.abc": "test3",
				".deployments.notReplace.bcd":           "test4",
				".dev.notreplace":                       "test5",
			},
			expected: map[string]string{
				".deployments[1].abc":             "test1",
				".deployments[1].helm.values.abc": "test2",
				".deployments[1].helm.abc":        "test3",
				".deployments.notReplace.bcd":     "test4",
				".dev.notreplace":                 "test5",
			},
		},
	}
	// Run test cases
	for index, testCase := range testCases {
		err := config.UpgradeVarPaths(testCase.in, log.Discard)
		if err != nil {
			t.Fatalf("Error: %v", err)
		}
		isEqual := reflect.DeepEqual(testCase.in, testCase.expected)
		if !isEqual {
			newConfigYaml, _ := yaml.Marshal(testCase.in)
			expectedYaml, _ := yaml.Marshal(testCase.expected)
			t.Fatalf("TestCase %d: Got %s, but expected %s", index, newConfigYaml, expectedYaml)
		}
	}
}
|
package migemo
import "errors"
// LevelU8 holds the LOUDS bits, terminal ("out") bits, and edge labels for
// one depth level of the trie under construction.
type LevelU8 struct {
	louds []bool
	outs []bool
	labels []byte
}
// LoudsTrieBuilderU8 incrementally builds a LoudsTrieU8 from byte-string
// keys added in strictly ascending order.
type LoudsTrieBuilderU8 struct {
	levels []LevelU8
	lastKey []uint8 // most recently added key, used to enforce ordering
}
// NewLoudsTrieBuilderU8 returns a builder seeded with the LOUDS super-root
// and an (initially childless) root node; the two ' ' labels are the
// conventional placeholders for those two virtual nodes.
func NewLoudsTrieBuilderU8() *LoudsTrieBuilderU8 {
	level0 := LevelU8{
		louds: []bool{true, false},
		outs: []bool{false},
		labels: []uint8{' ', ' '},
	}
	level1 := LevelU8{
		louds: []bool{false},
	}
	levels := []LevelU8{level0, level1}
	return &LoudsTrieBuilderU8{
		levels: levels,
		lastKey: []uint8{},
	}
}
// Add inserts key into the trie under construction. Keys MUST be added in
// strictly ascending lexicographic order; violating that returns an error.
// An empty key simply marks the root as terminal.
func (builder *LoudsTrieBuilderU8) Add(key string) error {
	if string(key) <= string(builder.lastKey) {
		return errors.New("key must be larger than last added key")
	}
	if len(key) == 0 {
		builder.levels[0].outs[0] = true
		return nil
	}
	// Grow the per-depth level slices so every character of key (plus the
	// terminating level) has a slot.
	if len(key)+1 >= len(builder.levels) {
		builder.levels = append(builder.levels, make([]LevelU8, len(key)+2-len(builder.levels))...)
	}
	// Walk the shared prefix with lastKey; the first position where key
	// diverges starts a new sibling branch at that depth.
	i := 0
	for ; i < len(key); i++ {
		var level = &builder.levels[i+1]
		if (i == len(builder.lastKey)) || key[i] != level.labels[len(level.labels)-1] {
			level.louds[len(builder.levels[i+1].louds)-1] = true
			level.louds = append(level.louds, false)
			level.outs = append(level.outs, false)
			level.labels = append(level.labels, key[i])
			break
		}
	}
	// Every remaining character opens a fresh single-child node.
	for i++; i < len(key); i++ {
		var level = &builder.levels[i+1]
		level.louds = append(level.louds, true, false)
		level.outs = append(level.outs, false)
		level.labels = append(level.labels, key[i])
	}
	builder.levels[len(key)+1].louds = append(builder.levels[len(key)+1].louds, true)
	// Mark the node for key's last character as a word terminal.
	builder.levels[len(key)].outs[len(builder.levels[len(key)].outs)-1] = true
	builder.lastKey = make([]uint8, len(key))
	copy(builder.lastKey, key)
	return nil
}
// Build concatenates the per-depth level data breadth-first into flat
// LOUDS/out/label sequences, packs the LOUDS bits into 64-bit words, and
// returns the finished LoudsTrieU8.
func (builder *LoudsTrieBuilderU8) Build() *LoudsTrieU8 {
	louds := []bool{}
	outs := []bool{}
	labels := []uint8{}
	for _, level := range builder.levels {
		louds = append(louds, level.louds...)
		outs = append(outs, level.outs...)
		labels = append(labels, level.labels...)
	}
	// Drop the trailing sentinel bit appended by the final Add.
	louds = louds[:len(louds)-1]
	words := make([]uint64, (len(louds)+63)/64)
	for i := 0; i < len(louds); i++ {
		if louds[i] {
			words[i/64] |= 1 << (i % 64)
		}
	}
	var bitVector = NewBitVector(words, uint32(len(louds)))
	return NewLoudsTrieU8(bitVector, labels)
}
|
package models
import (
"github.com/astaxie/beego/orm"
"time"
)
// FinancialProductHistoricalRecordQueryParam carries the filters for
// querying historical records: a time window, the product config id, and
// the product category (current vs. fixed-term).
type FinancialProductHistoricalRecordQueryParam struct {
	BaseQueryParam
	StartTime int64 `json:"startTime"` // window start (epoch)
	EndTime int64 `json:"endTime"` // window end (epoch)
	ConfId string `json:"confId"` // financial product config id
	Category string `json:"category"` // product category: current or fixed-term
}
// TableName tells the beego ORM which table backs
// FinancialProductHistoricalRecord.
func (a *FinancialProductHistoricalRecord) TableName() string {
	return FinancialProductHistoricalRecordTBName()
}
// FinancialProductHistoricalRecord is an audit row recording a rate change
// on a financial product: who changed it, old and new rate, category, and
// when. ConfId/AdminId/Name are ORM-ignored mirrors filled in from the
// Config/Admin relations for JSON output.
type FinancialProductHistoricalRecord struct {
	Id int `orm:"pk;column(id)"json:"id"form:"id"`
	Config *FinancialProduct `orm:"rel(fk);column(config)"json:"-"form:"-"`
	ConfId int `orm:"-"json:"confId"form:"confId"`
	Admin *AdminBackendUser `orm:"rel(fk);column(admin_id)"json:"-"form:"-"`
	AdminId int `orm:"-"json:"adminId"form:"adminId"`
	// new interest rate after the change
	NewRate float64 `orm:"column(new_rate)"json:"newRate"form:"newRate"`
	// previous interest rate before the change
	OldRate float64 `orm:"column(old_rate)"json:"oldRate"form:"oldRate"`
	// category: 1 = current (demand), 2 = fixed-term
	Category int `orm:"column(category)"json:"category"form:"category"`
	// free-form description of the change
	Msg string `orm:"column(msg)"json:"msg"form:"msg"`
	// creation time, set automatically on insert
	CreateTime time.Time `orm:"auto_now_add;type(datetime);column(create_time)"json:"createTime"form:"createTime"`
	// record type, one of {create, edit}
	RecordType string `orm:"column(record_type)"json:"recordType"form:"recordType"`
	Name string `orm:"-"json:"name"form:"name"`
}
// FinancialConfigHistoricalPageList returns one page of historical records
// matching the filters plus the total (unpaginated) count. Offset is
// 1-based (page number), Limit the page size. The related Config/Admin ids
// and admin name are copied into the JSON-visible mirror fields.
func FinancialConfigHistoricalPageList(params *FinancialProductHistoricalRecordQueryParam) ([]*FinancialProductHistoricalRecord, int64) {
	o := orm.NewOrm()
	query := o.QueryTable(FinancialProductHistoricalRecordTBName())
	data := make([]*FinancialProductHistoricalRecord, 0)
	// default sort column
	sortorder := "id"
	switch params.Sort {
	case "id":
		sortorder = "id"
	}
	if params.Order == "desc" {
		sortorder = "-" + sortorder
	}
	if params.ConfId != "" {
		query = query.Filter("Config__id__exact", params.ConfId)
	}
	if params.Category != "" {
		query = query.Filter("category__exact", params.Category)
	}
	// NOTE(review): Count and All errors are silently discarded — confirm
	// callers can tolerate an empty page on query failure.
	total, _ := query.Count()
	query.OrderBy(sortorder).Limit(params.Limit, (params.Offset-1)*params.Limit).All(&data)
	for _,obj := range data{
		if obj.Config != nil{
			obj.ConfId = obj.Config.Id
		}
		if obj.Admin != nil{
			obj.Name = obj.Admin.UserName
			obj.AdminId = obj.Admin.Id
		}
	}
	return data, total
}
|
package main
import (
"gopkg.in/mgo.v2/bson"
"time"
)
type (
	// Component represents the structure of our resource
	// TODO: Should include jarfile / gemfile / ...
	Component struct {
		ID bson.ObjectId `json:"id" bson:"_id"`
		Name string `json:"name" bson:"name"`
		Resource string `json:"resource" bson:"resource"`
		Type string `json:"type" bson:"type"`
		SubType string `json:"sub_type" bson:"sub_type"`
		Version string `json:"version" bson:"version"`
		VersionManifest string `json:"version_manifest" bson:"version_manifest"`
		VersionFile string `json:"version_file" bson:"version_file"`
	}
	// InstanceMongo is the Mongo-side shape of an instance/host: it stores
	// component references (ObjectIds) rather than embedded Components.
	InstanceMongo struct {
		ID bson.ObjectId `json:"id" bson:"_id"`
		Hostname string `json:"hostname" bson:"hostname"`
		Components []bson.ObjectId `json:"components" bson:"components"`
		IPs map[string][]ResolvedURL `json:"ips" bson:"ips"`
		Hashes map[string][]string `json:"hashes" bson:"hashes"`
	}
	// Instance is the resolved shape of an instance/host, with Components
	// dereferenced and embedded.
	Instance struct {
		ID bson.ObjectId `json:"id" bson:"_id"`
		Hostname string `json:"hostname" bson:"hostname"`
		Components []Component `json:"components" bson:"components"`
		IPs map[string][]ResolvedURL `json:"ips" bson:"ips"`
		Hashes map[string][]string `json:"hashes" bson:"hashes"`
	}
	// VulnerabilitySummary is a summary of a vulnerability.
	VulnerabilitySummary struct {
		Name string `json:"name" bson:"name"`
		Timestamp time.Time `json:"timestamp" bson:"timestamp"`
		Score int `json:"score" bson:"score"`
		URL string `json:"url" bson:"url"`
		Components []Component `json:"components" bson:"components"`
		Hosts []string `json:"hosts" bson:"hosts"`
	}
	// ResolvedURL is a single resolved URL.
	ResolvedURL struct {
		Name string `json:"name" bson:"name"`
		IP string `json:"ip" bson:"ip"`
	}
)
|
package main
import (
"fmt"
"time"
"log"
"os"
"path/filepath"
"strings"
)
// test pairs a display name with an IP address; main uses it to
// exercise map-of-pointer aliasing.
type test struct {
	name string
	ip   string
}
// substr returns the rune-based substring of s starting at rune index
// pos and spanning at most length runes.
//
// Out-of-range arguments are clamped instead of panicking: a negative
// pos is treated as 0, a pos past the end yields "", and a non-positive
// length yields "". The previous version panicked for pos > len(runes)
// or negative length (e.g. when fed strings.LastIndex == -1, as main
// does on non-Windows paths).
func substr(s string, pos, length int) string {
	runes := []rune(s)
	if pos < 0 {
		pos = 0
	}
	if pos > len(runes) {
		pos = len(runes)
	}
	end := pos + length
	if end > len(runes) {
		end = len(runes)
	}
	if end < pos {
		end = pos
	}
	return string(runes[pos:end])
}
// main is a scratch program exercising two things:
//  1. maps whose values are pointers — both keys below alias the SAME
//     *test, so a write through one key is visible through the other;
//  2. deriving the executable's directory from os.Args[0].
func main(){
	var ted map[string]*test
	ted = make(map[string]*test)
	temp := &test{
		name: "haha",
		ip: "192.168.168.106",
	}
	// Both entries point at the same struct instance.
	ted["qwer"] = temp
	ted["32"] = temp
	// Map iteration order is random; whichever key is visited last is
	// the final value left in temp.ip.
	for k,v:= range ted {
		v.ip = k
		fmt.Printf("key : %v, value : %v\n",k,v.ip)
	}
	fmt.Println(ted)
	// Mutating through one key; the aliasing means ted["32"].ip changes too.
	ted["qwer"].ip = "123243"
	time.Sleep(10 * time.Second)
	fmt.Println(ted["qwer"].ip)
	// Path experiment (original comment: 测试路径 — "test the path").
	fmt.Println(os.Args[0])
	fmt.Println(filepath.Dir(os.Args[0]))
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dir)
	// NOTE(review): "\\" assumes Windows path separators; on other OSes
	// LastIndex returns -1, handing substr a negative length — confirm
	// the intended platform.
	path:= substr(dir, 0, strings.LastIndex(dir, "\\"))
	fmt.Println(path)
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
)
var (
	// mockStore is the in-memory storage shared by all tests in this
	// file; tests seed it directly (see TestCheck).
	mockStore = NewMockStorage()
	// server is the httptest server wired up in init.
	server *httptest.Server
)
// init builds an App backed by the shared mock store and a small
// cache, attaches its router, and exposes it through a package-wide
// httptest server used by every test in this file.
func init() {
	app := &App{
		Storage: mockStore,
		Cache:   NewCache(10),
	}
	app.Router = NewRouter(app)
	server = httptest.NewServer(app.Router)
}
// TestAdd POSTs a url to /add and expects 201 Created.
func TestAdd(t *testing.T) {
	var (
		apiurl  = server.URL + "/add"
		payload = url.Values{"url": []string{"google.com"}}
	)
	resp, err := http.PostForm(apiurl, payload)
	if err != nil {
		// Fatal, not Error: resp is nil on error, and the deferred
		// Body.Close below would panic if we continued.
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if body, err := ioutil.ReadAll(resp.Body); err != nil {
		t.Error(err)
	} else {
		t.Logf("response: %s", string(body))
	}
	if resp.StatusCode != http.StatusCreated {
		t.Errorf("expected 201 but got: %d", resp.StatusCode)
	}
}
// TestCheck seeds the mock store with a known hash and verifies that
// GET /check returns 200 and echoes the stored record as JSON.
func TestCheck(t *testing.T) {
	var (
		hash   = makeHash("google.com")
		query  = url.Values{"hash": []string{hash}}
		apiurl = server.URL + "/check" + fmt.Sprintf("?%s", query.Encode())
	)
	dburl := Url{
		Id:   1,
		Hash: hash,
		Url:  "http://google.com",
	}
	mockStore.urls[hash] = dburl
	resp, err := http.Get(apiurl)
	if err != nil {
		// Fatal, not Error: resp is nil on error, and the deferred
		// Body.Close below would panic if we continued.
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Fatal: the JSON comparison below is meaningless without body.
		t.Fatal(err)
	}
	t.Logf("response: %s", string(body))
	if resp.StatusCode != http.StatusOK {
		t.Errorf("expected 200 but got: %d", resp.StatusCode)
	}
	b, _ := json.Marshal(dburl)
	if !bytes.Equal(b, bytes.TrimSpace(body)) {
		t.Errorf("expected '%s' but got: '%s'", string(b), string(body))
	}
}
|
package mysql
import (
"InkaTry/warehouse-storage-be/internal/pkg/stores"
"context"
)
const (
	// listwarehousesQuery selects the id and name of every warehouse
	// that has not been soft-deleted (deleted = 0).
	listwarehousesQuery = `
	SELECT id, name from warehouses where deleted = 0;
	`
)
// ListWarehouses returns the id and name of every non-deleted
// warehouse. The underlying statement is prepared (and presumably
// cached) via the client's preparedStmt helper.
func (c *Client) ListWarehouses(ctx context.Context) (stores.Results, error) {
	stmt, err := c.preparedStmt(listwarehousesQuery)
	if err != nil {
		return nil, err
	}
	var results []stores.Result
	if err := stmt.SelectContext(ctx, &results); err != nil {
		return nil, err
	}
	return results, nil
}
|
package influxql_test
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"strings"
"testing"
"time"
"github.com/influxdata/influxql"
)
// Ensure the parser can parse a multi-statement query: two SELECTs
// separated by a semicolon must yield exactly two statements.
func TestParser_ParseQuery(t *testing.T) {
	q, err := influxql.NewParser(strings.NewReader(`SELECT a FROM b; SELECT c FROM d`)).ParseQuery()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if got := len(q.Statements); got != 2 {
		t.Fatalf("unexpected statement count: %d", got)
	}
}
// A single trailing semicolon must not produce an extra empty statement.
func TestParser_ParseQuery_TrailingSemicolon(t *testing.T) {
	q, err := influxql.NewParser(strings.NewReader(`SELECT value FROM cpu;`)).ParseQuery()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if got := len(q.Statements); got != 1 {
		t.Fatalf("unexpected statement count: %d", got)
	}
}
// Ensure the parser can parse an empty query into zero statements.
func TestParser_ParseQuery_Empty(t *testing.T) {
	q, err := influxql.NewParser(strings.NewReader(``)).ParseQuery()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if got := len(q.Statements); got != 0 {
		t.Fatalf("unexpected statement count: %d", got)
	}
}
// Ensure the parser will skip comments: trailing -- comments, /* */
// block comments (single- and multi-line), and a -- comment whose text
// contains an unterminated /*. Exactly three real statements remain.
func TestParser_ParseQuery_SkipComments(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT * FROM cpu; -- read from cpu database
/* create continuous query */
CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN
SELECT mean(*) INTO db1..:MEASUREMENT FROM cpu GROUP BY time(5m)
END;
/* just a multline comment
what is this doing here?
**/
-- should ignore the trailing multiline comment /*
SELECT mean(value) FROM gpu;
-- trailing comment at the end`)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if len(q.Statements) != 3 {
		t.Fatalf("unexpected statement count: %d", len(q.Statements))
	}
}
// Ensure the parser returns a descriptive error for a malformed
// statement (a SELECT with no field list).
func TestParser_ParseQuery_ParseError(t *testing.T) {
	const want = `found EOF, expected identifier, string, number, bool at line 1, char 8`
	_, err := influxql.NewParser(strings.NewReader(`SELECT`)).ParseQuery()
	if err == nil || err.Error() != want {
		t.Fatalf("unexpected error: %s", err)
	}
}
// Statements must be separated by semicolons; two back-to-back CREATEs
// without one are rejected at the second CREATE.
func TestParser_ParseQuery_NoSemicolon(t *testing.T) {
	const want = `found CREATE, expected ; at line 1, char 21`
	_, err := influxql.NewParser(strings.NewReader(`CREATE DATABASE foo CREATE DATABASE bar`)).ParseQuery()
	if err == nil || err.Error() != want {
		t.Fatalf("unexpected error: %s", err)
	}
}
// Ensure the parser can parse strings into Statement ASTs.
func TestParser_ParseStatement(t *testing.T) {
// For use in various tests.
now := time.Now()
var tests = []struct {
skip bool
s string
params map[string]interface{}
stmt influxql.Statement
err string
}{
// SELECT * statement
{
s: `SELECT * FROM myseries`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.Wildcard{}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
{
s: `SELECT * FROM myseries GROUP BY *`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.Wildcard{}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}},
},
},
{
s: `SELECT field1, * FROM myseries GROUP BY *`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.VarRef{Val: "field1"}},
{Expr: &influxql.Wildcard{}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}},
},
},
{
s: `SELECT *, field1 FROM myseries GROUP BY *`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.Wildcard{}},
{Expr: &influxql.VarRef{Val: "field1"}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}},
},
},
// SELECT statement
{
s: fmt.Sprintf(`SELECT mean(field1), sum(field2), count(field3) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY time(10h) ORDER BY DESC LIMIT 20 OFFSET 10;`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "mean", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}},
{Expr: &influxql.Call{Name: "sum", Args: []influxql.Expr{&influxql.VarRef{Val: "field2"}}}},
{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field3"}}}, Alias: "field_x"},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Condition: &influxql.BinaryExpr{
Op: influxql.AND,
LHS: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "host"},
RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
},
RHS: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 10 * time.Hour}}}}},
SortFields: []*influxql.SortField{
{Ascending: false},
},
Limit: 20,
Offset: 10,
},
},
{
s: `SELECT "foo.bar.baz" AS foo FROM myseries`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.VarRef{Val: "foo.bar.baz"}, Alias: "foo"},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
{
s: `SELECT "foo.bar.baz" AS foo FROM foo`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.VarRef{Val: "foo.bar.baz"}, Alias: "foo"},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "foo"}},
},
},
// sample
{
s: `SELECT sample(field1, 100) FROM myseries;`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "sample", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 100}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
// derivative
{
s: `SELECT derivative(field1, 1h) FROM myseries;`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "derivative", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.DurationLiteral{Val: time.Hour}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
{
s: fmt.Sprintf(`SELECT derivative(field1, 1h) FROM myseries WHERE time > '%s'`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "derivative", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.DurationLiteral{Val: time.Hour}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
{
s: `SELECT derivative(field1, 1h) / derivative(field2, 1h) FROM myseries`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{
Expr: &influxql.BinaryExpr{
LHS: &influxql.Call{
Name: "derivative",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
&influxql.DurationLiteral{Val: time.Hour},
},
},
RHS: &influxql.Call{
Name: "derivative",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field2"},
&influxql.DurationLiteral{Val: time.Hour},
},
},
Op: influxql.DIV,
},
},
},
Sources: []influxql.Source{
&influxql.Measurement{Name: "myseries"},
},
},
},
// difference
{
s: `SELECT difference(field1) FROM myseries;`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "difference", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
{
s: fmt.Sprintf(`SELECT difference(max(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{
Expr: &influxql.Call{
Name: "difference",
Args: []influxql.Expr{
&influxql.Call{
Name: "max",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
},
},
},
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Minute},
},
},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
// non_negative_difference
{
s: `SELECT non_negative_difference(field1) FROM myseries;`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "non_negative_difference", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
{
s: fmt.Sprintf(`SELECT non_negative_difference(max(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{
Expr: &influxql.Call{
Name: "non_negative_difference",
Args: []influxql.Expr{
&influxql.Call{
Name: "max",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
},
},
},
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Minute},
},
},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
// moving_average
{
s: `SELECT moving_average(field1, 3) FROM myseries;`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "moving_average", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 3}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
{
s: fmt.Sprintf(`SELECT moving_average(max(field1), 3) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{
Expr: &influxql.Call{
Name: "moving_average",
Args: []influxql.Expr{
&influxql.Call{
Name: "max",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
},
},
&influxql.IntegerLiteral{Val: 3},
},
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Minute},
},
},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
// cumulative_sum
{
s: fmt.Sprintf(`SELECT cumulative_sum(field1) FROM myseries WHERE time > '%s'`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{
{
Expr: &influxql.Call{
Name: "cumulative_sum",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
},
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
{
s: fmt.Sprintf(`SELECT cumulative_sum(mean(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{
{
Expr: &influxql.Call{
Name: "cumulative_sum",
Args: []influxql.Expr{
&influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
},
},
},
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Minute},
},
},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
// holt_winters
{
s: fmt.Sprintf(`SELECT holt_winters(first(field1), 3, 1) FROM myseries WHERE time > '%s' GROUP BY time(1h);`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{
Name: "holt_winters",
Args: []influxql.Expr{
&influxql.Call{
Name: "first",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
},
},
&influxql.IntegerLiteral{Val: 3},
&influxql.IntegerLiteral{Val: 1},
},
}},
},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 1 * time.Hour},
},
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
{
s: fmt.Sprintf(`SELECT holt_winters_with_fit(first(field1), 3, 1) FROM myseries WHERE time > '%s' GROUP BY time(1h);`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{
Name: "holt_winters_with_fit",
Args: []influxql.Expr{
&influxql.Call{
Name: "first",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
},
},
&influxql.IntegerLiteral{Val: 3},
&influxql.IntegerLiteral{Val: 1},
}}},
},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 1 * time.Hour},
},
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
{
s: fmt.Sprintf(`SELECT holt_winters(max(field1), 4, 5) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{
Expr: &influxql.Call{
Name: "holt_winters",
Args: []influxql.Expr{
&influxql.Call{
Name: "max",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
},
},
&influxql.IntegerLiteral{Val: 4},
&influxql.IntegerLiteral{Val: 5},
},
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Minute},
},
},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
{
s: fmt.Sprintf(`SELECT holt_winters_with_fit(max(field1), 4, 5) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{
Expr: &influxql.Call{
Name: "holt_winters_with_fit",
Args: []influxql.Expr{
&influxql.Call{
Name: "max",
Args: []influxql.Expr{
&influxql.VarRef{Val: "field1"},
},
},
&influxql.IntegerLiteral{Val: 4},
&influxql.IntegerLiteral{Val: 5},
},
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Minute},
},
},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
// SELECT statement (lowercase)
{
s: `select my_field from myseries`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "my_field"}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
// SELECT statement (lowercase) with quoted field
{
s: `select 'my_field' from myseries`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.StringLiteral{Val: "my_field"}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
// SELECT statement with multiple ORDER BY fields
{
skip: true,
s: `SELECT field1 FROM myseries ORDER BY ASC, field1, field2 DESC LIMIT 10`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "field1"}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
SortFields: []*influxql.SortField{
{Ascending: true},
{Name: "field1"},
{Name: "field2"},
},
Limit: 10,
},
},
// SELECT statement with SLIMIT and SOFFSET
{
s: `SELECT field1 FROM myseries SLIMIT 10 SOFFSET 5`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "field1"}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
SLimit: 10,
SOffset: 5,
},
},
// SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/
{
s: `SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.AND,
LHS: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "host"},
RHS: &influxql.StringLiteral{Val: "serverC"},
},
RHS: &influxql.BinaryExpr{
Op: influxql.EQREGEX,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(".*west.*")},
},
},
},
},
// select percentile statements
{
s: `select percentile("field1", 2.0) from cpu`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "percentile", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2.0}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
{
s: `select percentile("field1", 2.0), field2 from cpu`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "percentile", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2.0}}}},
{Expr: &influxql.VarRef{Val: "field2"}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// select top statements
{
s: `select top("field1", 2) from cpu`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 2}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
{
s: `select top(field1, 2) from cpu`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 2}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
{
s: `select top(field1, 2), tag1 from cpu`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.IntegerLiteral{Val: 2}}}},
{Expr: &influxql.VarRef{Val: "tag1"}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
{
s: `select top(field1, tag1, 2), tag1 from cpu`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.VarRef{Val: "tag1"}, &influxql.IntegerLiteral{Val: 2}}}},
{Expr: &influxql.VarRef{Val: "tag1"}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// select distinct statements
{
s: `select distinct(field1) from cpu`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "distinct", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
{
s: `select distinct field2 from network`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.Distinct{Val: "field2"}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "network"}},
},
},
{
s: `select count(distinct field3) from metrics`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.Distinct{Val: "field3"}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "metrics"}},
},
},
{
s: `select count(distinct field3), sum(field4) from metrics`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.Distinct{Val: "field3"}}}},
{Expr: &influxql.Call{Name: "sum", Args: []influxql.Expr{&influxql.VarRef{Val: "field4"}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "metrics"}},
},
},
{
s: `select count(distinct(field3)), sum(field4) from metrics`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.Call{Name: "distinct", Args: []influxql.Expr{&influxql.VarRef{Val: "field3"}}}}}},
{Expr: &influxql.Call{Name: "sum", Args: []influxql.Expr{&influxql.VarRef{Val: "field4"}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "metrics"}},
},
},
// SELECT * FROM WHERE time
{
s: fmt.Sprintf(`SELECT * FROM cpu WHERE time > '%s'`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
},
},
// SELECT * FROM WHERE field comparisons
{
s: `SELECT * FROM cpu WHERE load > 100`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "load"},
RHS: &influxql.IntegerLiteral{Val: 100},
},
},
},
{
s: `SELECT * FROM cpu WHERE load >= 100`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GTE,
LHS: &influxql.VarRef{Val: "load"},
RHS: &influxql.IntegerLiteral{Val: 100},
},
},
},
{
s: `SELECT * FROM cpu WHERE load = 100`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "load"},
RHS: &influxql.IntegerLiteral{Val: 100},
},
},
},
{
s: `SELECT * FROM cpu WHERE load <= 100`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.LTE,
LHS: &influxql.VarRef{Val: "load"},
RHS: &influxql.IntegerLiteral{Val: 100},
},
},
},
{
s: `SELECT * FROM cpu WHERE load < 100`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.LT,
LHS: &influxql.VarRef{Val: "load"},
RHS: &influxql.IntegerLiteral{Val: 100},
},
},
},
{
s: `SELECT * FROM cpu WHERE load != 100`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.NEQ,
LHS: &influxql.VarRef{Val: "load"},
RHS: &influxql.IntegerLiteral{Val: 100},
},
},
},
// SELECT * FROM /<regex>/
{
s: `SELECT * FROM /cpu.*/`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile("cpu.*")}},
},
},
},
// SELECT * FROM "db"."rp"./<regex>/
{
s: `SELECT * FROM "db"."rp"./cpu.*/`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{
Database: `db`,
RetentionPolicy: `rp`,
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile("cpu.*")}},
},
},
},
// SELECT * FROM "db"../<regex>/
{
s: `SELECT * FROM "db"../cpu.*/`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{
Database: `db`,
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile("cpu.*")}},
},
},
},
// SELECT * FROM "rp"./<regex>/
{
s: `SELECT * FROM "rp"./cpu.*/`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Sources: []influxql.Source{&influxql.Measurement{
RetentionPolicy: `rp`,
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile("cpu.*")}},
},
},
},
// SELECT statement with group by
{
s: `SELECT sum(value) FROM "kbps" WHERE time > now() - 120s AND deliveryservice='steam-dns' and cachegroup = 'total' GROUP BY time(60s)`,
stmt: &influxql.SelectStatement{
IsRawQuery: false,
Fields: []*influxql.Field{
{Expr: &influxql.Call{Name: "sum", Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "kbps"}},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 60 * time.Second}}}}},
Condition: &influxql.BinaryExpr{ // 1
Op: influxql.AND,
LHS: &influxql.BinaryExpr{ // 2
Op: influxql.AND,
LHS: &influxql.BinaryExpr{ //3
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.BinaryExpr{
Op: influxql.SUB,
LHS: &influxql.Call{Name: "now"},
RHS: &influxql.DurationLiteral{Val: mustParseDuration("120s")},
},
},
RHS: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "deliveryservice"},
RHS: &influxql.StringLiteral{Val: "steam-dns"},
},
},
RHS: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "cachegroup"},
RHS: &influxql.StringLiteral{Val: "total"},
},
},
},
},
// SELECT statement with group by and multi digit duration (prevent regression from #731://github.com/influxdata/influxdb/pull/7316)
{
s: fmt.Sprintf(`SELECT count(value) FROM cpu where time < '%s' group by time(500ms)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "count",
Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.LT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 500 * time.Millisecond}}}}},
},
},
// SELECT statement with fill
{
s: fmt.Sprintf(`SELECT mean(value) FROM cpu where time < '%s' GROUP BY time(5m) fill(1)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.LT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},
Fill: influxql.NumberFill,
FillValue: int64(1),
},
},
// SELECT statement with FILL(none) -- check case insensitivity
{
s: fmt.Sprintf(`SELECT mean(value) FROM cpu where time < '%s' GROUP BY time(5m) FILL(none)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.LT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},
Fill: influxql.NoFill,
},
},
// SELECT statement with previous fill
{
s: fmt.Sprintf(`SELECT mean(value) FROM cpu where time < '%s' GROUP BY time(5m) FILL(previous)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.LT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},
Fill: influxql.PreviousFill,
},
},
		// SELECT statement with linear fill
{
s: fmt.Sprintf(`SELECT mean(value) FROM cpu where time < '%s' GROUP BY time(5m) FILL(linear)`, now.UTC().Format(time.RFC3339Nano)),
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.LT,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},
},
Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},
Fill: influxql.LinearFill,
},
},
// SELECT casts
{
s: `SELECT field1::float, field2::integer, field6::unsigned, field3::string, field4::boolean, field5::field, tag1::tag FROM cpu`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{
Expr: &influxql.VarRef{
Val: "field1",
Type: influxql.Float,
},
},
{
Expr: &influxql.VarRef{
Val: "field2",
Type: influxql.Integer,
},
},
{
Expr: &influxql.VarRef{
Val: "field6",
Type: influxql.Unsigned,
},
},
{
Expr: &influxql.VarRef{
Val: "field3",
Type: influxql.String,
},
},
{
Expr: &influxql.VarRef{
Val: "field4",
Type: influxql.Boolean,
},
},
{
Expr: &influxql.VarRef{
Val: "field5",
Type: influxql.AnyField,
},
},
{
Expr: &influxql.VarRef{
Val: "tag1",
Type: influxql.Tag,
},
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with a bound parameter
{
s: `SELECT value FROM cpu WHERE value > $value`,
params: map[string]interface{}{
"value": int64(2),
},
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{
Expr: &influxql.VarRef{Val: "value"}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "value"},
RHS: &influxql.IntegerLiteral{Val: 2},
},
},
},
// SELECT statement with a bound parameter that contains spaces
{
s: `SELECT value FROM cpu WHERE value > $"multi-word value"`,
params: map[string]interface{}{
"multi-word value": int64(2),
},
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{
Expr: &influxql.VarRef{Val: "value"}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{Val: "value"},
RHS: &influxql.IntegerLiteral{Val: 2},
},
},
},
// SELECT statement with a field as a bound parameter.
{
s: `SELECT mean($field) FROM cpu`,
params: map[string]interface{}{
"field": map[string]interface{}{"identifier": "value"},
},
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with a function as a bound parameter.
{
s: `SELECT $fn(value) FROM cpu`,
params: map[string]interface{}{
"fn": map[string]interface{}{"identifier": "mean"},
},
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with a regex as a bound parameter.
{
s: `SELECT mean(value) FROM cpu WHERE host =~ $host`,
params: map[string]interface{}{
"host": map[string]interface{}{"regex": "^server.*"},
},
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
}}},
},
Condition: &influxql.BinaryExpr{
Op: influxql.EQREGEX,
LHS: &influxql.VarRef{
Val: "host",
},
RHS: &influxql.RegexLiteral{
Val: regexp.MustCompile(`^server.*`),
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with a field and type as a bound parameter.
{
s: `SELECT $field::$type FROM cpu`,
params: map[string]interface{}{
"field": map[string]interface{}{"identifier": "value"},
"type": map[string]interface{}{"identifier": "integer"},
},
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{
Expr: &influxql.VarRef{
Val: "value",
Type: influxql.Integer,
}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with a float as a bound parameter.
{
s: `SELECT value FROM cpu WHERE value > $f`,
params: map[string]interface{}{
"f": map[string]interface{}{"float": 2.0},
},
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{
Expr: &influxql.VarRef{
Val: "value",
}},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{
Val: "value",
},
RHS: &influxql.NumberLiteral{
Val: 2,
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with a float as an integer in a bound parameter.
{
s: `SELECT value FROM cpu WHERE value > $f`,
params: map[string]interface{}{
"f": map[string]interface{}{"float": int64(2)},
},
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{
Expr: &influxql.VarRef{
Val: "value",
}},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{
Val: "value",
},
RHS: &influxql.NumberLiteral{
Val: 2,
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with an integer in a bound parameter.
{
s: `SELECT value FROM cpu WHERE value > $i`,
params: map[string]interface{}{
"i": map[string]interface{}{"integer": int64(2)},
},
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{
Expr: &influxql.VarRef{
Val: "value",
}},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GT,
LHS: &influxql.VarRef{
Val: "value",
},
RHS: &influxql.IntegerLiteral{
Val: 2,
},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with group by interval with a bound parameter.
{
s: `SELECT mean(value) FROM cpu GROUP BY time($interval)`,
params: map[string]interface{}{
"interval": map[string]interface{}{"duration": "10s"},
},
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
},
}},
},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 10 * time.Second},
},
},
}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with group by interval integer with a bound parameter.
{
s: `SELECT mean(value) FROM cpu GROUP BY time($interval)`,
params: map[string]interface{}{
"interval": map[string]interface{}{"duration": int64(10 * time.Second)},
},
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
},
}},
},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 10 * time.Second},
},
},
}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with group by interval integer with a bound parameter and nanosecond precision.
{
s: `SELECT mean(value) FROM cpu GROUP BY time($interval)`,
params: map[string]interface{}{
"interval": map[string]interface{}{"duration": int64(10)},
},
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
},
}},
},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 10},
},
},
}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with group by interval json number with a bound parameter and nanosecond precision.
{
s: `SELECT mean(value) FROM cpu GROUP BY time($interval)`,
params: map[string]interface{}{
"interval": map[string]interface{}{"duration": json.Number("10")},
},
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
},
}},
},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 10},
},
},
}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SELECT statement with a subquery
{
s: `SELECT sum(derivative) FROM (SELECT derivative(value) FROM cpu GROUP BY host) WHERE time >= now() - 1d GROUP BY time(1h)`,
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "sum",
Args: []influxql.Expr{
&influxql.VarRef{Val: "derivative"},
}},
}},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Hour},
},
},
}},
Sources: []influxql.Source{
&influxql.SubQuery{
Statement: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "derivative",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
},
},
}},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.VarRef{Val: "host"},
}},
Sources: []influxql.Source{
&influxql.Measurement{Name: "cpu"},
},
},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GTE,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.BinaryExpr{
Op: influxql.SUB,
LHS: &influxql.Call{Name: "now"},
RHS: &influxql.DurationLiteral{Val: 24 * time.Hour},
},
},
},
},
{
s: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(1h)) WHERE time >= now() - 1d`,
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "sum",
Args: []influxql.Expr{
&influxql.VarRef{Val: "mean"},
}},
}},
Sources: []influxql.Source{
&influxql.SubQuery{
Statement: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
},
},
}},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Hour},
},
},
}},
Sources: []influxql.Source{
&influxql.Measurement{Name: "cpu"},
},
},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GTE,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.BinaryExpr{
Op: influxql.SUB,
LHS: &influxql.Call{Name: "now"},
RHS: &influxql.DurationLiteral{Val: 24 * time.Hour},
},
},
},
},
{
s: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu WHERE time >= now() - 1d GROUP BY time(1h))`,
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "sum",
Args: []influxql.Expr{
&influxql.VarRef{Val: "mean"},
}},
}},
Sources: []influxql.Source{
&influxql.SubQuery{
Statement: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
},
},
}},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Hour},
},
},
}},
Condition: &influxql.BinaryExpr{
Op: influxql.GTE,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.BinaryExpr{
Op: influxql.SUB,
LHS: &influxql.Call{Name: "now"},
RHS: &influxql.DurationLiteral{Val: 24 * time.Hour},
},
},
Sources: []influxql.Source{
&influxql.Measurement{Name: "cpu"},
},
},
},
},
},
},
{
s: `SELECT sum(derivative) FROM (SELECT derivative(mean(value)) FROM cpu GROUP BY host) WHERE time >= now() - 1d GROUP BY time(1h)`,
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "sum",
Args: []influxql.Expr{
&influxql.VarRef{Val: "derivative"},
}},
}},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: time.Hour},
},
},
}},
Sources: []influxql.Source{
&influxql.SubQuery{
Statement: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "derivative",
Args: []influxql.Expr{
&influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"},
},
},
},
},
}},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.VarRef{Val: "host"},
}},
Sources: []influxql.Source{
&influxql.Measurement{Name: "cpu"},
},
},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.GTE,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.BinaryExpr{
Op: influxql.SUB,
LHS: &influxql.Call{Name: "now"},
RHS: &influxql.DurationLiteral{Val: 24 * time.Hour},
},
},
},
},
// select statements with intertwined comments
{
s: `SELECT "user" /*, system, idle */ FROM cpu`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.VarRef{Val: "user"}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
{
s: `SELECT /foo\/*bar/ FROM /foo\/*bar*/ WHERE x = 1`,
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.RegexLiteral{Val: regexp.MustCompile(`foo/*bar`)}},
},
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`foo/*bar*`)},
},
},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "x"},
RHS: &influxql.IntegerLiteral{Val: 1},
},
},
},
// SELECT statement with a time zone
{
s: `SELECT mean(value) FROM cpu WHERE time >= now() - 7d GROUP BY time(1d) TZ('America/Los_Angeles')`,
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{
&influxql.VarRef{Val: "value"}},
}}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Condition: &influxql.BinaryExpr{
Op: influxql.GTE,
LHS: &influxql.VarRef{Val: "time"},
RHS: &influxql.BinaryExpr{
Op: influxql.SUB,
LHS: &influxql.Call{Name: "now"},
RHS: &influxql.DurationLiteral{Val: 7 * 24 * time.Hour},
},
},
Dimensions: []*influxql.Dimension{{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 24 * time.Hour}}}}},
Location: LosAngeles,
},
},
// EXPLAIN ...
{
s: `EXPLAIN SELECT * FROM cpu`,
stmt: &influxql.ExplainStatement{
Statement: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.Wildcard{}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
},
// EXPLAIN VERBOSE ...
{
s: `EXPLAIN VERBOSE SELECT * FROM cpu`,
stmt: &influxql.ExplainStatement{
Statement: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.Wildcard{}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
Verbose: true,
},
},
// EXPLAIN ANALYZE ...
{
s: `EXPLAIN ANALYZE SELECT * FROM cpu`,
stmt: &influxql.ExplainStatement{
Statement: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.Wildcard{}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
Analyze: true,
},
},
// EXPLAIN ANALYZE VERBOSE ...
{
s: `EXPLAIN ANALYZE VERBOSE SELECT * FROM cpu`,
stmt: &influxql.ExplainStatement{
Statement: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{
{Expr: &influxql.Wildcard{}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
Verbose: true,
Analyze: true,
},
},
// See issues https://github.com/influxdata/influxdb/issues/1647
// and https://github.com/influxdata/influxdb/issues/4404
// DELETE statement
//{
// s: `DELETE FROM myseries WHERE host = 'hosta.influxdb.org'`,
// stmt: &influxql.DeleteStatement{
// Source: &influxql.Measurement{Name: "myseries"},
// Condition: &influxql.BinaryExpr{
// Op: influxql.EQ,
// LHS: &influxql.VarRef{Val: "host"},
// RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
// },
// },
//},
// SHOW GRANTS
{
s: `SHOW GRANTS FOR jdoe`,
stmt: &influxql.ShowGrantsForUserStatement{Name: "jdoe"},
},
// SHOW DATABASES
{
s: `SHOW DATABASES`,
stmt: &influxql.ShowDatabasesStatement{},
},
// SHOW SERIES statement
{
s: `SHOW SERIES`,
stmt: &influxql.ShowSeriesStatement{},
},
// SHOW SERIES FROM
{
s: `SHOW SERIES FROM cpu`,
stmt: &influxql.ShowSeriesStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SHOW SERIES ON db0
{
s: `SHOW SERIES ON db0`,
stmt: &influxql.ShowSeriesStatement{
Database: "db0",
},
},
// SHOW SERIES FROM /<regex>/
{
s: `SHOW SERIES FROM /[cg]pu/`,
stmt: &influxql.ShowSeriesStatement{
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
},
// SHOW SERIES with OFFSET 0
{
s: `SHOW SERIES OFFSET 0`,
stmt: &influxql.ShowSeriesStatement{Offset: 0},
},
// SHOW SERIES with LIMIT 2 OFFSET 0
{
s: `SHOW SERIES LIMIT 2 OFFSET 0`,
stmt: &influxql.ShowSeriesStatement{Offset: 0, Limit: 2},
},
// SHOW SERIES WHERE with ORDER BY and LIMIT
{
skip: true,
s: `SHOW SERIES WHERE region = 'order by desc' ORDER BY DESC, field1, field2 DESC LIMIT 10`,
stmt: &influxql.ShowSeriesStatement{
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "order by desc"},
},
SortFields: []*influxql.SortField{
&influxql.SortField{Ascending: false},
&influxql.SortField{Name: "field1", Ascending: true},
&influxql.SortField{Name: "field2"},
},
Limit: 10,
},
},
// SHOW SERIES CARDINALITY statement
{
s: `SHOW SERIES CARDINALITY`,
stmt: &influxql.ShowSeriesCardinalityStatement{},
},
// SHOW SERIES CARDINALITY ON dbz statement
{
s: `SHOW SERIES CARDINALITY ON dbz`,
stmt: &influxql.ShowSeriesCardinalityStatement{Database: "dbz"},
},
// SHOW SERIES EXACT CARDINALITY statement
{
s: `SHOW SERIES EXACT CARDINALITY`,
stmt: &influxql.ShowSeriesCardinalityStatement{Exact: true},
},
// SHOW SERIES EXACT CARDINALITY FROM cpu
{
s: `SHOW SERIES EXACT CARDINALITY FROM cpu`,
stmt: &influxql.ShowSeriesCardinalityStatement{
Exact: true,
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SHOW SERIES EXACT CARDINALITY ON db0
{
s: `SHOW SERIES EXACT CARDINALITY ON db0`,
stmt: &influxql.ShowSeriesCardinalityStatement{
Exact: true,
Database: "db0",
},
},
// SHOW SERIES EXACT CARDINALITY FROM /<regex>/
{
s: `SHOW SERIES EXACT CARDINALITY FROM /[cg]pu/`,
stmt: &influxql.ShowSeriesCardinalityStatement{
Exact: true,
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
},
// SHOW SERIES EXACT CARDINALITY with OFFSET 0
{
s: `SHOW SERIES EXACT CARDINALITY OFFSET 0`,
stmt: &influxql.ShowSeriesCardinalityStatement{Exact: true, Offset: 0},
},
// SHOW SERIES EXACT CARDINALITY with LIMIT 2 OFFSET 0
{
s: `SHOW SERIES EXACT CARDINALITY LIMIT 2 OFFSET 0`,
stmt: &influxql.ShowSeriesCardinalityStatement{Exact: true, Offset: 0, Limit: 2},
},
// SHOW SERIES EXACT CARDINALITY WHERE with ORDER BY and LIMIT
{
s: `SHOW SERIES EXACT CARDINALITY WHERE region = 'order by desc' LIMIT 10`,
stmt: &influxql.ShowSeriesCardinalityStatement{
Exact: true,
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "order by desc"},
},
Limit: 10,
},
},
// SHOW MEASUREMENTS WHERE with ORDER BY and LIMIT
{
skip: true,
s: `SHOW MEASUREMENTS WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
stmt: &influxql.ShowMeasurementsStatement{
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "uswest"},
},
SortFields: []*influxql.SortField{
{Ascending: true},
{Name: "field1"},
{Name: "field2"},
},
Limit: 10,
},
},
// SHOW MEASUREMENTS ON db0
{
s: `SHOW MEASUREMENTS ON db0`,
stmt: &influxql.ShowMeasurementsStatement{
Database: "db0",
},
},
// SHOW MEASUREMENTS ON db0.rp0
{
s: `SHOW MEASUREMENTS ON db0.rp0`,
stmt: &influxql.ShowMeasurementsStatement{
Database: "db0",
RetentionPolicy: "rp0",
},
},
// SHOW MEASUREMENTS ON *
{
s: `SHOW MEASUREMENTS ON *`,
stmt: &influxql.ShowMeasurementsStatement{
WildcardDatabase: true,
},
},
// SHOW MEASUREMENTS ON *.*
{
s: `SHOW MEASUREMENTS ON *.*`,
stmt: &influxql.ShowMeasurementsStatement{
WildcardDatabase: true,
WildcardRetentionPolicy: true,
},
},
// SHOW MEASUREMENTS ON db0.*
{
s: `SHOW MEASUREMENTS ON db0.*`,
stmt: &influxql.ShowMeasurementsStatement{
Database: "db0",
WildcardRetentionPolicy: true,
},
},
// SHOW MEASUREMENTS ON *.rp0
{
s: `SHOW MEASUREMENTS ON *.rp0`,
stmt: &influxql.ShowMeasurementsStatement{
RetentionPolicy: "rp0",
WildcardDatabase: true,
},
},
// SHOW MEASUREMENTS WITH MEASUREMENT = cpu
{
s: `SHOW MEASUREMENTS WITH MEASUREMENT = cpu`,
stmt: &influxql.ShowMeasurementsStatement{
Source: &influxql.Measurement{Name: "cpu"},
},
},
// SHOW MEASUREMENTS WITH MEASUREMENT =~ /regex/
{
s: `SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/`,
stmt: &influxql.ShowMeasurementsStatement{
Source: &influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
// SHOW MEASUREMENT CARDINALITY statement
{
s: `SHOW MEASUREMENT CARDINALITY`,
stmt: &influxql.ShowMeasurementCardinalityStatement{},
},
// SHOW MEASUREMENT CARDINALITY ON db0 statement
{
s: `SHOW MEASUREMENT CARDINALITY ON db0`,
stmt: &influxql.ShowMeasurementCardinalityStatement{
Exact: false,
Database: "db0",
},
},
// SHOW MEASUREMENT EXACT CARDINALITY statement
{
s: `SHOW MEASUREMENT EXACT CARDINALITY`,
stmt: &influxql.ShowMeasurementCardinalityStatement{
Exact: true,
},
},
// SHOW MEASUREMENT EXACT CARDINALITY FROM cpu
{
s: `SHOW MEASUREMENT EXACT CARDINALITY FROM cpu`,
stmt: &influxql.ShowMeasurementCardinalityStatement{
Exact: true,
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SHOW MEASUREMENT EXACT CARDINALITY ON db0
{
s: `SHOW MEASUREMENT EXACT CARDINALITY ON db0`,
stmt: &influxql.ShowMeasurementCardinalityStatement{
Exact: true,
Database: "db0",
},
},
// SHOW MEASUREMENT EXACT CARDINALITY FROM /<regex>/
{
s: `SHOW MEASUREMENT EXACT CARDINALITY FROM /[cg]pu/`,
stmt: &influxql.ShowMeasurementCardinalityStatement{
Exact: true,
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
},
// SHOW MEASUREMENT EXACT CARDINALITY with OFFSET 0
{
s: `SHOW MEASUREMENT EXACT CARDINALITY OFFSET 0`,
stmt: &influxql.ShowMeasurementCardinalityStatement{
Exact: true, Offset: 0},
},
// SHOW MEASUREMENT EXACT CARDINALITY with LIMIT 2 OFFSET 0
{
s: `SHOW MEASUREMENT EXACT CARDINALITY LIMIT 2 OFFSET 0`,
stmt: &influxql.ShowMeasurementCardinalityStatement{
Exact: true, Offset: 0, Limit: 2},
},
// SHOW MEASUREMENT EXACT CARDINALITY WHERE with ORDER BY and LIMIT
{
s: `SHOW MEASUREMENT EXACT CARDINALITY WHERE region = 'order by desc' LIMIT 10`,
stmt: &influxql.ShowMeasurementCardinalityStatement{
Exact: true,
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "order by desc"},
},
Limit: 10,
},
},
// SHOW QUERIES
{
s: `SHOW QUERIES`,
stmt: &influxql.ShowQueriesStatement{},
},
// KILL QUERY 4
{
s: `KILL QUERY 4`,
stmt: &influxql.KillQueryStatement{
QueryID: 4,
},
},
// KILL QUERY 4 ON localhost
{
s: `KILL QUERY 4 ON localhost`,
stmt: &influxql.KillQueryStatement{
QueryID: 4,
Host: "localhost",
},
},
// SHOW RETENTION POLICIES
{
s: `SHOW RETENTION POLICIES`,
stmt: &influxql.ShowRetentionPoliciesStatement{},
},
// SHOW RETENTION POLICIES ON db0
{
s: `SHOW RETENTION POLICIES ON db0`,
stmt: &influxql.ShowRetentionPoliciesStatement{
Database: "db0",
},
},
// SHOW TAG KEY CARDINALITY statement
{
s: `SHOW TAG KEY CARDINALITY`,
stmt: &influxql.ShowTagKeyCardinalityStatement{},
},
// SHOW TAG KEY CARDINALITY FROM cpu
{
s: `SHOW TAG KEY CARDINALITY FROM cpu`,
stmt: &influxql.ShowTagKeyCardinalityStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SHOW TAG KEY CARDINALITY ON db0
{
s: `SHOW TAG KEY CARDINALITY ON db0`,
stmt: &influxql.ShowTagKeyCardinalityStatement{
Database: "db0",
},
},
// SHOW TAG KEY CARDINALITY FROM /<regex>/
{
s: `SHOW TAG KEY CARDINALITY FROM /[cg]pu/`,
stmt: &influxql.ShowTagKeyCardinalityStatement{
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
},
// SHOW TAG KEY CARDINALITY with OFFSET 0
{
s: `SHOW TAG KEY CARDINALITY OFFSET 0`,
stmt: &influxql.ShowTagKeyCardinalityStatement{Offset: 0},
},
// SHOW TAG KEY CARDINALITY with LIMIT 2 OFFSET 0
{
s: `SHOW TAG KEY CARDINALITY LIMIT 2 OFFSET 0`,
stmt: &influxql.ShowTagKeyCardinalityStatement{Offset: 0, Limit: 2},
},
// SHOW TAG KEY CARDINALITY WHERE with ORDER BY and LIMIT
{
s: `SHOW TAG KEY CARDINALITY WHERE region = 'order by desc' LIMIT 10`,
stmt: &influxql.ShowTagKeyCardinalityStatement{
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "order by desc"},
},
Limit: 10,
},
},
// SHOW TAG KEY EXACT CARDINALITY statement
{
s: `SHOW TAG KEY EXACT CARDINALITY`,
stmt: &influxql.ShowTagKeyCardinalityStatement{
Exact: true,
},
},
// SHOW TAG KEY EXACT CARDINALITY FROM cpu
{
s: `SHOW TAG KEY EXACT CARDINALITY FROM cpu`,
stmt: &influxql.ShowTagKeyCardinalityStatement{
Exact: true,
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SHOW TAG KEY EXACT CARDINALITY ON db0
{
s: `SHOW TAG KEY EXACT CARDINALITY ON db0`,
stmt: &influxql.ShowTagKeyCardinalityStatement{
Exact: true,
Database: "db0",
},
},
// SHOW TAG KEY EXACT CARDINALITY FROM /<regex>/
{
s: `SHOW TAG KEY EXACT CARDINALITY FROM /[cg]pu/`,
stmt: &influxql.ShowTagKeyCardinalityStatement{
Exact: true,
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
},
// SHOW TAG KEY EXACT CARDINALITY with OFFSET 0
{
s: `SHOW TAG KEY EXACT CARDINALITY OFFSET 0`,
stmt: &influxql.ShowTagKeyCardinalityStatement{Exact: true, Offset: 0},
},
// SHOW TAG KEY EXACT CARDINALITY with LIMIT 2 OFFSET 0
{
s: `SHOW TAG KEY EXACT CARDINALITY LIMIT 2 OFFSET 0`,
stmt: &influxql.ShowTagKeyCardinalityStatement{Exact: true, Offset: 0, Limit: 2},
},
// SHOW TAG KEY EXACT CARDINALITY WHERE with ORDER BY and LIMIT
{
s: `SHOW TAG KEY EXACT CARDINALITY WHERE region = 'order by desc' LIMIT 10`,
stmt: &influxql.ShowTagKeyCardinalityStatement{
Exact: true,
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "order by desc"},
},
Limit: 10,
},
},
// SHOW TAG KEYS
{
s: `SHOW TAG KEYS FROM src`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
},
},
// SHOW TAG KEYS ON db0
{
s: `SHOW TAG KEYS ON db0`,
stmt: &influxql.ShowTagKeysStatement{
Database: "db0",
},
},
// SHOW TAG KEYS with LIMIT
{
s: `SHOW TAG KEYS FROM src LIMIT 2`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
Limit: 2,
},
},
// SHOW TAG KEYS with OFFSET
{
s: `SHOW TAG KEYS FROM src OFFSET 1`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
Offset: 1,
},
},
// SHOW TAG KEYS with LIMIT and OFFSET
{
s: `SHOW TAG KEYS FROM src LIMIT 2 OFFSET 1`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
Limit: 2,
Offset: 1,
},
},
// SHOW TAG KEYS with SLIMIT
{
s: `SHOW TAG KEYS FROM src SLIMIT 2`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
SLimit: 2,
},
},
// SHOW TAG KEYS with SOFFSET
{
s: `SHOW TAG KEYS FROM src SOFFSET 1`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
SOffset: 1,
},
},
// SHOW TAG KEYS with SLIMIT and SOFFSET
{
s: `SHOW TAG KEYS FROM src SLIMIT 2 SOFFSET 1`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
SLimit: 2,
SOffset: 1,
},
},
// SHOW TAG KEYS with LIMIT, OFFSET, SLIMIT, and SOFFSET
{
s: `SHOW TAG KEYS FROM src LIMIT 4 OFFSET 3 SLIMIT 2 SOFFSET 1`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
Limit: 4,
Offset: 3,
SLimit: 2,
SOffset: 1,
},
},
// SHOW TAG KEYS FROM /<regex>/
{
s: `SHOW TAG KEYS FROM /[cg]pu/`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
},
// SHOW TAG KEYS
{
skip: true,
s: `SHOW TAG KEYS FROM src WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
stmt: &influxql.ShowTagKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "uswest"},
},
SortFields: []*influxql.SortField{
{Ascending: true},
{Name: "field1"},
{Name: "field2"},
},
Limit: 10,
},
},
// SHOW TAG VALUES FROM ... WITH KEY = ...
{
skip: true,
s: `SHOW TAG VALUES FROM src WITH KEY = region WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
stmt: &influxql.ShowTagValuesStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "region"},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "uswest"},
},
SortFields: []*influxql.SortField{
{Ascending: true},
{Name: "field1"},
{Name: "field2"},
},
Limit: 10,
},
},
// SHOW TAG VALUES FROM ... WITH KEY IN...
{
s: `SHOW TAG VALUES FROM cpu WITH KEY IN (region, host) WHERE region = 'uswest'`,
stmt: &influxql.ShowTagValuesStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Op: influxql.IN,
TagKeyExpr: &influxql.ListLiteral{Vals: []string{"region", "host"}},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "uswest"},
},
},
},
			// SHOW TAG VALUES ... WITH KEY IN (no space before WHERE)
{
s: `SHOW TAG VALUES FROM cpu WITH KEY IN (region,service,host)WHERE region = 'uswest'`,
stmt: &influxql.ShowTagValuesStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Op: influxql.IN,
TagKeyExpr: &influxql.ListLiteral{Vals: []string{"region", "service", "host"}},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "uswest"},
},
},
},
// SHOW TAG VALUES WITH KEY = ...
{
s: `SHOW TAG VALUES WITH KEY = host WHERE region = 'uswest'`,
stmt: &influxql.ShowTagValuesStatement{
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "uswest"},
},
},
},
// SHOW TAG VALUES FROM /<regex>/ WITH KEY = ...
{
s: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`,
stmt: &influxql.ShowTagValuesStatement{
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES WITH KEY = "..."
{
s: `SHOW TAG VALUES WITH KEY = "host" WHERE region = 'uswest'`,
stmt: &influxql.ShowTagValuesStatement{
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: `host`},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "uswest"},
},
},
},
// SHOW TAG VALUES WITH KEY =~ /<regex>/
{
s: `SHOW TAG VALUES WITH KEY =~ /(host|region)/`,
stmt: &influxql.ShowTagValuesStatement{
Op: influxql.EQREGEX,
TagKeyExpr: &influxql.RegexLiteral{Val: regexp.MustCompile(`(host|region)`)},
},
},
// SHOW TAG VALUES ON db0
{
s: `SHOW TAG VALUES ON db0 WITH KEY = "host"`,
stmt: &influxql.ShowTagValuesStatement{
Database: "db0",
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES CARDINALITY statement
{
s: `SHOW TAG VALUES CARDINALITY WITH KEY = host`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES CARDINALITY FROM cpu
{
s: `SHOW TAG VALUES CARDINALITY FROM cpu WITH KEY = host`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES CARDINALITY ON db0
{
s: `SHOW TAG VALUES CARDINALITY ON db0 WITH KEY = host`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Database: "db0",
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES CARDINALITY FROM /<regex>/
{
s: `SHOW TAG VALUES CARDINALITY FROM /[cg]pu/ WITH KEY = host`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES CARDINALITY with OFFSET 0
{
s: `SHOW TAG VALUES CARDINALITY WITH KEY = host OFFSET 0`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
Offset: 0,
},
},
// SHOW TAG VALUES CARDINALITY with LIMIT 2 OFFSET 0
{
s: `SHOW TAG VALUES CARDINALITY WITH KEY = host LIMIT 2 OFFSET 0`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
Offset: 0,
Limit: 2,
},
},
// SHOW TAG VALUES CARDINALITY WHERE with ORDER BY and LIMIT
{
s: `SHOW TAG VALUES CARDINALITY WITH KEY = host WHERE region = 'order by desc' LIMIT 10`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "order by desc"},
},
Limit: 10,
},
},
// SHOW TAG VALUES EXACT CARDINALITY statement
{
s: `SHOW TAG VALUES EXACT CARDINALITY WITH KEY = host`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Exact: true,
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES EXACT CARDINALITY FROM cpu
{
s: `SHOW TAG VALUES EXACT CARDINALITY FROM cpu WITH KEY = host`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Exact: true,
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES EXACT CARDINALITY ON db0
{
s: `SHOW TAG VALUES EXACT CARDINALITY ON db0 WITH KEY = host`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Exact: true,
Database: "db0",
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES EXACT CARDINALITY FROM /<regex>/
{
s: `SHOW TAG VALUES EXACT CARDINALITY FROM /[cg]pu/ WITH KEY = host`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Exact: true,
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
},
},
// SHOW TAG VALUES EXACT CARDINALITY with OFFSET 0
{
s: `SHOW TAG VALUES EXACT CARDINALITY WITH KEY = host OFFSET 0`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Exact: true,
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
Offset: 0,
},
},
// SHOW TAG VALUES EXACT CARDINALITY with LIMIT 2 OFFSET 0
{
s: `SHOW TAG VALUES EXACT CARDINALITY WITH KEY = host LIMIT 2 OFFSET 0`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Exact: true,
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
Offset: 0,
Limit: 2,
},
},
// SHOW TAG VALUES EXACT CARDINALITY WHERE with ORDER BY and LIMIT
{
s: `SHOW TAG VALUES EXACT CARDINALITY WITH KEY = host WHERE region = 'order by desc' LIMIT 10`,
stmt: &influxql.ShowTagValuesCardinalityStatement{
Exact: true,
Op: influxql.EQ,
TagKeyExpr: &influxql.StringLiteral{Val: "host"},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "order by desc"},
},
Limit: 10,
},
},
// SHOW USERS
{
s: `SHOW USERS`,
stmt: &influxql.ShowUsersStatement{},
},
// SHOW FIELD KEYS
{
skip: true,
s: `SHOW FIELD KEYS FROM src ORDER BY ASC, field1, field2 DESC LIMIT 10`,
stmt: &influxql.ShowFieldKeysStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
SortFields: []*influxql.SortField{
{Ascending: true},
{Name: "field1"},
{Name: "field2"},
},
Limit: 10,
},
},
{
s: `SHOW FIELD KEYS FROM /[cg]pu/`,
stmt: &influxql.ShowFieldKeysStatement{
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
},
{
s: `SHOW FIELD KEYS ON db0`,
stmt: &influxql.ShowFieldKeysStatement{
Database: "db0",
},
},
// SHOW FIELD KEY CARDINALITY statement
{
s: `SHOW FIELD KEY CARDINALITY`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{},
},
// SHOW FIELD KEY CARDINALITY FROM cpu
{
s: `SHOW FIELD KEY CARDINALITY FROM cpu`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SHOW FIELD KEY CARDINALITY ON db0
{
s: `SHOW FIELD KEY CARDINALITY ON db0`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Database: "db0",
},
},
// SHOW FIELD KEY CARDINALITY FROM /<regex>/
{
s: `SHOW FIELD KEY CARDINALITY FROM /[cg]pu/`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
},
// SHOW FIELD KEY CARDINALITY with OFFSET 0
{
s: `SHOW FIELD KEY CARDINALITY OFFSET 0`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Offset: 0,
},
},
// SHOW FIELD KEY CARDINALITY with LIMIT 2 OFFSET 0
{
s: `SHOW FIELD KEY CARDINALITY LIMIT 2 OFFSET 0`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Offset: 0,
Limit: 2,
},
},
// SHOW FIELD KEY CARDINALITY WHERE with ORDER BY and LIMIT
{
s: `SHOW FIELD KEY CARDINALITY WHERE region = 'order by desc' LIMIT 10`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "order by desc"},
},
Limit: 10,
},
},
// SHOW FIELD KEY EXACT CARDINALITY statement
{
s: `SHOW FIELD KEY EXACT CARDINALITY`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Exact: true,
},
},
// SHOW FIELD KEY EXACT CARDINALITY FROM cpu
{
s: `SHOW FIELD KEY EXACT CARDINALITY FROM cpu`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Exact: true,
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
},
},
// SHOW FIELD KEY EXACT CARDINALITY ON db0
{
s: `SHOW FIELD KEY EXACT CARDINALITY ON db0`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Exact: true,
Database: "db0",
},
},
// SHOW FIELD KEY EXACT CARDINALITY FROM /<regex>/
{
s: `SHOW FIELD KEY EXACT CARDINALITY FROM /[cg]pu/`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Exact: true,
Sources: []influxql.Source{
&influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
},
// SHOW FIELD KEY EXACT CARDINALITY with OFFSET 0
{
s: `SHOW FIELD KEY EXACT CARDINALITY OFFSET 0`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Exact: true,
Offset: 0,
},
},
// SHOW FIELD KEY EXACT CARDINALITY with LIMIT 2 OFFSET 0
{
s: `SHOW FIELD KEY EXACT CARDINALITY LIMIT 2 OFFSET 0`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Exact: true,
Offset: 0,
Limit: 2,
},
},
// SHOW FIELD KEY EXACT CARDINALITY WHERE with ORDER BY and LIMIT
{
s: `SHOW FIELD KEY EXACT CARDINALITY WHERE region = 'order by desc' LIMIT 10`,
stmt: &influxql.ShowFieldKeyCardinalityStatement{
Exact: true,
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "region"},
RHS: &influxql.StringLiteral{Val: "order by desc"},
},
Limit: 10,
},
},
// DELETE statement
{
s: `DELETE FROM src`,
stmt: &influxql.DeleteSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}},
},
{
s: `DELETE WHERE host = 'hosta.influxdb.org'`,
stmt: &influxql.DeleteSeriesStatement{
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "host"},
RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
},
},
},
{
s: `DELETE FROM src WHERE host = 'hosta.influxdb.org'`,
stmt: &influxql.DeleteSeriesStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "host"},
RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
},
},
},
// DROP SERIES statement
{
s: `DROP SERIES FROM src`,
stmt: &influxql.DropSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}},
},
{
s: `DROP SERIES WHERE host = 'hosta.influxdb.org'`,
stmt: &influxql.DropSeriesStatement{
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "host"},
RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
},
},
},
{
s: `DROP SERIES FROM src WHERE host = 'hosta.influxdb.org'`,
stmt: &influxql.DropSeriesStatement{
Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
Condition: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "host"},
RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
},
},
},
// SHOW CONTINUOUS QUERIES statement
{
s: `SHOW CONTINUOUS QUERIES`,
stmt: &influxql.ShowContinuousQueriesStatement{},
},
// CREATE CONTINUOUS QUERY ... INTO <measurement>
{
s: `CREATE CONTINUOUS QUERY myquery ON testdb RESAMPLE EVERY 1m FOR 1h BEGIN SELECT count(field1) INTO measure1 FROM myseries GROUP BY time(5m) END`,
stmt: &influxql.CreateContinuousQueryStatement{
Name: "myquery",
Database: "testdb",
Source: &influxql.SelectStatement{
Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}},
Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 5 * time.Minute},
},
},
},
},
},
ResampleEvery: time.Minute,
ResampleFor: time.Hour,
},
},
{
s: `CREATE CONTINUOUS QUERY myquery ON testdb RESAMPLE FOR 1h BEGIN SELECT count(field1) INTO measure1 FROM myseries GROUP BY time(5m) END`,
stmt: &influxql.CreateContinuousQueryStatement{
Name: "myquery",
Database: "testdb",
Source: &influxql.SelectStatement{
Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}},
Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 5 * time.Minute},
},
},
},
},
},
ResampleFor: time.Hour,
},
},
{
s: `CREATE CONTINUOUS QUERY myquery ON testdb RESAMPLE EVERY 1m BEGIN SELECT count(field1) INTO measure1 FROM myseries GROUP BY time(5m) END`,
stmt: &influxql.CreateContinuousQueryStatement{
Name: "myquery",
Database: "testdb",
Source: &influxql.SelectStatement{
Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}},
Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 5 * time.Minute},
},
},
},
},
},
ResampleEvery: time.Minute,
},
},
{
s: `create continuous query "this.is-a.test" on segments begin select * into measure1 from cpu_load_short end`,
stmt: &influxql.CreateContinuousQueryStatement{
Name: "this.is-a.test",
Database: "segments",
Source: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}},
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu_load_short"}},
},
},
},
// CREATE CONTINUOUS QUERY ... INTO <retention-policy>.<measurement>
{
s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT count(field1) INTO "1h.policy1"."cpu.load" FROM myseries GROUP BY time(5m) END`,
stmt: &influxql.CreateContinuousQueryStatement{
Name: "myquery",
Database: "testdb",
Source: &influxql.SelectStatement{
Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}},
Target: &influxql.Target{
Measurement: &influxql.Measurement{RetentionPolicy: "1h.policy1", Name: "cpu.load", IsTarget: true},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 5 * time.Minute},
},
},
},
},
},
},
},
// CREATE CONTINUOUS QUERY for non-aggregate SELECT stmts
{
s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT value INTO "policy1"."value" FROM myseries END`,
stmt: &influxql.CreateContinuousQueryStatement{
Name: "myquery",
Database: "testdb",
Source: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "value"}}},
Target: &influxql.Target{
Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "value", IsTarget: true},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
},
// CREATE CONTINUOUS QUERY for non-aggregate SELECT stmts with multiple values
{
s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT transmit_rx, transmit_tx INTO "policy1"."network" FROM myseries END`,
stmt: &influxql.CreateContinuousQueryStatement{
Name: "myquery",
Database: "testdb",
Source: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "transmit_rx"}},
{Expr: &influxql.VarRef{Val: "transmit_tx"}}},
Target: &influxql.Target{
Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "network", IsTarget: true},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
},
},
},
// CREATE CONTINUOUS QUERY with backreference measurement name
{
s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT mean(value) INTO "policy1".:measurement FROM /^[a-z]+.*/ GROUP BY time(1m) END`,
stmt: &influxql.CreateContinuousQueryStatement{
Name: "myquery",
Database: "testdb",
Source: &influxql.SelectStatement{
Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "mean", Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
Target: &influxql.Target{
Measurement: &influxql.Measurement{RetentionPolicy: "policy1", IsTarget: true},
},
Sources: []influxql.Source{&influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`^[a-z]+.*`)}}},
Dimensions: []*influxql.Dimension{
{
Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 1 * time.Minute},
},
},
},
},
},
},
},
// CREATE DATABASE statement
{
s: `CREATE DATABASE testdb`,
stmt: &influxql.CreateDatabaseStatement{
Name: "testdb",
RetentionPolicyCreate: false,
},
},
{
s: `CREATE DATABASE testdb WITH DURATION 24h`,
stmt: &influxql.CreateDatabaseStatement{
Name: "testdb",
RetentionPolicyCreate: true,
RetentionPolicyDuration: duration(24 * time.Hour),
},
},
{
s: `CREATE DATABASE testdb WITH SHARD DURATION 30m`,
stmt: &influxql.CreateDatabaseStatement{
Name: "testdb",
RetentionPolicyCreate: true,
RetentionPolicyShardGroupDuration: 30 * time.Minute,
},
},
{
s: `CREATE DATABASE testdb WITH REPLICATION 2`,
stmt: &influxql.CreateDatabaseStatement{
Name: "testdb",
RetentionPolicyCreate: true,
RetentionPolicyReplication: intptr(2),
},
},
{
s: `CREATE DATABASE testdb WITH NAME test_name`,
stmt: &influxql.CreateDatabaseStatement{
Name: "testdb",
RetentionPolicyCreate: true,
RetentionPolicyName: "test_name",
},
},
{
s: `CREATE DATABASE testdb WITH DURATION 24h REPLICATION 2 NAME test_name`,
stmt: &influxql.CreateDatabaseStatement{
Name: "testdb",
RetentionPolicyCreate: true,
RetentionPolicyDuration: duration(24 * time.Hour),
RetentionPolicyReplication: intptr(2),
RetentionPolicyName: "test_name",
},
},
{
s: `CREATE DATABASE testdb WITH DURATION 24h REPLICATION 2 SHARD DURATION 10m NAME test_name `,
stmt: &influxql.CreateDatabaseStatement{
Name: "testdb",
RetentionPolicyCreate: true,
RetentionPolicyDuration: duration(24 * time.Hour),
RetentionPolicyReplication: intptr(2),
RetentionPolicyName: "test_name",
RetentionPolicyShardGroupDuration: 10 * time.Minute,
},
},
// CREATE USER statement
{
s: `CREATE USER testuser WITH PASSWORD 'pwd1337'`,
stmt: &influxql.CreateUserStatement{
Name: "testuser",
Password: "pwd1337",
},
},
// CREATE USER ... WITH ALL PRIVILEGES
{
s: `CREATE USER testuser WITH PASSWORD 'pwd1337' WITH ALL PRIVILEGES`,
stmt: &influxql.CreateUserStatement{
Name: "testuser",
Password: "pwd1337",
Admin: true,
},
},
// SET PASSWORD FOR USER
{
s: `SET PASSWORD FOR testuser = 'pwd1337'`,
stmt: &influxql.SetPasswordUserStatement{
Name: "testuser",
Password: "pwd1337",
},
},
// DROP CONTINUOUS QUERY statement
{
s: `DROP CONTINUOUS QUERY myquery ON foo`,
stmt: &influxql.DropContinuousQueryStatement{Name: "myquery", Database: "foo"},
},
// DROP DATABASE statement
{
s: `DROP DATABASE testdb`,
stmt: &influxql.DropDatabaseStatement{
Name: "testdb",
},
},
// DROP MEASUREMENT statement
{
s: `DROP MEASUREMENT cpu`,
stmt: &influxql.DropMeasurementStatement{Name: "cpu"},
},
// DROP RETENTION POLICY
{
s: `DROP RETENTION POLICY "1h.cpu" ON mydb`,
stmt: &influxql.DropRetentionPolicyStatement{
Name: `1h.cpu`,
Database: `mydb`,
},
},
// DROP USER statement
{
s: `DROP USER jdoe`,
stmt: &influxql.DropUserStatement{Name: "jdoe"},
},
// GRANT READ
{
s: `GRANT READ ON testdb TO jdoe`,
stmt: &influxql.GrantStatement{
Privilege: influxql.ReadPrivilege,
On: "testdb",
User: "jdoe",
},
},
// GRANT WRITE
{
s: `GRANT WRITE ON testdb TO jdoe`,
stmt: &influxql.GrantStatement{
Privilege: influxql.WritePrivilege,
On: "testdb",
User: "jdoe",
},
},
// GRANT ALL
{
s: `GRANT ALL ON testdb TO jdoe`,
stmt: &influxql.GrantStatement{
Privilege: influxql.AllPrivileges,
On: "testdb",
User: "jdoe",
},
},
// GRANT ALL PRIVILEGES
{
s: `GRANT ALL PRIVILEGES ON testdb TO jdoe`,
stmt: &influxql.GrantStatement{
Privilege: influxql.AllPrivileges,
On: "testdb",
User: "jdoe",
},
},
// GRANT ALL admin privilege
{
s: `GRANT ALL TO jdoe`,
stmt: &influxql.GrantAdminStatement{
User: "jdoe",
},
},
		// GRANT ALL PRIVILEGES admin privilege
{
s: `GRANT ALL PRIVILEGES TO jdoe`,
stmt: &influxql.GrantAdminStatement{
User: "jdoe",
},
},
// REVOKE READ
{
s: `REVOKE READ on testdb FROM jdoe`,
stmt: &influxql.RevokeStatement{
Privilege: influxql.ReadPrivilege,
On: "testdb",
User: "jdoe",
},
},
// REVOKE WRITE
{
s: `REVOKE WRITE ON testdb FROM jdoe`,
stmt: &influxql.RevokeStatement{
Privilege: influxql.WritePrivilege,
On: "testdb",
User: "jdoe",
},
},
// REVOKE ALL
{
s: `REVOKE ALL ON testdb FROM jdoe`,
stmt: &influxql.RevokeStatement{
Privilege: influxql.AllPrivileges,
On: "testdb",
User: "jdoe",
},
},
// REVOKE ALL PRIVILEGES
{
s: `REVOKE ALL PRIVILEGES ON testdb FROM jdoe`,
stmt: &influxql.RevokeStatement{
Privilege: influxql.AllPrivileges,
On: "testdb",
User: "jdoe",
},
},
// REVOKE ALL admin privilege
{
s: `REVOKE ALL FROM jdoe`,
stmt: &influxql.RevokeAdminStatement{
User: "jdoe",
},
},
// REVOKE ALL PRIVILEGES admin privilege
{
s: `REVOKE ALL PRIVILEGES FROM jdoe`,
stmt: &influxql.RevokeAdminStatement{
User: "jdoe",
},
},
// CREATE RETENTION POLICY
{
s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2`,
stmt: &influxql.CreateRetentionPolicyStatement{
Name: "policy1",
Database: "testdb",
Duration: time.Hour,
Replication: 2,
},
},
// CREATE RETENTION POLICY with infinite retention
{
s: `CREATE RETENTION POLICY policy1 ON testdb DURATION INF REPLICATION 2`,
stmt: &influxql.CreateRetentionPolicyStatement{
Name: "policy1",
Database: "testdb",
Duration: 0,
Replication: 2,
},
},
// CREATE RETENTION POLICY ... DEFAULT
{
s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 2m REPLICATION 4 DEFAULT`,
stmt: &influxql.CreateRetentionPolicyStatement{
Name: "policy1",
Database: "testdb",
Duration: 2 * time.Minute,
Replication: 4,
Default: true,
},
},
// CREATE RETENTION POLICY
{
s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2 SHARD DURATION 30m`,
stmt: &influxql.CreateRetentionPolicyStatement{
Name: "policy1",
Database: "testdb",
Duration: time.Hour,
Replication: 2,
ShardGroupDuration: 30 * time.Minute,
},
},
{
s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2 SHARD DURATION 0s`,
stmt: &influxql.CreateRetentionPolicyStatement{
Name: "policy1",
Database: "testdb",
Duration: time.Hour,
Replication: 2,
ShardGroupDuration: 0,
},
},
{
s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2 SHARD DURATION 1s`,
stmt: &influxql.CreateRetentionPolicyStatement{
Name: "policy1",
Database: "testdb",
Duration: time.Hour,
Replication: 2,
ShardGroupDuration: time.Second,
},
},
// ALTER RETENTION POLICY
{
s: `ALTER RETENTION POLICY policy1 ON testdb DURATION 1m REPLICATION 4 DEFAULT`,
stmt: newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, -1, 4, true),
},
// ALTER RETENTION POLICY with options in reverse order
{
s: `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4 DURATION 1m`,
stmt: newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, -1, 4, true),
},
// ALTER RETENTION POLICY with infinite retention
{
s: `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4 DURATION INF`,
stmt: newAlterRetentionPolicyStatement("policy1", "testdb", 0, -1, 4, true),
},
// ALTER RETENTION POLICY without optional DURATION
{
s: `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4`,
stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, -1, 4, true),
},
// ALTER RETENTION POLICY without optional REPLICATION
{
s: `ALTER RETENTION POLICY policy1 ON testdb DEFAULT`,
stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, -1, -1, true),
},
// ALTER RETENTION POLICY without optional DEFAULT
{
s: `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 4`,
stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, -1, 4, false),
},
// ALTER default retention policy unquoted
{
s: `ALTER RETENTION POLICY default ON testdb REPLICATION 4`,
stmt: newAlterRetentionPolicyStatement("default", "testdb", -1, -1, 4, false),
},
// ALTER RETENTION POLICY with SHARD duration
{
s: `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 4 SHARD DURATION 10m`,
stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, 10*time.Minute, 4, false),
},
// ALTER RETENTION POLICY with all options
{
s: `ALTER RETENTION POLICY default ON testdb DURATION 0s REPLICATION 4 SHARD DURATION 10m DEFAULT`,
stmt: newAlterRetentionPolicyStatement("default", "testdb", time.Duration(0), 10*time.Minute, 4, true),
},
// ALTER RETENTION POLICY with 0s shard duration
{
s: `ALTER RETENTION POLICY default ON testdb DURATION 0s REPLICATION 1 SHARD DURATION 0s`,
stmt: newAlterRetentionPolicyStatement("default", "testdb", time.Duration(0), 0, 1, false),
},
// SHOW STATS
{
s: `SHOW STATS`,
stmt: &influxql.ShowStatsStatement{
Module: "",
},
},
{
s: `SHOW STATS FOR 'cluster'`,
stmt: &influxql.ShowStatsStatement{
Module: "cluster",
},
},
// SHOW SHARD GROUPS
{
s: `SHOW SHARD GROUPS`,
stmt: &influxql.ShowShardGroupsStatement{},
},
// SHOW SHARDS
{
s: `SHOW SHARDS`,
stmt: &influxql.ShowShardsStatement{},
},
// SHOW DIAGNOSTICS
{
s: `SHOW DIAGNOSTICS`,
stmt: &influxql.ShowDiagnosticsStatement{},
},
{
s: `SHOW DIAGNOSTICS FOR 'build'`,
stmt: &influxql.ShowDiagnosticsStatement{
Module: "build",
},
},
// CREATE SUBSCRIPTION
{
s: `CREATE SUBSCRIPTION "name" ON "db"."rp" DESTINATIONS ANY 'udp://host1:9093', 'udp://host2:9093'`,
stmt: &influxql.CreateSubscriptionStatement{
Name: "name",
Database: "db",
RetentionPolicy: "rp",
Destinations: []string{"udp://host1:9093", "udp://host2:9093"},
Mode: "ANY",
},
},
// DROP SUBSCRIPTION
{
s: `DROP SUBSCRIPTION "name" ON "db"."rp"`,
stmt: &influxql.DropSubscriptionStatement{
Name: "name",
Database: "db",
RetentionPolicy: "rp",
},
},
// SHOW SUBSCRIPTIONS
{
s: `SHOW SUBSCRIPTIONS`,
stmt: &influxql.ShowSubscriptionsStatement{},
},
// Errors
{s: ``, err: `found EOF, expected SELECT, DELETE, SHOW, CREATE, DROP, EXPLAIN, GRANT, REVOKE, ALTER, SET, KILL at line 1, char 1`},
{s: `SELECT`, err: `found EOF, expected identifier, string, number, bool at line 1, char 8`},
{s: `blah blah`, err: `found blah, expected SELECT, DELETE, SHOW, CREATE, DROP, EXPLAIN, GRANT, REVOKE, ALTER, SET, KILL at line 1, char 1`},
{s: `SELECT field1 X`, err: `found X, expected FROM at line 1, char 15`},
{s: `SELECT field1 FROM "series" WHERE X +;`, err: `found ;, expected identifier, string, number, bool at line 1, char 38`},
{s: `SELECT field1 FROM myseries GROUP`, err: `found EOF, expected BY at line 1, char 35`},
{s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected integer at line 1, char 35`},
{s: `SELECT field1 FROM myseries LIMIT 10.5`, err: `found 10.5, expected integer at line 1, char 35`},
{s: `SELECT field1 FROM myseries OFFSET`, err: `found EOF, expected integer at line 1, char 36`},
{s: `SELECT field1 FROM myseries OFFSET 10.5`, err: `found 10.5, expected integer at line 1, char 36`},
{s: `SELECT field1 FROM myseries ORDER`, err: `found EOF, expected BY at line 1, char 35`},
{s: `SELECT field1 FROM myseries ORDER BY`, err: `found EOF, expected identifier, ASC, DESC at line 1, char 38`},
{s: `SELECT field1 FROM myseries ORDER BY /`, err: `found /, expected identifier, ASC, DESC at line 1, char 38`},
{s: `SELECT field1 FROM myseries ORDER BY 1`, err: `found 1, expected identifier, ASC, DESC at line 1, char 38`},
{s: `SELECT field1 FROM myseries ORDER BY time ASC,`, err: `found EOF, expected identifier at line 1, char 47`},
{s: `SELECT field1 FROM myseries ORDER BY time, field1`, err: `only ORDER BY time supported at this time`},
{s: `SELECT field1 AS`, err: `found EOF, expected identifier at line 1, char 18`},
{s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`},
{s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse integer at line 1, char 8`},
{s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`},
{s: `SELECT distinct FROM myseries`, err: `found FROM, expected identifier at line 1, char 17`},
{s: `SELECT count(distinct) FROM myseries`, err: `found ), expected (, identifier at line 1, char 22`},
{s: `SELECT field1 from myseries WHERE host =~ 'asd' LIMIT 1`, err: `found asd, expected regex at line 1, char 42`},
{s: `SELECT value > 2 FROM cpu`, err: `invalid operator > in SELECT clause at line 1, char 8; operator is intended for WHERE clause`},
{s: `SELECT value = 2 FROM cpu`, err: `invalid operator = in SELECT clause at line 1, char 8; operator is intended for WHERE clause`},
{s: `SELECT s =~ /foo/ FROM cpu`, err: `invalid operator =~ in SELECT clause at line 1, char 8; operator is intended for WHERE clause`},
{s: `SELECT mean(value) FROM cpu FILL + value`, err: `fill must be a function call`},
// See issues https://github.com/influxdata/influxdb/issues/1647
// and https://github.com/influxdata/influxdb/issues/4404
//{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`},
//{s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`},
//{s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},
{s: `DELETE`, err: `found EOF, expected FROM, WHERE at line 1, char 8`},
{s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`},
{s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},
{s: `DELETE FROM "foo".myseries`, err: `retention policy not supported at line 1, char 1`},
{s: `DELETE FROM foo..myseries`, err: `database not supported at line 1, char 1`},
{s: `DROP MEASUREMENT`, err: `found EOF, expected identifier at line 1, char 18`},
{s: `DROP SERIES`, err: `found EOF, expected FROM, WHERE at line 1, char 13`},
{s: `DROP SERIES FROM`, err: `found EOF, expected identifier at line 1, char 18`},
{s: `DROP SERIES FROM src WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},
{s: `DROP SERIES FROM "foo".myseries`, err: `retention policy not supported at line 1, char 1`},
{s: `DROP SERIES FROM foo..myseries`, err: `database not supported at line 1, char 1`},
{s: `SHOW CONTINUOUS`, err: `found EOF, expected QUERIES at line 1, char 17`},
{s: `SHOW RETENTION`, err: `found EOF, expected POLICIES at line 1, char 16`},
{s: `SHOW RETENTION ON`, err: `found ON, expected POLICIES at line 1, char 16`},
{s: `SHOW RETENTION POLICIES ON`, err: `found EOF, expected identifier at line 1, char 28`},
{s: `SHOW SHARD`, err: `found EOF, expected GROUPS at line 1, char 12`},
{s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, DIAGNOSTICS, FIELD, GRANTS, MEASUREMENT, MEASUREMENTS, QUERIES, RETENTION, SERIES, SHARD, SHARDS, STATS, SUBSCRIPTIONS, TAG, USERS at line 1, char 6`},
{s: `SHOW STATS FOR`, err: `found EOF, expected string at line 1, char 16`},
{s: `SHOW DIAGNOSTICS FOR`, err: `found EOF, expected string at line 1, char 22`},
{s: `SHOW GRANTS`, err: `found EOF, expected FOR at line 1, char 13`},
{s: `SHOW GRANTS FOR`, err: `found EOF, expected identifier at line 1, char 17`},
{s: `DROP CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 17`},
{s: `DROP CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 23`},
{s: `DROP CONTINUOUS QUERY myquery`, err: `found EOF, expected ON at line 1, char 31`},
{s: `DROP CONTINUOUS QUERY myquery ON`, err: `found EOF, expected identifier at line 1, char 34`},
{s: `CREATE CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 19`},
{s: `CREATE CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 25`},
{s: `CREATE CONTINUOUS QUERY cq ON db RESAMPLE FOR 5s BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(10s) END`, err: `FOR duration must be >= GROUP BY time duration: must be a minimum of 10s, got 5s`},
{s: `CREATE CONTINUOUS QUERY cq ON db RESAMPLE EVERY 10s FOR 5s BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(5s) END`, err: `FOR duration must be >= GROUP BY time duration: must be a minimum of 10s, got 5s`},
{s: `DROP FOO`, err: `found FOO, expected CONTINUOUS, DATABASE, MEASUREMENT, RETENTION, SERIES, SHARD, SUBSCRIPTION, USER at line 1, char 6`},
{s: `CREATE FOO`, err: `found FOO, expected CONTINUOUS, DATABASE, USER, RETENTION, SUBSCRIPTION at line 1, char 8`},
{s: `CREATE DATABASE`, err: `found EOF, expected identifier at line 1, char 17`},
{s: `CREATE DATABASE "testdb" WITH`, err: `found EOF, expected DURATION, NAME, REPLICATION, SHARD at line 1, char 31`},
{s: `CREATE DATABASE "testdb" WITH DURATION`, err: `found EOF, expected duration at line 1, char 40`},
{s: `CREATE DATABASE "testdb" WITH REPLICATION`, err: `found EOF, expected integer at line 1, char 43`},
{s: `CREATE DATABASE "testdb" WITH NAME`, err: `found EOF, expected identifier at line 1, char 36`},
{s: `CREATE DATABASE "testdb" WITH SHARD`, err: `found EOF, expected DURATION at line 1, char 37`},
{s: `DROP DATABASE`, err: `found EOF, expected identifier at line 1, char 15`},
{s: `DROP RETENTION`, err: `found EOF, expected POLICY at line 1, char 16`},
{s: `DROP RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 23`},
{s: `DROP RETENTION POLICY "1h.cpu"`, err: `found EOF, expected ON at line 1, char 31`},
{s: `DROP RETENTION POLICY "1h.cpu" ON`, err: `found EOF, expected identifier at line 1, char 35`},
{s: `DROP USER`, err: `found EOF, expected identifier at line 1, char 11`},
{s: `DROP SUBSCRIPTION`, err: `found EOF, expected identifier at line 1, char 19`},
{s: `DROP SUBSCRIPTION "name"`, err: `found EOF, expected ON at line 1, char 25`},
{s: `DROP SUBSCRIPTION "name" ON `, err: `found EOF, expected identifier at line 1, char 30`},
{s: `DROP SUBSCRIPTION "name" ON "db"`, err: `found EOF, expected . at line 1, char 33`},
{s: `DROP SUBSCRIPTION "name" ON "db".`, err: `found EOF, expected identifier at line 1, char 34`},
{s: `CREATE USER testuser`, err: `found EOF, expected WITH at line 1, char 22`},
{s: `CREATE USER testuser WITH`, err: `found EOF, expected PASSWORD at line 1, char 27`},
{s: `CREATE USER testuser WITH PASSWORD`, err: `found EOF, expected string at line 1, char 36`},
{s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH`, err: `found EOF, expected ALL at line 1, char 47`},
{s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH ALL`, err: `found EOF, expected PRIVILEGES at line 1, char 51`},
{s: `CREATE SUBSCRIPTION`, err: `found EOF, expected identifier at line 1, char 21`},
{s: `CREATE SUBSCRIPTION "name"`, err: `found EOF, expected ON at line 1, char 27`},
{s: `CREATE SUBSCRIPTION "name" ON `, err: `found EOF, expected identifier at line 1, char 32`},
{s: `CREATE SUBSCRIPTION "name" ON "db"`, err: `found EOF, expected . at line 1, char 35`},
{s: `CREATE SUBSCRIPTION "name" ON "db".`, err: `found EOF, expected identifier at line 1, char 36`},
{s: `CREATE SUBSCRIPTION "name" ON "db"."rp"`, err: `found EOF, expected DESTINATIONS at line 1, char 40`},
{s: `CREATE SUBSCRIPTION "name" ON "db"."rp" DESTINATIONS`, err: `found EOF, expected ALL, ANY at line 1, char 54`},
{s: `CREATE SUBSCRIPTION "name" ON "db"."rp" DESTINATIONS ALL `, err: `found EOF, expected string at line 1, char 59`},
{s: `GRANT`, err: `found EOF, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},
{s: `GRANT BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},
{s: `GRANT READ`, err: `found EOF, expected ON at line 1, char 12`},
{s: `GRANT READ FROM`, err: `found FROM, expected ON at line 1, char 12`},
{s: `GRANT READ ON`, err: `found EOF, expected identifier at line 1, char 15`},
{s: `GRANT READ ON TO`, err: `found TO, expected identifier at line 1, char 15`},
{s: `GRANT READ ON testdb`, err: `found EOF, expected TO at line 1, char 22`},
{s: `GRANT READ ON testdb TO`, err: `found EOF, expected identifier at line 1, char 25`},
{s: `GRANT READ TO`, err: `found TO, expected ON at line 1, char 12`},
{s: `GRANT WRITE`, err: `found EOF, expected ON at line 1, char 13`},
{s: `GRANT WRITE FROM`, err: `found FROM, expected ON at line 1, char 13`},
{s: `GRANT WRITE ON`, err: `found EOF, expected identifier at line 1, char 16`},
{s: `GRANT WRITE ON TO`, err: `found TO, expected identifier at line 1, char 16`},
{s: `GRANT WRITE ON testdb`, err: `found EOF, expected TO at line 1, char 23`},
{s: `GRANT WRITE ON testdb TO`, err: `found EOF, expected identifier at line 1, char 26`},
{s: `GRANT WRITE TO`, err: `found TO, expected ON at line 1, char 13`},
{s: `GRANT ALL`, err: `found EOF, expected ON, TO at line 1, char 11`},
{s: `GRANT ALL PRIVILEGES`, err: `found EOF, expected ON, TO at line 1, char 22`},
{s: `GRANT ALL FROM`, err: `found FROM, expected ON, TO at line 1, char 11`},
{s: `GRANT ALL PRIVILEGES FROM`, err: `found FROM, expected ON, TO at line 1, char 22`},
{s: `GRANT ALL ON`, err: `found EOF, expected identifier at line 1, char 14`},
{s: `GRANT ALL PRIVILEGES ON`, err: `found EOF, expected identifier at line 1, char 25`},
{s: `GRANT ALL ON TO`, err: `found TO, expected identifier at line 1, char 14`},
{s: `GRANT ALL PRIVILEGES ON TO`, err: `found TO, expected identifier at line 1, char 25`},
{s: `GRANT ALL ON testdb`, err: `found EOF, expected TO at line 1, char 21`},
{s: `GRANT ALL PRIVILEGES ON testdb`, err: `found EOF, expected TO at line 1, char 32`},
{s: `GRANT ALL ON testdb FROM`, err: `found FROM, expected TO at line 1, char 21`},
{s: `GRANT ALL PRIVILEGES ON testdb FROM`, err: `found FROM, expected TO at line 1, char 32`},
{s: `GRANT ALL ON testdb TO`, err: `found EOF, expected identifier at line 1, char 24`},
{s: `GRANT ALL PRIVILEGES ON testdb TO`, err: `found EOF, expected identifier at line 1, char 35`},
{s: `GRANT ALL TO`, err: `found EOF, expected identifier at line 1, char 14`},
{s: `GRANT ALL PRIVILEGES TO`, err: `found EOF, expected identifier at line 1, char 25`},
{s: `KILL`, err: `found EOF, expected QUERY at line 1, char 6`},
{s: `KILL QUERY 10s`, err: `found 10s, expected integer at line 1, char 12`},
{s: `KILL QUERY 4 ON 'host'`, err: `found host, expected identifier at line 1, char 16`},
{s: `REVOKE`, err: `found EOF, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 8`},
{s: `REVOKE BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 8`},
{s: `REVOKE READ`, err: `found EOF, expected ON at line 1, char 13`},
{s: `REVOKE READ TO`, err: `found TO, expected ON at line 1, char 13`},
{s: `REVOKE READ ON`, err: `found EOF, expected identifier at line 1, char 16`},
{s: `REVOKE READ ON FROM`, err: `found FROM, expected identifier at line 1, char 16`},
{s: `REVOKE READ ON testdb`, err: `found EOF, expected FROM at line 1, char 23`},
{s: `REVOKE READ ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 28`},
{s: `REVOKE READ FROM`, err: `found FROM, expected ON at line 1, char 13`},
{s: `REVOKE WRITE`, err: `found EOF, expected ON at line 1, char 14`},
{s: `REVOKE WRITE TO`, err: `found TO, expected ON at line 1, char 14`},
{s: `REVOKE WRITE ON`, err: `found EOF, expected identifier at line 1, char 17`},
{s: `REVOKE WRITE ON FROM`, err: `found FROM, expected identifier at line 1, char 17`},
{s: `REVOKE WRITE ON testdb`, err: `found EOF, expected FROM at line 1, char 24`},
{s: `REVOKE WRITE ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 29`},
{s: `REVOKE WRITE FROM`, err: `found FROM, expected ON at line 1, char 14`},
{s: `REVOKE ALL`, err: `found EOF, expected ON, FROM at line 1, char 12`},
{s: `REVOKE ALL PRIVILEGES`, err: `found EOF, expected ON, FROM at line 1, char 23`},
{s: `REVOKE ALL TO`, err: `found TO, expected ON, FROM at line 1, char 12`},
{s: `REVOKE ALL PRIVILEGES TO`, err: `found TO, expected ON, FROM at line 1, char 23`},
{s: `REVOKE ALL ON`, err: `found EOF, expected identifier at line 1, char 15`},
{s: `REVOKE ALL PRIVILEGES ON`, err: `found EOF, expected identifier at line 1, char 26`},
{s: `REVOKE ALL ON FROM`, err: `found FROM, expected identifier at line 1, char 15`},
{s: `REVOKE ALL PRIVILEGES ON FROM`, err: `found FROM, expected identifier at line 1, char 26`},
{s: `REVOKE ALL ON testdb`, err: `found EOF, expected FROM at line 1, char 22`},
{s: `REVOKE ALL PRIVILEGES ON testdb`, err: `found EOF, expected FROM at line 1, char 33`},
{s: `REVOKE ALL ON testdb TO`, err: `found TO, expected FROM at line 1, char 22`},
{s: `REVOKE ALL PRIVILEGES ON testdb TO`, err: `found TO, expected FROM at line 1, char 33`},
{s: `REVOKE ALL ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 27`},
{s: `REVOKE ALL PRIVILEGES ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 38`},
{s: `REVOKE ALL FROM`, err: `found EOF, expected identifier at line 1, char 17`},
{s: `REVOKE ALL PRIVILEGES FROM`, err: `found EOF, expected identifier at line 1, char 28`},
{s: `CREATE RETENTION`, err: `found EOF, expected POLICY at line 1, char 18`},
{s: `CREATE RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 25`},
{s: `CREATE RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 33`},
{s: `CREATE RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 36`},
{s: `CREATE RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION at line 1, char 43`},
{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION`, err: `found EOF, expected duration at line 1, char 52`},
{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION bad`, err: `found bad, expected duration at line 1, char 52`},
{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h`, err: `found EOF, expected REPLICATION at line 1, char 54`},
{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION`, err: `found EOF, expected integer at line 1, char 67`},
{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 3.14`, err: `found 3.14, expected integer at line 1, char 67`},
{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 0`, err: `invalid value 0: must be 1 <= n <= 2147483647 at line 1, char 67`},
{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION bad`, err: `found bad, expected integer at line 1, char 67`},
{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2 SHARD DURATION INF`, err: `invalid duration INF for shard duration at line 1, char 84`},
{s: `ALTER`, err: `found EOF, expected RETENTION at line 1, char 7`},
{s: `ALTER RETENTION`, err: `found EOF, expected POLICY at line 1, char 17`},
{s: `ALTER RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 24`},
{s: `ALTER RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 32`}, {s: `ALTER RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 35`},
{s: `ALTER RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION, REPLICATION, SHARD, DEFAULT at line 1, char 42`},
{s: `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 1 REPLICATION 2`, err: `found duplicate REPLICATION option at line 1, char 56`},
{s: `ALTER RETENTION POLICY policy1 ON testdb DURATION 15251w`, err: `overflowed duration 15251w: choose a smaller duration or INF at line 1, char 51`},
{s: `ALTER RETENTION POLICY policy1 ON testdb DURATION INF SHARD DURATION INF`, err: `invalid duration INF for shard duration at line 1, char 70`},
{s: `SET`, err: `found EOF, expected PASSWORD at line 1, char 5`},
{s: `SET PASSWORD`, err: `found EOF, expected FOR at line 1, char 14`},
{s: `SET PASSWORD something`, err: `found something, expected FOR at line 1, char 14`},
{s: `SET PASSWORD FOR`, err: `found EOF, expected identifier at line 1, char 18`},
{s: `SET PASSWORD FOR dejan`, err: `found EOF, expected = at line 1, char 24`},
{s: `SET PASSWORD FOR dejan =`, err: `found EOF, expected string at line 1, char 25`},
{s: `SET PASSWORD FOR dejan = bla`, err: `found bla, expected string at line 1, char 26`},
{s: `$SHOW$DATABASES`, err: `found $SHOW, expected SELECT, DELETE, SHOW, CREATE, DROP, EXPLAIN, GRANT, REVOKE, ALTER, SET, KILL at line 1, char 1`},
{s: `SELECT * FROM cpu WHERE "tagkey" = $$`, err: `empty bound parameter`},
// Create a database with a bound parameter.
{
s: `CREATE DATABASE $db`,
params: map[string]interface{}{
"db": map[string]interface{}{"identifier": "mydb"},
},
stmt: &influxql.CreateDatabaseStatement{
Name: "mydb",
},
},
// Count records in a measurement.
{
s: `SELECT count($value) FROM $m`,
params: map[string]interface{}{
"value": map[string]interface{}{"identifier": "my_value"},
"m": map[string]interface{}{"identifier": "my_measurement"},
},
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "count",
Args: []influxql.Expr{
&influxql.VarRef{Val: "my_value"},
}}},
},
Sources: []influxql.Source{&influxql.Measurement{Name: "my_measurement"}},
},
},
// Find the last 10 shapes records.
{
s: `SELECT * FROM $m LIMIT $limit`,
params: map[string]interface{}{
"m": map[string]interface{}{"identifier": "shapes"},
"limit": int64(10),
},
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{
Expr: &influxql.Wildcard{},
}},
Sources: []influxql.Source{&influxql.Measurement{Name: "shapes"}},
Limit: 10,
},
},
// Find the last 10 shapes records (advanced syntax).
{
s: `SELECT * FROM $m LIMIT $limit`,
params: map[string]interface{}{
"m": map[string]interface{}{"identifier": "shapes"},
"limit": map[string]interface{}{"integer": json.Number("10")},
},
stmt: &influxql.SelectStatement{
IsRawQuery: true,
Fields: []*influxql.Field{{
Expr: &influxql.Wildcard{},
}},
Sources: []influxql.Source{&influxql.Measurement{Name: "shapes"}},
Limit: 10,
},
},
}
for i, tt := range tests {
if tt.skip {
continue
}
p := influxql.NewParser(strings.NewReader(tt.s))
if tt.params != nil {
p.SetParams(tt.params)
}
stmt, err := p.ParseStatement()
// if it's a CQ, there is a non-exported field that gets memoized during parsing that needs to be set
if st, ok := stmt.(*influxql.CreateContinuousQueryStatement); ok {
if st != nil && st.Source != nil {
tt.stmt.(*influxql.CreateContinuousQueryStatement).Source.GroupByInterval()
}
}
if !reflect.DeepEqual(tt.err, errstring(err)) {
t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.err, err)
} else if tt.err == "" {
if !reflect.DeepEqual(tt.stmt, stmt) {
t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt))
t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String())
t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt)
} else {
// Attempt to reparse the statement as a string and confirm it parses the same.
// Skip this if we have some kind of statement with a password since those will never be reparsed.
switch stmt.(type) {
case *influxql.CreateUserStatement, *influxql.SetPasswordUserStatement:
continue
}
stmt2, err := influxql.ParseStatement(stmt.String())
if err != nil {
t.Errorf("%d. %q: unable to parse statement string: %s", i, stmt.String(), err)
} else if !reflect.DeepEqual(tt.stmt, stmt2) {
t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt2))
t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt2.String())
t.Errorf("%d. %q\n\nstmt reparse mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt2)
}
}
}
}
}
// Ensure the parser can parse expressions into an AST.
func TestParser_ParseExpr(t *testing.T) {
	// Each case supplies exactly one of expr (the expected AST) or err
	// (the expected parse-error string, compared via errstring).
	var tests = []struct {
		s    string
		expr influxql.Expr
		err  string
	}{
		// Primitives
		{s: `100.0`, expr: &influxql.NumberLiteral{Val: 100}},
		{s: `100`, expr: &influxql.IntegerLiteral{Val: 100}},
		// int64 boundary handling: one past math.MaxInt64 becomes an
		// UnsignedLiteral; math.MinInt64 still fits; one below underflows.
		{s: `9223372036854775808`, expr: &influxql.UnsignedLiteral{Val: 9223372036854775808}},
		{s: `-9223372036854775808`, expr: &influxql.IntegerLiteral{Val: -9223372036854775808}},
		{s: `-9223372036854775809`, err: `constant -9223372036854775809 underflows int64`},
		{s: `-100.0`, expr: &influxql.NumberLiteral{Val: -100}},
		{s: `-100`, expr: &influxql.IntegerLiteral{Val: -100}},
		{s: `100.`, expr: &influxql.NumberLiteral{Val: 100}},
		{s: `-100.`, expr: &influxql.NumberLiteral{Val: -100}},
		{s: `.23`, expr: &influxql.NumberLiteral{Val: 0.23}},
		{s: `-.23`, expr: &influxql.NumberLiteral{Val: -0.23}},
		{s: `1s`, expr: &influxql.DurationLiteral{Val: time.Second}},
		{s: `-1s`, expr: &influxql.DurationLiteral{Val: -time.Second}},
		{s: `-+1`, err: `found +, expected identifier, number, duration, ( at line 1, char 2`},
		{s: `'foo bar'`, expr: &influxql.StringLiteral{Val: "foo bar"}},
		{s: `true`, expr: &influxql.BooleanLiteral{Val: true}},
		{s: `false`, expr: &influxql.BooleanLiteral{Val: false}},
		{s: `my_ident`, expr: &influxql.VarRef{Val: "my_ident"}},
		// Date/time strings stay plain StringLiterals at parse time.
		{s: `'2000-01-01 00:00:00'`, expr: &influxql.StringLiteral{Val: "2000-01-01 00:00:00"}},
		{s: `'2000-01-01'`, expr: &influxql.StringLiteral{Val: "2000-01-01"}},
		// Simple binary expression
		{
			s: `1 + 2`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.ADD,
				LHS: &influxql.IntegerLiteral{Val: 1},
				RHS: &influxql.IntegerLiteral{Val: 2},
			},
		},
		// Binary expression with LHS precedence
		{
			s: `1 * 2 + 3`,
			expr: &influxql.BinaryExpr{
				Op: influxql.ADD,
				LHS: &influxql.BinaryExpr{
					Op:  influxql.MUL,
					LHS: &influxql.IntegerLiteral{Val: 1},
					RHS: &influxql.IntegerLiteral{Val: 2},
				},
				RHS: &influxql.IntegerLiteral{Val: 3},
			},
		},
		// Binary expression with RHS precedence
		{
			s: `1 + 2 * 3`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.ADD,
				LHS: &influxql.IntegerLiteral{Val: 1},
				RHS: &influxql.BinaryExpr{
					Op:  influxql.MUL,
					LHS: &influxql.IntegerLiteral{Val: 2},
					RHS: &influxql.IntegerLiteral{Val: 3},
				},
			},
		},
		// Binary expression with LHS precedence
		{
			s: `1 / 2 + 3`,
			expr: &influxql.BinaryExpr{
				Op: influxql.ADD,
				LHS: &influxql.BinaryExpr{
					Op:  influxql.DIV,
					LHS: &influxql.IntegerLiteral{Val: 1},
					RHS: &influxql.IntegerLiteral{Val: 2},
				},
				RHS: &influxql.IntegerLiteral{Val: 3},
			},
		},
		// Binary expression with RHS precedence
		{
			s: `1 + 2 / 3`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.ADD,
				LHS: &influxql.IntegerLiteral{Val: 1},
				RHS: &influxql.BinaryExpr{
					Op:  influxql.DIV,
					LHS: &influxql.IntegerLiteral{Val: 2},
					RHS: &influxql.IntegerLiteral{Val: 3},
				},
			},
		},
		// Binary expression with LHS precedence
		{
			s: `1 % 2 + 3`,
			expr: &influxql.BinaryExpr{
				Op: influxql.ADD,
				LHS: &influxql.BinaryExpr{
					Op:  influxql.MOD,
					LHS: &influxql.IntegerLiteral{Val: 1},
					RHS: &influxql.IntegerLiteral{Val: 2},
				},
				RHS: &influxql.IntegerLiteral{Val: 3},
			},
		},
		// Binary expression with RHS precedence
		{
			s: `1 + 2 % 3`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.ADD,
				LHS: &influxql.IntegerLiteral{Val: 1},
				RHS: &influxql.BinaryExpr{
					Op:  influxql.MOD,
					LHS: &influxql.IntegerLiteral{Val: 2},
					RHS: &influxql.IntegerLiteral{Val: 3},
				},
			},
		},
		// Binary expression with LHS paren group.
		{
			s: `(1 + 2) * 3`,
			expr: &influxql.BinaryExpr{
				Op: influxql.MUL,
				LHS: &influxql.ParenExpr{
					Expr: &influxql.BinaryExpr{
						Op:  influxql.ADD,
						LHS: &influxql.IntegerLiteral{Val: 1},
						RHS: &influxql.IntegerLiteral{Val: 2},
					},
				},
				RHS: &influxql.IntegerLiteral{Val: 3},
			},
		},
		// Binary expression with no precedence, tests left associativity.
		{
			s: `1 * 2 * 3`,
			expr: &influxql.BinaryExpr{
				Op: influxql.MUL,
				LHS: &influxql.BinaryExpr{
					Op:  influxql.MUL,
					LHS: &influxql.IntegerLiteral{Val: 1},
					RHS: &influxql.IntegerLiteral{Val: 2},
				},
				RHS: &influxql.IntegerLiteral{Val: 3},
			},
		},
		// Addition and subtraction without whitespace.
		{
			s: `1+2-3`,
			expr: &influxql.BinaryExpr{
				Op: influxql.SUB,
				LHS: &influxql.BinaryExpr{
					Op:  influxql.ADD,
					LHS: &influxql.IntegerLiteral{Val: 1},
					RHS: &influxql.IntegerLiteral{Val: 2},
				},
				RHS: &influxql.IntegerLiteral{Val: 3},
			},
		},
		// Comparison against now() arithmetic, without whitespace.
		{
			s: `time>now()-5m`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.GT,
				LHS: &influxql.VarRef{Val: "time"},
				RHS: &influxql.BinaryExpr{
					Op:  influxql.SUB,
					LHS: &influxql.Call{Name: "now"},
					RHS: &influxql.DurationLiteral{Val: 5 * time.Minute},
				},
			},
		},
		// Simple unary expression.
		// Unary minus on a reference parses as multiplication by -1.
		{
			s: `-value`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.MUL,
				LHS: &influxql.IntegerLiteral{Val: -1},
				RHS: &influxql.VarRef{Val: "value"},
			},
		},
		{
			s: `-mean(value)`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.MUL,
				LHS: &influxql.IntegerLiteral{Val: -1},
				RHS: &influxql.Call{
					Name: "mean",
					Args: []influxql.Expr{
						&influxql.VarRef{Val: "value"}},
				},
			},
		},
		// Unary expressions with parenthesis.
		{
			s: `-(-4)`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.MUL,
				LHS: &influxql.IntegerLiteral{Val: -1},
				RHS: &influxql.ParenExpr{
					Expr: &influxql.IntegerLiteral{Val: -4},
				},
			},
		},
		// Multiplication with leading subtraction.
		{
			s: `-2 * 3`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.MUL,
				LHS: &influxql.IntegerLiteral{Val: -2},
				RHS: &influxql.IntegerLiteral{Val: 3},
			},
		},
		// Binary expression with regex.
		{
			s: `region =~ /us.*/`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.EQREGEX,
				LHS: &influxql.VarRef{Val: "region"},
				RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`us.*`)},
			},
		},
		// Binary expression with quoted '/' regex.
		{
			s: `url =~ /http\:\/\/www\.example\.com/`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.EQREGEX,
				LHS: &influxql.VarRef{Val: "url"},
				RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`http\://www\.example\.com`)},
			},
		},
		// Binary expression with quoted '/' regex without space around operator. Influxdb #9058
		{
			s: `url=~/http\:\/\/www\.example\.com/`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.EQREGEX,
				LHS: &influxql.VarRef{Val: "url"},
				RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`http\://www\.example\.com`)},
			},
		},
		// Complex binary expression.
		{
			s: `value + 3 < 30 AND 1 + 2 OR true`,
			expr: &influxql.BinaryExpr{
				Op: influxql.OR,
				LHS: &influxql.BinaryExpr{
					Op: influxql.AND,
					LHS: &influxql.BinaryExpr{
						Op: influxql.LT,
						LHS: &influxql.BinaryExpr{
							Op:  influxql.ADD,
							LHS: &influxql.VarRef{Val: "value"},
							RHS: &influxql.IntegerLiteral{Val: 3},
						},
						RHS: &influxql.IntegerLiteral{Val: 30},
					},
					RHS: &influxql.BinaryExpr{
						Op:  influxql.ADD,
						LHS: &influxql.IntegerLiteral{Val: 1},
						RHS: &influxql.IntegerLiteral{Val: 2},
					},
				},
				RHS: &influxql.BooleanLiteral{Val: true},
			},
		},
		// Complex binary expression.
		{
			s: `time > now() - 1d AND time < now() + 1d`,
			expr: &influxql.BinaryExpr{
				Op: influxql.AND,
				LHS: &influxql.BinaryExpr{
					Op:  influxql.GT,
					LHS: &influxql.VarRef{Val: "time"},
					RHS: &influxql.BinaryExpr{
						Op:  influxql.SUB,
						LHS: &influxql.Call{Name: "now"},
						RHS: &influxql.DurationLiteral{Val: mustParseDuration("1d")},
					},
				},
				RHS: &influxql.BinaryExpr{
					Op:  influxql.LT,
					LHS: &influxql.VarRef{Val: "time"},
					RHS: &influxql.BinaryExpr{
						Op:  influxql.ADD,
						LHS: &influxql.Call{Name: "now"},
						RHS: &influxql.DurationLiteral{Val: mustParseDuration("1d")},
					},
				},
			},
		},
		// Duration math with an invalid literal.
		{
			s:   `time > now() - 1y`,
			err: `invalid duration`,
		},
		// Function call (empty)
		{
			s: `my_func()`,
			expr: &influxql.Call{
				Name: "my_func",
			},
		},
		// Function call (multi-arg)
		{
			s: `my_func(1, 2 + 3)`,
			expr: &influxql.Call{
				Name: "my_func",
				Args: []influxql.Expr{
					&influxql.IntegerLiteral{Val: 1},
					&influxql.BinaryExpr{
						Op:  influxql.ADD,
						LHS: &influxql.IntegerLiteral{Val: 2},
						RHS: &influxql.IntegerLiteral{Val: 3},
					},
				},
			},
		},
	}
	for i, tt := range tests {
		// Parse and compare the error string first, then the AST.
		expr, err := influxql.NewParser(strings.NewReader(tt.s)).ParseExpr()
		if !reflect.DeepEqual(tt.err, errstring(err)) {
			t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.s, tt.err, err)
		} else if tt.err == "" && !reflect.DeepEqual(tt.expr, expr) {
			t.Errorf("%d. %q\n\nexpr mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.expr, expr)
		} else if err == nil {
			// Attempt to reparse the expr as a string and confirm it parses the same.
			expr2, err := influxql.ParseExpr(expr.String())
			if err != nil {
				t.Errorf("%d. %q: unable to parse expr string: %s", i, expr.String(), err)
			} else if !reflect.DeepEqual(tt.expr, expr2) {
				t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.expr), mustMarshalJSON(expr2))
				t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.expr.String(), expr2.String())
				t.Errorf("%d. %q\n\nexpr reparse mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.expr, expr2)
			}
		}
	}
}
// Ensure a time duration can be parsed.
func TestParseDuration(t *testing.T) {
	// Each case is either a valid literal with its expected time.Duration
	// or an invalid literal with the expected error string.
	var tests = []struct {
		s   string
		d   time.Duration
		err string
	}{
		// time.Duration counts nanoseconds, so a bare 10 equals 10ns.
		{s: `10ns`, d: 10},
		{s: `10u`, d: 10 * time.Microsecond},
		{s: `10µ`, d: 10 * time.Microsecond},
		{s: `15ms`, d: 15 * time.Millisecond},
		{s: `100s`, d: 100 * time.Second},
		{s: `2m`, d: 2 * time.Minute},
		{s: `2h`, d: 2 * time.Hour},
		{s: `2d`, d: 2 * 24 * time.Hour},
		{s: `2w`, d: 2 * 7 * 24 * time.Hour},
		// Multi-unit literals accumulate.
		{s: `1h30m`, d: time.Hour + 30*time.Minute},
		{s: `30ms3000u`, d: 30*time.Millisecond + 3000*time.Microsecond},
		{s: `-5s`, d: -5 * time.Second},
		{s: `-5m30s`, d: -5*time.Minute - 30*time.Second},
		// Missing units, bare units, fractions, and unknown suffixes are rejected.
		{s: ``, err: "invalid duration"},
		{s: `3`, err: "invalid duration"},
		{s: `1000`, err: "invalid duration"},
		{s: `w`, err: "invalid duration"},
		{s: `ms`, err: "invalid duration"},
		{s: `1.2w`, err: "invalid duration"},
		{s: `10x`, err: "invalid duration"},
		{s: `10n`, err: "invalid duration"},
	}
	for i, tt := range tests {
		d, err := influxql.ParseDuration(tt.s)
		if !reflect.DeepEqual(tt.err, errstring(err)) {
			t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.s, tt.err, err)
		} else if tt.d != d {
			t.Errorf("%d. %q\n\nduration mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.d, d)
		}
	}
}
// Ensure a time duration can be formatted.
func TestFormatDuration(t *testing.T) {
	// Expected strings use InfluxQL suffixes: note `u` for microseconds
	// and `d`/`w` for days/weeks (units the stdlib formatter lacks).
	var tests = []struct {
		d time.Duration
		s string
	}{
		{d: 3 * time.Microsecond, s: `3u`},
		{d: 1001 * time.Microsecond, s: `1001u`},
		{d: 15 * time.Millisecond, s: `15ms`},
		{d: 100 * time.Second, s: `100s`},
		{d: 2 * time.Minute, s: `2m`},
		{d: 2 * time.Hour, s: `2h`},
		{d: 2 * 24 * time.Hour, s: `2d`},
		{d: 2 * 7 * 24 * time.Hour, s: `2w`},
	}
	for i, tt := range tests {
		s := influxql.FormatDuration(tt.d)
		if tt.s != s {
			t.Errorf("%d. %v: mismatch: %s != %s", i, tt.d, tt.s, s)
		}
	}
}
// Ensure a string can be quoted.
func TestQuote(t *testing.T) {
	// Expected outputs show QuoteString wrapping in single quotes and
	// escaping control characters, backslashes, and embedded quotes.
	for i, tt := range []struct {
		in  string
		out string
	}{
		{``, `''`},
		{`foo`, `'foo'`},
		{"foo\nbar", `'foo\nbar'`}, // real newline in, escaped \n out
		{`foo bar\\`, `'foo bar\\\\'`},
		{`'foo'`, `'\'foo\''`},
	} {
		if out := influxql.QuoteString(tt.in); tt.out != out {
			t.Errorf("%d. %s: mismatch: %s != %s", i, tt.in, tt.out, out)
		}
	}
}
// Ensure an identifier's segments can be quoted.
func TestQuoteIdent(t *testing.T) {
	// Per the expected strings, segments are dot-joined and quoted only
	// when needed (keywords, empties, spaces, dots, leading digits); a
	// plain trailing identifier stays unquoted.
	for i, tt := range []struct {
		ident []string
		s     string
	}{
		{[]string{``}, `""`},
		{[]string{`select`}, `"select"`},
		{[]string{`in-bytes`}, `"in-bytes"`},
		{[]string{`foo`, `bar`}, `"foo".bar`},
		{[]string{`foo`, ``, `bar`}, `"foo"..bar`},
		{[]string{`foo bar`, `baz`}, `"foo bar".baz`},
		{[]string{`foo.bar`, `baz`}, `"foo.bar".baz`},
		{[]string{`foo.bar`, `rp`, `baz`}, `"foo.bar"."rp".baz`},
		{[]string{`foo.bar`, `rp`, `1baz`}, `"foo.bar"."rp"."1baz"`},
	} {
		if s := influxql.QuoteIdent(tt.ident...); tt.s != s {
			t.Errorf("%d. %s: mismatch: %s != %s", i, tt.ident, tt.s, s)
		}
	}
}
// Ensure DeleteSeriesStatement can convert to a string
func TestDeleteSeriesStatement_String(t *testing.T) {
	// Each case pairs a statement AST with the exact string it should render.
	// NOTE(review): the original table listed the "source plus condition"
	// case twice, byte-for-byte; the duplicate added no coverage and has
	// been removed.
	var tests = []struct {
		s    string
		stmt influxql.Statement
	}{
		// Source only.
		{
			s:    `DELETE FROM src`,
			stmt: &influxql.DeleteSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}},
		},
		// Source plus WHERE condition.
		{
			s: `DELETE FROM src WHERE host = 'hosta.influxdb.org'`,
			stmt: &influxql.DeleteSeriesStatement{
				Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
			},
		},
		// Condition only (no FROM clause).
		{
			s: `DELETE WHERE host = 'hosta.influxdb.org'`,
			stmt: &influxql.DeleteSeriesStatement{
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
			},
		},
	}
	for _, test := range tests {
		s := test.stmt.String()
		if s != test.s {
			t.Errorf("error rendering string. expected %s, actual: %s", test.s, s)
		}
	}
}
// Ensure DropSeriesStatement can convert to a string
func TestDropSeriesStatement_String(t *testing.T) {
	// Each case pairs a statement AST with the exact string it should render.
	// NOTE(review): the original table listed the "source plus condition"
	// case twice, byte-for-byte; the duplicate added no coverage and has
	// been removed.
	var tests = []struct {
		s    string
		stmt influxql.Statement
	}{
		// Source only.
		{
			s:    `DROP SERIES FROM src`,
			stmt: &influxql.DropSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}},
		},
		// Source plus WHERE condition.
		{
			s: `DROP SERIES FROM src WHERE host = 'hosta.influxdb.org'`,
			stmt: &influxql.DropSeriesStatement{
				Sources: []influxql.Source{&influxql.Measurement{Name: "src"}},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
			},
		},
		// Condition only (no FROM clause).
		{
			s: `DROP SERIES WHERE host = 'hosta.influxdb.org'`,
			stmt: &influxql.DropSeriesStatement{
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
			},
		},
	}
	for _, test := range tests {
		s := test.stmt.String()
		if s != test.s {
			t.Errorf("error rendering string. expected %s, actual: %s", test.s, s)
		}
	}
}
// BenchmarkParserParseStatement measures the cost (time and allocations)
// of parsing one simple SELECT statement per iteration.
func BenchmarkParserParseStatement(b *testing.B) {
	b.ReportAllocs()
	query := `SELECT "field" FROM "series" WHERE value > 10`
	for n := 0; n < b.N; n++ {
		stmt, err := influxql.NewParser(strings.NewReader(query)).ParseStatement()
		if err != nil {
			b.Fatalf("unexpected error: %s", err)
		}
		if stmt == nil {
			b.Fatalf("expected statement: %s", stmt)
		}
	}
	b.SetBytes(int64(len(query)))
}
// MustParseSelectStatement parses a select statement, panicking on any
// parse failure or if the result is not a SELECT.
func MustParseSelectStatement(s string) *influxql.SelectStatement {
	parser := influxql.NewParser(strings.NewReader(s))
	stmt, err := parser.ParseStatement()
	if err != nil {
		panic(err)
	}
	return stmt.(*influxql.SelectStatement)
}
// MustParseExpr parses an expression, panicking on any parse failure.
func MustParseExpr(s string) influxql.Expr {
	parser := influxql.NewParser(strings.NewReader(s))
	expr, err := parser.ParseExpr()
	if err != nil {
		panic(err)
	}
	return expr
}
// errstring converts an error to its string representation.
// A nil error yields the empty string.
func errstring(err error) string {
	if err == nil {
		return ""
	}
	return err.Error()
}
// newAlterRetentionPolicyStatement creates an initialized
// AlterRetentionPolicyStatement. Negative d, sd or replication values
// mean "leave the corresponding field unset" (nil pointer).
func newAlterRetentionPolicyStatement(name string, DB string, d, sd time.Duration, replication int, dfault bool) *influxql.AlterRetentionPolicyStatement {
	stmt := &influxql.AlterRetentionPolicyStatement{
		Name:     name,
		Database: DB,
		Default:  dfault,
	}
	if d >= 0 {
		stmt.Duration = &d
	}
	if sd >= 0 {
		stmt.ShardGroupDuration = &sd
	}
	if replication >= 0 {
		stmt.Replication = &replication
	}
	return stmt
}
// mustMarshalJSON encodes a value to indented JSON, panicking if the
// value cannot be marshaled.
func mustMarshalJSON(v interface{}) []byte {
	out, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		panic(err)
	}
	return out
}
// mustParseDuration parses an InfluxQL duration string, panicking on
// failure (test helper).
func mustParseDuration(s string) time.Duration {
	dur, err := influxql.ParseDuration(s)
	if err != nil {
		panic(err)
	}
	return dur
}
// mustLoadLocation loads a time zone by name, panicking on failure
// (test helper; requires tzdata to be available at runtime).
func mustLoadLocation(s string) *time.Location {
	loc, err := time.LoadLocation(s)
	if err != nil {
		panic(err)
	}
	return loc
}

// LosAngeles is the America/Los_Angeles location used by timezone tests.
var LosAngeles = mustLoadLocation("America/Los_Angeles")
// duration returns a pointer to v, handy for initializing optional
// duration fields in test fixtures.
func duration(v time.Duration) *time.Duration {
	d := v
	return &d
}
// intptr returns a pointer to v, handy for optional int fields in tests.
func intptr(v int) *int {
	p := new(int)
	*p = v
	return p
}
|
package user
import (
	"context"
	"errors"
	"reflect"

	auth "github.com/inhumanLightBackend/auth/logic"
)
// Service defines the business operations over users: authentication,
// creation, lookup and update. All methods delegate to the underlying
// repository.
type Service interface {
	// Authenticate verifies a login (email) and password pair.
	Authenticate(context.Context, string, string) (*User, error)
	// CreateUser validates, prepares and stores a new user; returns a status string.
	CreateUser(context.Context, *User) (string, error)
	// FindUserByEmail looks a user up by email address.
	FindUserByEmail(context.Context, string) (*User, error)
	// FindUserById looks a user up by numeric id.
	FindUserById(context.Context, int) (*User, error)
	// UpdateUser applies the non-zero fields of the given user to the stored record.
	UpdateUser(context.Context, *User) (string, error)
}
// service is the default Service implementation, backed by a Repositiry.
type service struct {
	repository Repositiry
}

// NewService wires a repository into a new Service instance.
func NewService(repo Repositiry) Service {
	return &service{
		repository: repo,
	}
}
// Authenticate checks the user in the db. If the user exists and the
// password is right, the user model is returned.
func (s *service) Authenticate(ctx context.Context, login string, pass string) (*User, error) {
	user, err := s.repository.FindUserByEmail(ctx, login)
	if err != nil {
		return nil, err
	}
	if !user.ComparePassword(pass) {
		// Bug fix: the original returned "nil, err" here where err was
		// guaranteed nil, so a wrong password looked like a successful
		// call that yielded a nil user (and would panic in callers).
		return nil, errors.New("invalid login or password")
	}
	return user, nil
}
// CreateUser validates the given data, runs the pre-create hook
// (password hashing etc.) and persists the user. Returns "Success"
// when the record was stored.
func (s *service) CreateUser(ctx context.Context, user *User) (string, error) {
	steps := []func() error{
		user.Validate,
		user.BeforeCreate,
		func() error { return s.repository.CreateUser(ctx, user) },
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return "", err
		}
	}
	return "Success", nil
}
// FindUserByEmail looks the user up by email and returns the model if
// the query executed without errors. Thin pass-through to the repository.
func (s *service) FindUserByEmail(ctx context.Context, email string) (*User, error) {
	return s.repository.FindUserByEmail(ctx, email)
}
// FindUserById looks the user up by numeric id. Thin pass-through to the
// repository.
func (s *service) FindUserById(ctx context.Context, id int) (*User, error) {
	return s.repository.FindUserById(ctx, id)
}
// UpdateUser updates the stored user with the non-zero fields of the
// given user. The caller's identity is taken from the context token,
// validated against the auth gRPC service; plain users may not change
// roles, and nobody may set token/password fields through this path.
func (s *service) UpdateUser(ctx context.Context, user *User) (string, error) {
	var response *auth.ValidateResponse
	{
		// The raw token is stashed in the context by the auth middleware.
		// NOTE(review): this type assertion panics if the key is absent —
		// presumably middleware guarantees it; confirm.
		userRawId := ctx.Value(auth.CtxUserKey).(string)
		serv, _, err := AuthGRPCService(AuthserverPort, nil)
		if err != nil {
			return "", err
		}
		response, err = serv.Validate(ctx, &auth.ValidateRequest{
			Token: userRawId,
		})
		if err != nil {
			return "", err
		}
	}
	// Ordinary users are not allowed to change roles.
	if response.Role == USER && user.Role != "" {
		return "", errPermissionsDeni
	}
	// Token and password fields can never be updated through this endpoint.
	if user.Token != "" || user.EncryptedPassword != "" || user.Password != "" {
		return "", errPermissionsDeni
	}
	var (
		newUser *User
		err     error
	)
	{
		// Zero-valued fields in the request are treated as "not provided".
		// NOTE(review): this interface comparison panics for non-comparable
		// field types (slices/maps) — assumes User has only comparable fields.
		isZeroValue := func(x interface{}) bool {
			return x == reflect.Zero(reflect.TypeOf(x)).Interface()
		}
		// Load the authenticated user's current record, then overlay every
		// non-zero field from the request onto it.
		newUser, err = s.FindUserById(ctx, int(response.UserId))
		if err != nil {
			return "", err
		}
		m := reflect.ValueOf(user)
		if m.Kind() == reflect.Ptr {
			m = m.Elem()
			for i := 0; i < m.NumField(); i++ {
				field := m.Field(i)
				if !isZeroValue(field.Interface()) {
					reflect.ValueOf(newUser).Elem().Field(i).Set(field)
				}
			}
		}
	}
	// Re-validate the merged record before persisting.
	if err := newUser.Validate(); err != nil {
		return "", err
	}
	if err := s.repository.UpdateUser(ctx, newUser); err != nil {
		return "", err
	}
	return "Updated", nil
}
|
package meda
// CopyFrom overwrites every field of c with the corresponding field of
// other, including zero values (contrast with Merge, which skips them).
func (c *Config) CopyFrom(other *Config) {
	c.Driver = other.Driver
	c.DataSourceName = other.DataSourceName
	c.TablePrefix = other.TablePrefix
	c.MaxOpenConns = other.MaxOpenConns
	c.MaxIdleConns = other.MaxIdleConns
	c.ConnMaxLifetime = other.ConnMaxLifetime
	c.LockKeepAliveInterval = other.LockKeepAliveInterval
	c.ServerConcurrencyHint = other.ServerConcurrencyHint
}
// Merge copies only the non-zero fields of other into c, so other acts
// as an overlay of overrides. Returns c to allow chaining.
// NOTE(review): a deliberate zero override (e.g. MaxOpenConns = 0 for
// "unlimited") cannot be expressed — zero always means "keep existing".
func (c *Config) Merge(other *Config) *Config {
	if len(other.Driver) > 0 {
		c.Driver = other.Driver
	}
	if len(other.DataSourceName) > 0 {
		c.DataSourceName = other.DataSourceName
	}
	if len(other.TablePrefix) > 0 {
		c.TablePrefix = other.TablePrefix
	}
	if other.MaxOpenConns != 0 {
		c.MaxOpenConns = other.MaxOpenConns
	}
	if other.MaxIdleConns != 0 {
		c.MaxIdleConns = other.MaxIdleConns
	}
	if other.ConnMaxLifetime != 0 {
		c.ConnMaxLifetime = other.ConnMaxLifetime
	}
	if other.LockKeepAliveInterval != 0 {
		c.LockKeepAliveInterval = other.LockKeepAliveInterval
	}
	if other.ServerConcurrencyHint != 0 {
		c.ServerConcurrencyHint = other.ServerConcurrencyHint
	}
	return c
}
// Clone returns a copy of the configuration (all fields copied by
// CopyFrom).
func (c *Config) Clone() *Config {
	clone := new(Config)
	clone.CopyFrom(c)
	return clone
}
|
package main
import (
"fmt"
"os/exec"
)
// lookCmmand reports how cmd would resolve: first as an alias from
// aliasTable, then as an executable on PATH; otherwise NOT FOUND.
func lookCmmand(cmd string) {
	if alias := aliasTable[cmd]; alias != "" {
		fmt.Printf("%s: aliased to %s\n", cmd, alias)
		return
	}
	if path, err := exec.LookPath(cmd); err == nil {
		fmt.Printf("%s: %s\n", cmd, path)
		return
	}
	fmt.Printf("%s NOT FOUND\n", cmd)
}
|
package account
import (
"log"
"strconv"
"testing"
)
// TestRankListManager_Rank exercises the default rank list: seeds ten
// players with descending scores, re-ranks some of them, then dumps the
// internal state. Log-only smoke test — it makes no assertions.
func TestRankListManager_Rank(t *testing.T) {
	for i := 0; i < 10; i++ {
		log.Println(DefaultRankList().Rank(uint32(i), "玩家"+strconv.Itoa(i), int32(10-i)))
	}
	// Re-rank: a new player and two score changes for existing players.
	log.Println(DefaultRankList().Rank(123, "玩家"+strconv.Itoa(123), int32(11)))
	log.Println(DefaultRankList().Rank(2, "玩家"+strconv.Itoa(2), int32(6)))
	log.Println(DefaultRankList().Rank(3, "玩家"+strconv.Itoa(3), int32(60)))
	log.Println("======", DefaultRankList().rank)
	// Dump every entry of the concurrent user map.
	DefaultRankList().user.Range(func(key, value interface{}) bool {
		log.Println(key, value)
		return true
	})
}
|
package lib
import (
"net/http"
)
// repositoryListResponse streams repositories discovered by a paginated
// catalog request; err records the last error encountered while paging.
type repositoryListResponse struct {
	repositories chan Repository
	err          error
}
// Repositories returns the stream of discovered repositories as a
// receive-only channel; it is closed when listing completes.
func (r *repositoryListResponse) Repositories() <-chan Repository {
	return r.repositories
}
// LastError reports the last error seen while paging, if any.
func (r *repositoryListResponse) LastError() error {
	return r.err
}

// setLastError records a paging error for later retrieval via LastError.
func (r *repositoryListResponse) setLastError(err error) {
	r.err = err
}

// close terminates the repository stream, signaling consumers that no
// more entries will arrive.
func (r *repositoryListResponse) close() {
	close(r.repositories)
}
// repositoryListJsonResponse mirrors one page of the registry /v2/_catalog
// JSON payload.
type repositoryListJsonResponse struct {
	Repositories []string `json:"repositories"`
}
// validate ensures the decoded catalog payload actually contained the
// "repositories" key.
func (r *repositoryListJsonResponse) validate() error {
	if r.Repositories != nil {
		return nil
	}
	return genericMalformedResponseError
}
// repositoryListRequestContext drives a paginated catalog listing request.
type repositoryListRequestContext struct{}

// path returns the registry API endpoint for the repository catalog.
func (r *repositoryListRequestContext) path() string {
	return "v2/_catalog"
}
// validateApiResponse maps catalog HTTP status codes to errors. A 404 on
// the very first request means the registry does not implement catalog
// listings at all; on later pages it is just an unexpected status.
func (r *repositoryListRequestContext) validateApiResponse(response *http.Response, initialRequest bool) error {
	code := response.StatusCode
	switch {
	case code == http.StatusOK:
		return nil
	case code == http.StatusUnauthorized || code == http.StatusForbidden:
		return genericAuthorizationError
	case code == http.StatusNotFound && initialRequest:
		return NotImplementedByRemoteError("registry does not implement repository listings")
	default:
		return newInvalidStatusCodeError(code)
	}
}
// processPartialResponse feeds one page of repository names into the
// response's stream channel.
func (r *repositoryListRequestContext) processPartialResponse(response paginatedRequestResponse, apiResponse interface{}) {
	for _, repositoryName := range apiResponse.(*repositoryListJsonResponse).Repositories {
		response.(*repositoryListResponse).repositories <- newRepository(repositoryName)
	}
}

// createResponse builds the streaming response, buffering one API page.
func (r *repositoryListRequestContext) createResponse(api *registryApi) paginatedRequestResponse {
	return &repositoryListResponse{
		repositories: make(chan Repository, api.pageSize()),
	}
}

// createJsonResponse allocates the JSON page container for decoding.
func (r *repositoryListRequestContext) createJsonResponse() validatable {
	return new(repositoryListJsonResponse)
}

// tokenCacheHint names the auth-token cache bucket for catalog requests.
func (r *repositoryListRequestContext) tokenCacheHint() string {
	return cacheHintRegistryList()
}

// getHeaders returns extra request headers; catalog requests need none.
func (r *repositoryListRequestContext) getHeaders() map[string]string {
	return nil
}
// ListRepositories starts a paginated catalog listing and returns the
// streaming response.
func (r *registryApi) ListRepositories() RepositoryListResponse {
	return r.paginatedRequest(new(repositoryListRequestContext)).(*repositoryListResponse)
}
|
package types
//
// OauthAccessTokenResponse is the response to an access token request
// (an OAuth2-style token endpoint payload, judging by the JSON keys).
//
type OauthAccessTokenResponse struct {
	AccessToken  string `json:"access_token"`  // the issued access token
	TokenType    string `json:"token_type"`    // token type, e.g. "bearer" — confirm with provider
	ExpiresIn    int    `json:"expires_in"`    // presumably lifetime in seconds — confirm with provider
	RefreshToken string `json:"refresh_token"` // token used to obtain a new access token
}
|
package main
import (
"container/list"
"fmt"
)
//type List struct {
// root Element
// len int
//}
//
//// 链表就是有一个prev和next的指针数组
//type Element struct {
// next, prev *Element // 上一个和下一个元素
// list *List
// value interface{} // 元素
//}
// main demonstrates container/list: push two values onto a doubly linked
// list and inspect its first two elements.
func main() {
	// Renamed from "list" — the original shadowed the imported
	// container/list package with a local variable of the same name.
	l := list.New()
	// Append two values at the back of the list.
	l.PushBack(1)
	l.PushBack(2)
	fmt.Printf("len: %v\n", l.Len())          // fixed typo: was "lenL"
	fmt.Printf("first: %#v\n", l.Front())
	fmt.Printf("second: %#v\n", l.Front().Next()) // fixed typo: was "secibd"
}
|
package main
import (
"fmt"
"sync"
"sync/atomic"
)
const (
	batch   int = 1000 // increments each worker performs
	workers int = 50   // number of concurrent worker goroutines
)
// worker atomically adds `batch` increments to the shared counter *psum
// and signals completion on the wait group when done.
func worker(wg *sync.WaitGroup, psum *uint64, batch int) {
	defer wg.Done()
	for i := 0; i < batch; i++ {
		atomic.AddUint64(psum, 1)
	}
}
// main fans out `workers` goroutines that each atomically add `batch`
// increments to a shared counter, waits for all of them, and prints the
// expected vs. actual total.
func main() {
	var sum uint64
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1) // add worker
		go worker(&wg, &sum, batch)
	}
	wg.Wait() // all dones
	expected := batch * workers
	fmt.Printf("sum should be %v, actual is %v", expected, sum)
}
|
// go run has_method.go
package main
import (
"fmt"
"reflect"
)
// myStruct is a tiny demo type with one settable field.
type myStruct struct {
	a int
}

// myInterface declares the setter looked up by name in main.
type myInterface interface {
	SetA(int)
}

// SetA stores a into the struct (pointer receiver, so it mutates s).
func (s *myStruct) SetA(a int) {
	s.a = a
}
// main demonstrates looking up a method by name via reflect and invoking
// it with reflect.Value arguments.
func main() {
	s := myStruct{}
	s.SetA(1)
	fmt.Println(s) // {1}
	v := reflect.ValueOf(&s)
	fmt.Println(v) // pointer to main.myStruct
	setA := v.MethodByName("SetA")
	setB := v.MethodByName("SetB")
	fmt.Println(setA)           // bound method value
	fmt.Println(setB)           // <invalid reflect.Value>
	fmt.Println(setB.IsValid()) // false
	if setA.IsValid() {
		// Invoke SetA(7) through reflection.
		inputs := []reflect.Value{reflect.ValueOf(7)}
		setA.Call(inputs)
	}
	fmt.Println(s) // {7}
}
|
package solver
import (
"testing"
"github.com/truggeri/go-sudoku/cmd/go-sudoku/puzzle"
)
// result is a package-level sink that keeps benchmark results observable,
// preventing the compiler from eliding the Solve calls.
var result puzzle.Puzzle
// BenchmarkSolverEasy benchmarks Solve on the easy fixture; the result is
// written to the package-level sink so the call cannot be dead-coded.
func BenchmarkSolverEasy(b *testing.B) {
	var r puzzle.Puzzle
	// Renamed from "puzzle" — the original shadowed the imported
	// puzzle package with a local variable of the same name.
	p := CreateTestPuzzleEasy()
	for n := 0; n < b.N; n++ {
		r = Solve(p)
	}
	result = r
}
// BenchmarkSolverMedium benchmarks Solve on the medium fixture; the
// result is written to the package-level sink.
func BenchmarkSolverMedium(b *testing.B) {
	var r puzzle.Puzzle
	// Renamed from "puzzle" to avoid shadowing the imported package.
	p := CreateTestPuzzleMedium()
	for n := 0; n < b.N; n++ {
		r = Solve(p)
	}
	result = r
}
// BenchmarkSolverHard benchmarks Solve on the hard fixture; the result is
// written to the package-level sink.
func BenchmarkSolverHard(b *testing.B) {
	var r puzzle.Puzzle
	// Renamed from "puzzle" to avoid shadowing the imported package.
	p := CreateTestPuzzleHard()
	for n := 0; n < b.N; n++ {
		r = Solve(p)
	}
	result = r
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
// main reads five lines of text from stdin into a fixed array, then reads
// one more line as an integer (intended to drive the not-yet-implemented
// loop below), and prints the collected messages.
func main() {
	var words [5]string
	input := bufio.NewScanner(os.Stdin)
	for i := 0; i < len(words); i++ {
		input.Scan()
		words[i] = input.Text() //strconv.ParseFloat(input.Text(), 32) // strconv.Atoi("12")
	}
	var inputs int
	input.Scan()
	inputs, _ = strconv.Atoi(input.Text())
	// Bug fix: "inputs" was assigned but never read, which is a
	// "declared and not used" compile error in Go. Keep it referenced
	// until the loop below is implemented.
	_ = inputs
	//for i:=0 ;i < inputs; i++{
	//.......
	//}
	fmt.Println("Messages:", words)
}
|
package version_test
import (
. "github.com/bossjones/go-chatbot-lab/shared/version"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo suite for the version package: verifies the build-time defaults
// ("<Unknown>" placeholders) and the string/numeric formatting helpers.
var _ = Describe("Version", func() {
	Describe("Default Version variables", func() {
		It("should be <Unknown>", func() {
			// Without -ldflags these variables keep their placeholder values.
			Expect(VersionPrerelease).To(Equal(""))
			Expect(Version).To(Equal("<Unknown>"))
			Expect(BuildDate).To(Equal("<Unknown>"))
			Expect(GitCommit).To(Equal("<Unknown>"))
			// Expect(GoVersion).To(BeNumerically(">", "go1.9.2"))
		})
	})
	Describe("Func FullVersion", func() {
		It("returns a properly formatted string", func() {
			full_version := FullVersion()
			expected_full_version := "<Unknown> (<Unknown>)"
			Expect(full_version).To(Equal(expected_full_version))
		})
	})
	Describe("Func DetailedVersionInfo", func() {
		It("returns a properly formatted string", func() {
			detailed_version_info := DetailedVersionInfo()
			expected_detailed_version_info := "Go-Chatbot-Lab <Unknown>; buildDate=<Unknown>; sha=<Unknown>"
			Expect(detailed_version_info).To(Equal(expected_detailed_version_info))
		})
	})
	Describe("Func ConvertToNumeric", func() {
		It("returns a properly formatted string", func() {
			// "1.2.1" maps to 1*10^6 + 2*10^3 + 1 = 1.002001e6.
			detailed_convert_to_numeric := ConvertToNumeric("1.2.1")
			expected_detailed_convert_to_numeric := 1.002001e+06
			Expect(detailed_convert_to_numeric).To(Equal(expected_detailed_convert_to_numeric))
		})
	})
})
|
package byopenwrt
import "github.com/go-cmd/cmd"
// runInitScript invokes /etc/init.d/<name> with the given action and
// blocks until the command completes, returning any start/run error.
// (Consolidates three previously copy-pasted bodies.)
func runInitScript(name, action string) error {
	c := cmd.NewCmd("/etc/init.d/"+name, action)
	// Wait for the command to finish (Start returns a status channel).
	status := <-c.Start()
	return status.Error
}

// ServiceRestart restarts the named init.d service.
func ServiceRestart(name string) error {
	return runInitScript(name, "restart")
}

// ServiceStop stops the named init.d service.
func ServiceStop(name string) error {
	return runInitScript(name, "stop")
}

// ServiceStart starts the named init.d service.
func ServiceStart(name string) error {
	return runInitScript(name, "start")
}
|
package integration_test
import (
"github.com/cloudfoundry/libbuildpack/cutlass"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Integration suite: deploys a staticfile app with HTTPS forcing enabled
// (once via an environment variable, once via the Staticfile config) and
// verifies the 301-to-https redirect behavior, including X-Forwarded-*
// header handling.
var _ = Describe("deploy a staticfile app", func() {
	var app *cutlass.App
	var app_name string
	AfterEach(func() {
		// Tear down the pushed app after every spec.
		if app != nil {
			app.Destroy()
		}
		app = nil
		app_name = ""
	})
	JustBeforeEach(func() {
		// Each context sets app_name in BeforeEach; push that fixture.
		Expect(app_name).ToNot(BeEmpty())
		app = cutlass.New(Fixtures(app_name))
		PushAppAndConfirm(app)
	})
	Context("Using ENV Variable", func() {
		BeforeEach(func() { app_name = "with_https" })
		It("receives a 301 redirect to https", func() {
			_, headers, err := app.Get("/", map[string]string{"NoFollow": "true"})
			Expect(err).To(BeNil())
			Expect(headers).To(HaveKeyWithValue("StatusCode", []string{"301"}))
			Expect(headers).To(HaveKeyWithValue("Location", ConsistOf(HavePrefix("https://"))))
		})
		It("injects x-forwarded-host into Location on redirect", func() {
			var upstreamHostName = "upstreamHostName.com"
			_, headers, err := app.Get("/", map[string]string{"NoFollow": "true", "X-Forwarded-Host": upstreamHostName})
			Expect(err).To(BeNil())
			Expect(headers).To(HaveKeyWithValue("StatusCode", []string{"301"}))
			Expect(headers).To(HaveKeyWithValue("Location", ConsistOf(HavePrefix(fmt.Sprintf("https://%s", upstreamHostName)))))
		})
		Context("Comma separated values in X-Forwarded headers", func() {
			It("picks leftmost x-forwarded-host,-port values into Location on redirect", func() {
				_, headers, err := app.Get("/path1/path2", map[string]string{
					"NoFollow":           "true",
					"X-Forwarded-Host":   "host.com, something.else",
					"X-Forwarded-Prefix": "/pre/fix1, /pre/fix2",
				})
				Expect(err).To(BeNil())
				Expect(headers).To(HaveKeyWithValue("StatusCode", []string{"301"}))
				Expect(headers).To(HaveKeyWithValue("Location", ConsistOf("https://host.com/pre/fix1/path1/path2")))
			})
		})
	})
	Context("Using Staticfile", func() {
		// Same assertions as above, but HTTPS forcing comes from the
		// Staticfile instead of an environment variable.
		BeforeEach(func() { app_name = "with_https_in_staticfile" })
		It("receives a 301 redirect to https", func() {
			_, headers, err := app.Get("/", map[string]string{"NoFollow": "true"})
			Expect(err).To(BeNil())
			Expect(headers).To(HaveKeyWithValue("StatusCode", []string{"301"}))
			Expect(headers).To(HaveKeyWithValue("Location", ConsistOf(HavePrefix("https://"))))
		})
		It("injects x-forwarded-host into Location on redirect", func() {
			var upstreamHostName = "upstreamHostName.com"
			_, headers, err := app.Get("/", map[string]string{"NoFollow": "true", "X-Forwarded-Host": upstreamHostName})
			Expect(err).To(BeNil())
			Expect(headers).To(HaveKeyWithValue("StatusCode", []string{"301"}))
			Expect(headers).To(HaveKeyWithValue("Location", ConsistOf(HavePrefix(fmt.Sprintf("https://%s", upstreamHostName)))))
		})
		Context("Comma separated values in X-Forwarded headers", func() {
			It("picks leftmost x-forwarded-host,-port values into Location on redirect", func() {
				_, headers, err := app.Get("/path1/path2", map[string]string{
					"NoFollow":           "true",
					"X-Forwarded-Host":   "host.com, something.else",
					"X-Forwarded-Prefix": "/pre/fix1, /pre/fix2",
				})
				Expect(err).To(BeNil())
				Expect(headers).To(HaveKeyWithValue("StatusCode", []string{"301"}))
				Expect(headers).To(HaveKeyWithValue("Location", ConsistOf("https://host.com/pre/fix1/path1/path2")))
			})
		})
	})
})
|
// Copyright (C) Microsoft Corporation.
package main
import (
"database/sql"
"errors"
"flag"
"fmt"
"log"
"os"
"strings"
"time"
_ "github.com/denisenkom/go-mssqldb"
"mssqlcommon"
mssqlocf "mssqlcommon/ocf"
)
/*
Program to be called from the mssql:fci resource agent to monitor SQL Server health.
Determines the health of the specified SQL Server instance based on
1) whether a connection can be established to the instance, and
2) the results of the 'sp_server_diagnostics' stored procedure
*/
// main wires up loggers, arranges for this helper process to die with its
// parent (the resource agent), and delegates to doMain; any unexpected
// error exits with status 1.
func main() {
	stdout := log.New(os.Stdout, "", log.LstdFlags)
	stderr := log.New(os.Stderr, "ERROR: ", log.LstdFlags)
	err := mssqlocf.KillCurrentProcessWhenParentExits()
	if err != nil {
		mssqlocf.Exit(stderr, 1, fmt.Errorf("Unexpected error: %s", err))
	}
	err = doMain(stdout, stderr)
	if err != nil {
		mssqlocf.Exit(stderr, 1, fmt.Errorf("Unexpected error: %s", err))
	}
}
// doMain parses and validates the command line, connects to the SQL
// Server instance with a health check, and dispatches to the requested
// OCF action (start or monitor), exiting with the appropriate OCF code.
func doMain(stdout *log.Logger, stderr *log.Logger) error {
	var (
		hostname             string
		sqlPort              uint64
		credentialsFile      string
		applicationName      string
		rawConnectionTimeout int64
		rawHealthThreshold   uint
		rawMonitorTimeout    int64
		action               string
		virtualServerName    string
	)
	flag.StringVar(&hostname, "hostname", "localhost", "The hostname of the SQL Server instance to connect to. Default: localhost")
	flag.Uint64Var(&sqlPort, "port", 0, "The port on which the instance is listening for logins.")
	flag.StringVar(&credentialsFile, "credentials-file", "", "The path to the credentials file.")
	flag.StringVar(&applicationName, "application-name", "", "The application name to use for the T-SQL connection.")
	flag.Int64Var(&rawConnectionTimeout, "connection-timeout", 30, "The connection timeout in seconds. "+
		"The application will retry connecting to the instance until this time elapses. Default: 30")
	flag.UintVar(&rawHealthThreshold, "health-threshold", uint(mssqlcommon.ServerCriticalError), "The instance health threshold. Default: 3 (SERVER_CRITICAL_ERROR)")
	flag.StringVar(&action, "action", "", `One of --start, --monitor
start: Start the replica on this node.
monitor: Monitor the replica on this node.`)
	flag.StringVar(&virtualServerName, "virtual-server-name", "", "The virtual server name that should be set on the SQL Server instance.")
	flag.Int64Var(&rawMonitorTimeout, "monitor-interval-timeout", 0, "The monitor interval timeout in seconds. "+
		"For FCI this is expected to be always Default: 0")
	flag.Parse()
	// Log the effective invocation for troubleshooting.
	stdout.Printf(
		"fci-helper invoked with hostname [%s]; port [%d]; credentials-file [%s]; application-name [%s]; connection-timeout [%d]; health-threshold [%d]; action [%s]\n",
		hostname, sqlPort,
		credentialsFile,
		applicationName,
		rawConnectionTimeout, rawHealthThreshold,
		action)
	// Both start and monitor additionally use the virtual server name.
	switch action {
	case "start":
		stdout.Printf(
			"fci-helper invoked with virtual-server-name [%s]\n",
			virtualServerName)
	case "monitor":
		stdout.Printf(
			"fci-helper invoked with virtual-server-name [%s]\n",
			virtualServerName)
	}
	// Required-argument validation.
	if hostname == "" {
		return errors.New("a valid hostname must be specified using --hostname")
	}
	if sqlPort == 0 {
		return errors.New("a valid port number must be specified using --port")
	}
	if credentialsFile == "" {
		return errors.New("a valid path to a credentials file must be specified using --credentials-file")
	}
	if applicationName == "" {
		return errors.New("a valid application name must be specified using --application-name")
	}
	if action == "" {
		return errors.New("a valid action must be specified using --action")
	}
	if action == "start" || action == "monitor" {
		if virtualServerName == "" {
			return errors.New("a valid virtual server name must be specified using --virtual-server-name")
		}
	}
	// Load the OCF exit-code mapping from the environment.
	err := mssqlocf.ImportOcfExitCodes()
	if err != nil {
		return err
	}
	connectionTimeout := time.Duration(rawConnectionTimeout) * time.Second
	monitorTimeout := time.Duration(rawMonitorTimeout) * time.Second
	healthThreshold := mssqlcommon.ServerHealth(rawHealthThreshold)
	sqlUsername, sqlPassword, err := mssqlcommon.ReadCredentialsFile(credentialsFile)
	if err != nil {
		return mssqlocf.OcfExit(stderr, mssqlocf.OCF_ERR_ARGS, fmt.Errorf("Could not read credentials file: %s", err))
	}
	// Connect and run sp_server_diagnostics-based health checks.
	db, err := mssqlcommon.OpenDBWithHealthCheck(
		hostname, sqlPort,
		sqlUsername, sqlPassword,
		applicationName,
		connectionTimeout, connectionTimeout,
		monitorTimeout,
		stdout)
	if err != nil {
		switch serverUnhealthyError := err.(type) {
		case *mssqlcommon.ServerUnhealthyError:
			// An unhealthy-but-above-threshold instance is tolerated;
			// at or below the threshold is a hard failure.
			if serverUnhealthyError.RawValue <= healthThreshold {
				return mssqlocf.OcfExit(stderr, mssqlocf.OCF_ERR_GENERIC, fmt.Errorf(
					"Instance health status %d is at or below the threshold value of %d",
					serverUnhealthyError.RawValue, healthThreshold))
			}
			stdout.Printf("Instance health status %d is greater than the threshold value of %d\n", serverUnhealthyError.RawValue, healthThreshold)
		default:
			return err
		}
	}
	defer db.Close()
	// Dispatch to the requested OCF action.
	var ocfExitCode mssqlocf.OcfExitCode
	switch action {
	case "start":
		ocfExitCode, err = start(db, virtualServerName, stdout)
	case "monitor":
		ocfExitCode, err = monitor(db, virtualServerName, stdout)
	default:
		return fmt.Errorf("unknown value for --action %s", action)
	}
	return mssqlocf.OcfExit(stderr, ocfExitCode, err)
}
// Function: start
//
// Description:
//    Implements the OCF "start" action: sets the instance's local server
//    name to the virtual server name, then verifies it via monitor.
//
func start(db *sql.DB, virtualServerName string, stdout *log.Logger) (mssqlocf.OcfExitCode, error) {
	stdout.Printf("Setting local server name to %s...\n", virtualServerName)
	err := mssqlcommon.SetLocalServerName(db, virtualServerName)
	if err != nil {
		return mssqlocf.OCF_ERR_GENERIC, fmt.Errorf("Could not set local server name: %s", err)
	}
	// Verify the rename took effect before reporting success.
	return monitor(db, virtualServerName, stdout)
}
// Function: monitor
//
// Description:
//    Implements the OCF "monitor" action: verifies the instance's local
//    server name matches the expected virtual server name
//    (case-insensitively).
//
func monitor(db *sql.DB, virtualServerName string, stdout *log.Logger) (mssqlocf.OcfExitCode, error) {
	stdout.Println("Querying local server name...")
	currentServerName, err := mssqlcommon.GetLocalServerName(db)
	if err != nil {
		return mssqlocf.OCF_ERR_GENERIC, fmt.Errorf("Could not query local server name: %s", err)
	}
	stdout.Printf("Local server name is %s\n", currentServerName)
	if !strings.EqualFold(currentServerName, virtualServerName) {
		return mssqlocf.OCF_ERR_ARGS, fmt.Errorf("Expected local server name to be %s but it was %s", virtualServerName, currentServerName)
	}
	return mssqlocf.OCF_SUCCESS, nil
}
|
package pokemon
import (
"net/http"
"github.com/gorilla/mux"
"github.com/hrishin/pokemon-shakespeare/pkg/description"
"github.com/hrishin/pokemon-shakespeare/pkg/response"
"github.com/hrishin/pokemon-shakespeare/pkg/translation"
"github.com/op/go-logging"
)
// Package-level logger for the pokemon handlers.
var log = logging.MustGetLogger("pokemon")

// GetDescriptionHandler is a handler to get the pokemons description
// and translates into shakespeares words. Errors from either upstream
// service are returned as API error responses.
func GetDescriptionHandler(w http.ResponseWriter, r *http.Request) {
	//One of the reason to mux handler is to extract the path variables easily
	vars := mux.Vars(r)
	name := vars["name"]
	log.Infof("getting the translation for %s pokemon", name)
	// Step 1: fetch the pokemon's description.
	de := description.NewDescriptor()
	desc := de.DescribePokemon(name)
	if desc.Error != nil {
		desc.ToErrorResponse().WriteErrorTo(w)
		return
	}
	// Step 2: translate the description to Shakespearean English.
	tr := translation.NewTranslator()
	trans := tr.Translate(desc.Content)
	if trans.Error != nil {
		trans.ToErrorResponse().WriteErrorTo(w)
		return
	}
	log.Infof("responding the translation %s for %s pokemon", trans.Content, name)
	response.NewAPIResponse(name, trans.Content).SendResponseTO(w)
}
|
package cmd
import (
"log"
"github.com/spf13/cobra"
"github.com/cilium/kubenetbench/kubenetbench/core"
)
// policyArg holds the --policy flag value (empty or "port").
var policyArg string

// pod2podCmd runs the pod-to-pod network benchmark with an optional
// isolation policy.
var pod2podCmd = &cobra.Command{
	Use:   "pod2pod",
	Short: "pod-to-pod network benchmark run",
	Run: func(cmd *cobra.Command, args []string) {
		// Only the empty policy and "port" are supported.
		if policyArg != "" && policyArg != "port" {
			log.Fatal("invalid policy: ", policyArg)
		}
		runctx, err := getRunBenchCtx("pod2pod", true)
		if err != nil {
			log.Fatal("initializing run context failed:", err)
		}
		st := core.Pod2PodSt{
			RunBenchCtx: runctx,
			Policy:      policyArg,
		}
		err = st.Execute()
		if err != nil {
			log.Fatal("pod2pod execution failed:", err)
		}
	},
}
// init registers the shared benchmark flags plus the pod2pod-specific
// --policy flag on the command.
func init() {
	addBenchmarkFlags(pod2podCmd)
	pod2podCmd.Flags().StringVar(&policyArg, "policy", "", "isolation policy (empty or \"port\")")
}
|
package handler
import (
"net/http"
"path"
"github.com/webhippie/oauth2-proxy/pkg/config"
)
// Auth handles the callback from the OAuth2 provider by permanently
// redirecting the client to the login endpoint under the configured
// server root.
func Auth(cfg *config.Config) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		target := path.Join(cfg.Server.Root, "login")
		http.Redirect(w, r, target, http.StatusMovedPermanently)
	}
}
|
package tmp
// HelperFuncTmp is a Go source template written verbatim into generated
// projects. It provides config-file parsing, context/logger
// initialization, and a reflection-based Dispatch helper for the
// generated "helper" package. The literal must remain compilable Go —
// do not edit it casually.
const HelperFuncTmp = `package helper
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"reflect"
"github.com/0LuigiCode0/logger"
)
func ParseConfig() (*Config, error) {
_, err := os.Stat(ConfigDir + ConfigFiel)
if err != nil {
return nil, fmt.Errorf(KeyErrorNotFound+": file: %v", ConfigDir+ConfigFiel)
}
file, err := os.Open(ConfigDir + ConfigFiel)
if err != nil {
return nil, fmt.Errorf(KeyErrorOpen+": file: %v", err)
}
defer file.Close()
buf, err := ioutil.ReadAll(file)
if err != nil {
return nil, fmt.Errorf(KeyErrorRead+": body: %v", err)
}
data := new(Config)
err = json.Unmarshal(buf, data)
if err != nil {
return nil, fmt.Errorf(KeyErrorParse+": json: %v", err)
}
return data, err
}
func InitCtx() {
Ctx, CloseCtx = context.WithCancel(context.Background())
}
func InitLogger() {
Log = logger.InitLogger("")
}
func Dispatch(f interface{}, args ...interface{}) {
ff := reflect.ValueOf(f)
if ff.Kind() == reflect.Func {
in := make([]reflect.Value, ff.Type().NumIn())
for i, arg := range args {
v := reflect.ValueOf(arg)
if v.Type().ConvertibleTo(ff.Type().In(i)) {
in[i] = v.Convert(ff.Type().In(i))
} else {
Log.Warningf("parameter: %v, expected %v got %v", i+1, ff.Type().In(i), v.Type())
return
}
}
Wg.Add(1)
go func() {
ff.Call(in)
Wg.Done()
}()
}
}`
|
package ehttp
import (
"errors"
"strconv"
"strings"
"github.com/gin-gonic/gin"
)
// parameterRule is one validation rule for a request parameter; Check
// inspects the incoming gin request and reports the first violation.
type parameterRule interface {
	Check(*gin.Context) error
}

// parameterRuleBase carries the fields common to every rule: the
// parameter's name, where it lives in the request (header/path/query/
// formData), and whether it must be present.
type parameterRuleBase struct {
	Name     string
	In       string
	Required bool
}
// GetValue extracts the parameter's raw string value from the request
// location named by p.In. A missing value is an error only when the
// parameter is required; unknown locations are always an error.
func (p parameterRuleBase) GetValue(c *gin.Context) (string, error) {
	var value string
	switch p.In {
	case "header":
		value = c.GetHeader(p.Name)
	case "path":
		value = c.Param(p.Name)
	case "query":
		value = c.Query(p.Name)
	case "formData":
		value = c.PostForm(p.Name)
	default:
		return "", errors.New("parameter in " + p.In + " is not supported")
	}
	if p.Required && value == "" {
		return "", errors.New("miss parameter " + p.Name)
	}
	return value, nil
}
// parameterRuleInt validates a signed-integer parameter: parseability at
// BitSize, optional enum membership, and optional min/max bounds.
type parameterRuleInt struct {
	parameterRuleBase
	BitSize int
	Enum    map[int64]bool
	HasMin  bool
	Min     int64
	HasMax  bool
	Max     int64
}
// Check validates a signed-integer parameter. Absent optional values
// pass unchecked; present values must parse at the configured bit size
// and satisfy the enum and min/max constraints.
func (p parameterRuleInt) Check(c *gin.Context) error {
	raw, err := p.GetValue(c)
	if err != nil || raw == "" {
		return err
	}
	n, err := strconv.ParseInt(raw, 10, p.BitSize)
	if err != nil {
		return err
	}
	if p.Enum != nil {
		if _, ok := p.Enum[n]; !ok {
			return errors.New("enum invalid (" + raw + ")")
		}
	}
	if p.HasMin && n < p.Min {
		return errors.New(raw + " less than the minimum")
	}
	if p.HasMax && n > p.Max {
		return errors.New(raw + " greater than the maximum")
	}
	return nil
}
// parameterRuleUint validates an unsigned-integer parameter: parseability,
// optional enum membership, and optional min/max bounds.
type parameterRuleUint struct {
	parameterRuleBase
	BitSize int
	Enum    map[uint64]bool
	HasMin  bool
	Min     uint64
	HasMax  bool
	Max     uint64
}
// Check validates an unsigned-integer parameter. Absent optional values
// pass unchecked; present values must parse at the configured bit size
// and satisfy the enum and min/max constraints.
func (p parameterRuleUint) Check(c *gin.Context) error {
	value, err := p.GetValue(c)
	if err != nil {
		return err
	}
	if value == "" {
		return nil
	}
	// Bug fix: parse at the rule's configured bit size. The original
	// hard-coded 64 here (the int rule uses p.BitSize), silently
	// accepting values that overflow uint8/uint16/uint32 parameters.
	numValue, err := strconv.ParseUint(value, 10, p.BitSize)
	if err != nil {
		return err
	}
	if p.Enum != nil {
		if _, ok := p.Enum[numValue]; !ok {
			return errors.New("invalid enum type (" + value + ")")
		}
	}
	if p.HasMin {
		if numValue < p.Min {
			return errors.New(value + " less than the minimum")
		}
	}
	if p.HasMax {
		if numValue > p.Max {
			return errors.New(value + " greater than the maximum")
		}
	}
	return nil
}
// parameterRuleFloat validates a floating-point parameter: parseability
// at BitSize and optional min/max bounds (no enum support for floats).
type parameterRuleFloat struct {
	parameterRuleBase
	BitSize int
	HasMin  bool
	Min     float64
	HasMax  bool
	Max     float64
}
// Check validates a float parameter. Absent optional values pass
// unchecked; present values must parse at the configured bit size and
// satisfy the min/max bounds.
func (p parameterRuleFloat) Check(c *gin.Context) error {
	raw, err := p.GetValue(c)
	if err != nil || raw == "" {
		return err
	}
	f, err := strconv.ParseFloat(raw, p.BitSize)
	if err != nil {
		return err
	}
	if p.HasMin && f < p.Min {
		return errors.New(raw + " less than the minimum")
	}
	if p.HasMax && f > p.Max {
		return errors.New(raw + " greater than the maximum")
	}
	return nil
}
// parameterRuleString validates a string parameter against an optional
// enum of allowed values.
type parameterRuleString struct {
	parameterRuleBase
	Enum map[string]bool
}
// Check validates a string parameter. Absent optional values pass
// unchecked; present values must belong to the enum when one is set.
func (p parameterRuleString) Check(c *gin.Context) error {
	raw, err := p.GetValue(c)
	if err != nil || raw == "" {
		return err
	}
	if p.Enum != nil {
		if _, ok := p.Enum[raw]; !ok {
			return errors.New("invalid enum type (" + raw + ")")
		}
	}
	return nil
}
// parameterRuleBool validates that a parameter, when present, parses as
// a boolean.
type parameterRuleBool struct {
	parameterRuleBase
}
// Check validates a bool parameter: if present, its value must be
// accepted by strconv.ParseBool.
func (p parameterRuleBool) Check(c *gin.Context) error {
	raw, err := p.GetValue(c)
	switch {
	case err != nil:
		return err
	case raw == "":
		return nil
	}
	_, err = strconv.ParseBool(raw)
	return err
}
func getParameterRules(params map[string]Parameter) ([]parameterRule, error) {
rules := []parameterRule{}
for name, param := range params {
if err := param.check(); err != nil {
return nil, err
}
_rules, err := toParameterRules(name, ¶m)
if err != nil {
return nil, err
}
rules = append(rules, _rules...)
}
return rules, nil
}
// toParameterRules converts one parameter declaration into rules — one
// per request location (path, header, query, formData) that declares a
// constraint worth checking. (Consolidates four copy-pasted branches
// into a table-driven loop; location order is preserved.)
func toParameterRules(name string, param *Parameter) ([]parameterRule, error) {
	locations := []struct {
		in   string
		info *ValueInfo
	}{
		{InPath, param.InPath},
		{InHeader, param.InHeader},
		{InQuery, param.InQuery},
		{InFormData, param.InFormData},
	}
	rules := []parameterRule{}
	for _, loc := range locations {
		if !hasParameterRule(loc.info) {
			continue
		}
		rule, err := newParameterRule(name, loc.in, loc.info)
		if err != nil {
			return nil, err
		}
		rules = append(rules, rule)
	}
	return rules, nil
}
// hasParameterRule reports whether a value declaration carries any
// constraint that needs a runtime check: an enum, a bound, bool parsing,
// or requiredness. A nil declaration needs no rule.
func hasParameterRule(valueInfo *ValueInfo) bool {
	if valueInfo == nil {
		return false
	}
	return valueInfo.hasEnum() ||
		valueInfo.hasMax() ||
		valueInfo.hasMin() ||
		valueInfo.isBool() ||
		valueInfo.Required
}
// newParameterRule checks the value description and then dispatches on
// its declared type to construct the matching rule implementation.
func newParameterRule(name string, in string, valueInfo *ValueInfo) (parameterRule, error) {
	if err := valueInfo.check(); err != nil {
		return nil, err
	}
	switch {
	case valueInfo.isString():
		return newParameterRuleString(name, in, valueInfo)
	case valueInfo.isInt():
		return newParameterRuleInt(name, in, valueInfo)
	case valueInfo.isUint():
		return newParameterRuleUint(name, in, valueInfo)
	case valueInfo.isBool():
		return newParameterRuleBool(name, in, valueInfo)
	case valueInfo.isFloat():
		return newParameterRuleFloat(name, in, valueInfo)
	default:
		return nil, errors.New("invalid valueInfo")
	}
}
func newParameterRuleString(name string, in string, valueInfo *ValueInfo) (parameterRule, error) {
rule := ¶meterRuleString{parameterRuleBase: parameterRuleBase{
Name: name,
In: in,
Required: valueInfo.Required,
}}
if in == InPath {
rule.Required = true
}
if valueInfo.hasEnum() {
rule.Enum = make(map[string]bool, 0)
enumTypes := strings.Fields(valueInfo.Enum)
for _, enumType := range enumTypes {
rule.Enum[enumType] = true
}
}
return rule, nil
}
func newParameterRuleBool(name string, in string, valueInfo *ValueInfo) (parameterRule, error) {
return ¶meterRuleBool{parameterRuleBase: parameterRuleBase{
Name: name,
In: in,
Required: valueInfo.Required,
}}, nil
}
func newParameterRuleInt(name string, in string, valueInfo *ValueInfo) (parameterRule, error) {
rule := ¶meterRuleInt{parameterRuleBase: parameterRuleBase{
Name: name,
In: in,
Required: valueInfo.Required,
}}
rule.BitSize = valueInfo.getBitSize()
if valueInfo.hasEnum() {
rule.Enum = make(map[int64]bool, 0)
enumTypes := strings.Fields(valueInfo.Enum)
for _, enumType := range enumTypes {
num, err := strconv.ParseInt(enumType, 10, valueInfo.getBitSize())
if err != nil {
return nil, err
}
rule.Enum[num] = true
}
}
if valueInfo.hasMin() {
rule.HasMin = true
num, err := strconv.ParseInt(valueInfo.Min, 10, valueInfo.getBitSize())
if err != nil {
return nil, err
}
rule.Min = num
}
if valueInfo.hasMax() {
rule.HasMax = true
num, err := strconv.ParseInt(valueInfo.Max, 10, valueInfo.getBitSize())
if err != nil {
return nil, err
}
rule.Max = num
}
return rule, nil
}
func newParameterRuleUint(name string, in string, valueInfo *ValueInfo) (parameterRule, error) {
rule := ¶meterRuleUint{parameterRuleBase: parameterRuleBase{
Name: name,
In: in,
Required: valueInfo.Required,
}}
rule.BitSize = valueInfo.getBitSize()
if valueInfo.hasEnum() {
rule.Enum = make(map[uint64]bool, 0)
enumTypes := strings.Fields(valueInfo.Enum)
for _, enumType := range enumTypes {
num, err := strconv.ParseUint(enumType, 10, valueInfo.getBitSize())
if err != nil {
return nil, err
}
rule.Enum[num] = true
}
}
if valueInfo.hasMin() {
rule.HasMin = true
num, err := strconv.ParseUint(valueInfo.Min, 10, valueInfo.getBitSize())
if err != nil {
return nil, err
}
rule.Min = num
}
if valueInfo.hasMax() {
rule.HasMax = true
num, err := strconv.ParseUint(valueInfo.Max, 10, valueInfo.getBitSize())
if err != nil {
return nil, err
}
rule.Max = num
}
return rule, nil
}
func newParameterRuleFloat(name string, in string, valueInfo *ValueInfo) (parameterRule, error) {
rule := ¶meterRuleFloat{parameterRuleBase: parameterRuleBase{
Name: name,
In: in,
Required: valueInfo.Required,
}}
rule.BitSize = valueInfo.getBitSize()
if valueInfo.hasMin() {
rule.HasMin = true
num, err := strconv.ParseFloat(valueInfo.Min, valueInfo.getBitSize())
if err != nil {
return nil, err
}
rule.Min = num
}
if valueInfo.hasMax() {
rule.HasMax = true
num, err := strconv.ParseFloat(valueInfo.Max, valueInfo.getBitSize())
if err != nil {
return nil, err
}
rule.Max = num
}
return rule, nil
}
|
// reverseList reverses a singly linked list in place and returns the
// new head. Standard iterative pointer reversal: O(n) time, O(1) space.
func reverseList(head *ListNode) *ListNode {
	var prev *ListNode
	curr := head
	for curr != nil {
		next := curr.Next
		curr.Next = prev
		prev = curr
		curr = next
	}
	return prev
}
|
package main
import (
"context"
"fmt"
"net"
"os"
"os/signal"
"syscall"
"github.com/birchwood-langham/portdb-ws/db"
"github.com/birchwood-langham/portdb-ws/db/pg"
pd "github.com/birchwood-langham/portdb-ws/protocol"
"github.com/birchwood-langham/web-service-bootstrap/config"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
"google.golang.org/grpc"
)
// portDomainServer implements the gRPC PortDomainService on top of a
// pluggable db.PortDB backend (Postgres in production, an in-memory
// store when the test-db flag is set).
type portDomainServer struct {
	DB db.PortDB
}
// newPortDomainServer opens a backing store via the supplied connector
// and wraps it in a server value. The connector indirection lets the
// caller pick the real database or the in-memory test double.
func newPortDomainServer(connectDB func() (db.PortDB, error)) (portDomainServer, error) {
	portDB, err := connectDB()
	if err != nil {
		return portDomainServer{}, err
	}
	return portDomainServer{DB: portDB}, nil
}
// SavePort persists the given port and reports success.
func (pds portDomainServer) SavePort(ctx context.Context, p *pd.Port) (*pd.SaveResponse, error) {
	if err := pds.DB.AddPort(p); err != nil {
		return nil, err
	}
	return &pd.SaveResponse{Success: true}, nil
}
// FindPort returns all ports matching the given port data.
func (pds portDomainServer) FindPort(ctx context.Context, p *pd.PortData) (*pd.Ports, error) {
	return pds.DB.FindPorts(p)
}
// GetPort fetches a single port by its symbol.
func (pds portDomainServer) GetPort(ctx context.Context, r *pd.GetPortRequest) (*pd.Port, error) {
	return pds.DB.GetPort(r.GetSymbol())
}
// initializeViper points viper at the local config search paths and
// loads the "application" config file if one exists; a missing file is
// tolerated silently so built-in defaults can apply.
func initializeViper() {
	viper.AddConfigPath(".")
	viper.AddConfigPath("./conf")
	viper.SetConfigName("application")
	if err := viper.ReadInConfig(); err == nil {
		fmt.Println("Using config file:", viper.ConfigFileUsed())
	}
}
// main loads configuration, starts the gRPC server in a goroutine and
// blocks until serve signals shutdown via the done channel.
func main() {
	initializeViper()
	// Port defaults to 9999 when not configured.
	port := config.Get("ports-domain-service", "port").Int(9999)
	log.Printf("Starting Ports Domain Service on port: %d", port)
	done := make(chan struct{})
	go serve(port, done)
	<-done
}
// serve listens on the given TCP port and runs the gRPC Port Domain
// service until the process receives SIGINT/SIGTERM, at which point it
// signals completion on done. The backing store is the real Postgres
// database unless db.use-test-db selects the in-memory double.
func serve(port int, done chan struct{}) {
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		log.Fatalf("could not listen on port %d: %v", port, err)
	}
	// listen for any term or interrupt signals from the OS and perform clean up
	go func() {
		signalChannel := make(chan os.Signal, 1)
		signal.Notify(signalChannel, syscall.SIGINT, syscall.SIGTERM)
		<-signalChannel
		fmt.Println("Cleaning up services")
		// perform any cleanup we need to do here
		// e.g. disconnect from databases etc.
		// NOTE(review): this only tells main to exit; the gRPC server is
		// not stopped gracefully (srv.GracefulStop) — confirm intended.
		fmt.Println("Shutting down Port Domain service")
		done <- struct{}{}
	}()
	srv := grpc.NewServer()
	var pds portDomainServer
	if config.Get("db", "use-test-db").Bool(false) {
		log.Warnf("Using in-memory test database. Only use this if you're doing tests against the gRPC requests for convenience only!!!")
		pds, err = newPortDomainServer(connectTestDB)
	} else {
		pds, err = newPortDomainServer(connectDB)
	}
	if err != nil {
		log.Fatalf("Could not connect to Ports database: %v", err)
	}
	pd.RegisterPortDomainServiceServer(srv, pds)
	if err := srv.Serve(lis); err != nil {
		// Fixed copy-paste: this is the Port Domain service, not "Song Match".
		log.Printf("could not serve Port Domain service: %v", err)
	}
}
// connectDB builds a Postgres-backed PortDB from the "db.pg" settings
// and opens the connection.
func connectDB() (db.PortDB, error) {
	// Named "props" rather than "config" so the local no longer shadows
	// the imported config package.
	props, err := pg.NewConnectionPropertiesFromSettings("db.pg")
	if err != nil {
		return nil, err
	}
	portDB := pg.New(props)
	if err := portDB.Connect(); err != nil {
		return nil, err
	}
	return portDB, nil
}
// connectTestDB opens the in-memory test database used when the
// db.use-test-db flag is enabled.
func connectTestDB() (db.PortDB, error) {
	portDB := db.NewTestDB()
	if err := portDB.Connect(); err != nil {
		return nil, err
	}
	return portDB, nil
}
|
package main
import (
"fmt"
"io"
"os"
"reflect"
)
// main demonstrates several reflection idioms and the equivalent type
// assertion switch. Output strings are deliberately left as-is.
func main() {
	// 1. Inspect a value's kind via reflection.
	for _, v := range []interface{}{"hi", 123, true, 90.99, func() {}} {
		switch v := reflect.ValueOf(v); v.Kind() {
		case reflect.String:
			fmt.Printf("%s 是一个字符串\n", v.String())
		case reflect.Bool:
			fmt.Printf("%t 是一个布尔值\n", v.Bool())
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			fmt.Printf("%d 是一个数字\n", v.Int())
		default:
			fmt.Printf("%s 其他类型\n", v.Kind())
		}
	}
	// 2. Read a struct field's tag.
	type S struct {
		F string `color:"red"`
	}
	s := S{}
	st := reflect.TypeOf(s)
	field := st.Field(0)
	fmt.Println(field.Tag.Get("color")) // red
	// 3. Compare two values for deep equality.
	// NOTE: DeepEqual is slower than a hand-written element loop.
	a := []int{1, 2, 3}
	b := []int{1, 2, 3}
	fmt.Println(reflect.DeepEqual(a, b)) // true
	// 4. Check whether a type implements an interface.
	writerType := reflect.TypeOf((*io.Writer)(nil)).Elem()
	fileType := reflect.TypeOf((*os.File)(nil))
	fmt.Println(fileType.Implements(writerType)) // true
	// =================== type assertion ============================
	for _, v := range []interface{}{"hi", 123, true, 90.99, func() {}} {
		switch v.(type) {
		case string:
			fmt.Println("字符串:", v)
		case int:
			fmt.Println("数字:", v)
		case bool:
			fmt.Println("布尔值:", v)
		default:
			fmt.Println("其他类型:", v)
		}
	}
}
|
package constants
// OrderStatus groups the numeric codes for each order lifecycle state.
type OrderStatus struct {
	New uint8
	Checked uint8
	Paid uint8
	Canceled uint8
}

// ORDER_STATUS is the shared instance mapping state names to codes
// (New=0, Checked=1, Paid=2, Canceled=3).
// NOTE(review): Go convention is MixedCaps, not ALL_CAPS — renaming
// would break external callers, so it is left as-is.
var ORDER_STATUS = OrderStatus{
	New: 0,
	Checked: 1,
	Paid: 2,
	Canceled: 3,
}
|
package spark
import (
"github.com/gin-gonic/gin"
)
// ApplyRoutes applies router to the gin Engine, mounting the Spark
// page-view analytics endpoints under the /spark group.
func ApplyRoutes(r *gin.RouterGroup) {
	posts := r.Group("/spark")
	{
		posts.POST("/pv_by_urls", PostDailyPV)
		posts.POST("/pv_monthly_by_urls", PostMonthlyPV)
		posts.POST("/total_pv_by_urls", PostTotalPV)
		posts.POST("/move_zi_article_pv", PostMovePV)
		posts.GET("/pv_by_author", GetAuthorPV)
		posts.GET("/pv_by_hostname", GetHostPV)
	}
}
|
package main
import (
"bufio"
"errors"
"fmt"
"os"
"strconv"
"strings"
)
// main reads stdin, parses it into the problem's input shape, then
// prints whether the product a*b is even or odd.
func main() {
	// Read the standard input.
	stdin, err := FetchStdin()
	if err != nil {
		panic(err)
	}
	// fmt.Println(stdin)
	// Shape the data per the problem statement.
	pd, err := formatPracticeData(stdin)
	if err != nil {
		panic(err)
	}
	// tools.PrintStruct(pd)
	// Decide and print the answer.
	output := isProductEvenOrOdd(pd)
	fmt.Println(output)
}
// practiceData is the input shape specified by the problem statement:
// two integers a and b, each expected in 1..10000.
type practiceData struct {
	a int
	b int
}
// formatPracticeData parses the first stdin line as two space-separated
// integers a and b and range-checks both against 1..10000. Malformed
// numbers panic inside EasyAtoi, matching the contest-style helpers.
func formatPracticeData(stdin []string) (practiceData, error) {
	fields := strings.Split(stdin[0], " ")
	data := practiceData{
		a: EasyAtoi(fields[0]),
		b: EasyAtoi(fields[1]),
	}
	if data.a < 1 || data.a > 10000 {
		return data, errors.New("aの値が不正です")
	}
	if data.b < 1 || data.b > 10000 {
		return data, errors.New("bの値が不正です")
	}
	return data, nil
}
// isProductEvenOrOdd returns "Even" when a*b is even, "Odd" otherwise.
func isProductEvenOrOdd(pd practiceData) string {
	if pd.a*pd.b%2 == 0 {
		return "Even"
	}
	return "Odd"
}
// Helpers below are copy-pasted from the tools library for submission.
// FetchStdin reads all lines from standard input.
// Fix: sc.Err() was checked before the Scan loop, where it is always
// nil; the scanner's error is only meaningful after Scan returns false.
func FetchStdin() (result []string, err error) {
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		result = append(result, sc.Text())
	}
	if err = sc.Err(); err != nil {
		return nil, err
	}
	return result, nil
}
// EasyAtoi is syntactic sugar over strconv.Atoi that panics on a parse
// failure instead of returning the error (contest-style helper).
func EasyAtoi(s string) int {
	n, err := strconv.Atoi(s)
	if err != nil {
		panic(err)
	}
	return n
}
// PrintStruct dumps any value with field names (%+v) for debugging.
func PrintStruct(st interface{}) {
	fmt.Printf("%+v\n", st)
}
|
package http
import (
"bytes"
"encoding/json"
"strings"
"github.com/miRemid/mio"
"github.com/miRemid/mioqq"
)
// CQParams holds free-form API call parameters.
type CQParams map[string]interface{}

// CQContext is the per-event conversation context. The unexported
// fields drive the middleware chain and response bookkeeping; the
// tagged fields mirror the CQHTTP event payload this context was
// decoded from.
type CQContext struct {
	Context  *mio.Context
	API      *mioqq.API
	handlers []HandleFunc // middleware chain, advanced by Next
	index    int          // index of the currently running handler
	Params   []string
	quick    bool // true once a quick response has been written
	ws       bool // true when the event arrived over websocket
	Time          int64       `json:"time"`
	SelfID        int64       `json:"self_id"`
	PostType      string      `json:"post_type"`
	MessageType   string      `json:"message_type,omitempty"`
	SubType       string      `json:"sub_type,omitempty"`
	MessageID     int64       `json:"message_id,omitempty"`
	GroupID       int64       `json:"group_id,omitempty"`
	DiscussID     int64       `json:"discuss_id,omitempty"`
	UserID        int64       `json:"user_id"`
	Font          int32       `json:"font,omitempty"`
	Message       interface{} `json:"message,omitempty"`
	RawMessage    string      `json:"raw_message,omitempty"`
	Anonymous     interface{} `json:"anonymous,omitempty"`
	AnonymousFlag string      `json:"anonymous_flag,omitempty"`
	Sender        *User       `json:"sender"`
	NoticeType    string      `json:"notice_type,omitempty"`
	OperatorID    int64       `json:"operator_id,omitempty"`
	File          *File       `json:"file,omitempty"`
	RequestType   string      `json:"request_type"`
	Flag          string      `json:"flag,omitempty"`
	Comment       string      `json:"comment"`
	Duration      int64       `json:"duration"`
}
// Next advances the middleware chain and invokes the following handler
// when one remains.
func (context *CQContext) Next() {
	context.index++
	if n := len(context.handlers); context.index != n {
		context.handlers[context.index](context)
	}
}
// CmdParser matches the first token of message against the registered
// command prefixes. On a match it returns the remainder of the token
// after the prefix as cmd and any following tokens as params; when no
// prefix matches (or the remainder is empty) both results are zero.
func (context *CQContext) CmdParser(message string, cmds ...string) (cmd string, params []string) {
	msg := strings.TrimSpace(message)
	split := strings.Split(msg, " ")
	tcmd := split[0]
	if tcmd == "" {
		return "", nil
	}
	for _, v := range cmds {
		// Skip prefixes that cannot match: equal length but different
		// text, or longer than the token itself.
		if len(v) == len(tcmd) && v != tcmd || len(v) > len(tcmd) {
			continue
		}
		if v == tcmd[:len(v)] {
			// A prefix with nothing after it carries no command name.
			if len(tcmd[len(v):]) == 0 {
				continue
			}
			cmd = tcmd[len(v):]
			if len(split) == 1 {
				params = nil
			} else {
				params = split[1:]
			}
			break
		}
	}
	return
}
// JSON writes body as a JSON quick response with the given HTTP status
// code. It is a no-op for websocket contexts, when a quick response has
// already been sent, or when body is nil; on success the context is
// marked as answered.
func (context *CQContext) JSON(code int, body interface{}) error {
	if context.ws || context.quick || body == nil {
		return nil
	}
	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(body); err != nil {
		return err
	}
	// Fixed ordering: the status code must be written before the body.
	// Previously WriteHeader ran after Write, by which point the header
	// (implicit 200) was already sent and the code was dropped.
	// NOTE(review): assumes Writer follows http.ResponseWriter
	// semantics — confirm against mio.Context.
	context.Context.Writer.WriteHeader(code)
	context.Context.Writer.Write(buf.Bytes())
	context.quick = true
	return nil
}
// Send sends a plain-string message back to the source of the current
// event, choosing the private/group/discuss message type (and its async
// variant) from the event's MessageType.
//
// NOTE(review): all three branches pass context.UserID as the message
// target, including the group and discuss paths, even though GroupID
// and DiscussID exist on the context — verify against the mioqq
// NewMessage contract before relying on group/discuss sends.
func (context *CQContext) Send(message string, async bool) (mioqq.CQAPIResponse, error) {
	var msg *mioqq.Message
	switch context.MessageType {
	case "private":
		if async {
			msg = context.API.NewMessage(context.UserID, mioqq.PrivateMessageAsync, mioqq.StringContent)
		} else {
			msg = context.API.NewMessage(context.UserID, mioqq.PrivateMessage, mioqq.StringContent)
		}
		break // redundant in Go; kept byte-identical
	case "group":
		if async {
			msg = context.API.NewMessage(context.UserID, mioqq.GroupMessageAsync, mioqq.StringContent)
		} else {
			msg = context.API.NewMessage(context.UserID, mioqq.GroupMessage, mioqq.StringContent)
		}
		break
	case "discuss":
		if async {
			msg = context.API.NewMessage(context.UserID, mioqq.DiscussMessageAsync, mioqq.StringContent)
		} else {
			msg = context.API.NewMessage(context.UserID, mioqq.DiscussMessage, mioqq.StringContent)
		}
		break
	}
	// msg is nil for any other MessageType; msg.Text would then panic.
	msg.Text(message)
	return context.API.Send(msg)
}
// User is the sender of a CQ event. Group-membership fields (card,
// title, role, join/last-sent times) are only present for group events;
// the anonymous_* fields mirror anonymous-sender payloads.
type User struct {
	ID                  int64  `json:"user_id"`
	NickName            string `json:"nickname"`
	Sex                 string `json:"sex"`
	Age                 int    `json:"age"`
	Area                string `json:"area,omitempty"`
	Card                string `json:"card,omitempty"`
	CardChangeable      bool   `json:"card_changeable,omitempty"`
	Title               string `json:"title,omitempty"`
	TitleExpireTimeUnix int64  `json:"title_expire_time,omitempty"`
	Level               string `json:"level,omitempty"`
	Role                string `json:"role,omitempty"`
	Unfriendly          bool   `json:"unfriendly,omitempty"`
	JoinTimeUnix        int64  `json:"join_time,omitempty"`
	LastSentTimeUnix    int64  `json:"last_sent_time,omitempty"`
	AnonymousID         int64  `json:"anonymous_id,omitempty" anonymous:"id"`
	AnonymousName       string `json:"anonymous_name,omitempty" anonymous:"name"`
	AnonymousFlag       string `json:"anonymous_flag,omitempty" anonymous:"flag"`
}
// File describes the file attached to a cqhttp upload event.
type File struct {
	ID    string `json:"id"`
	Name  string `json:"name"`
	Size  int64  `json:"size"` // size in bytes
	BusID int64  `json:"busid"`
}
|
package gaodeMap
import (
"fmt"
"errors"
)
// GaodeMapClient wraps the Gaode (AMap) web API with an access key.
type GaodeMapClient struct {
	ak string // API access key, sent as the "key" query parameter
}

// NewGaodeMapClient builds a client bound to the given access key.
func NewGaodeMapClient(ak string) *GaodeMapClient {
	return &GaodeMapClient{ak: ak}
}

// GetAk returns the configured access key.
func (ac *GaodeMapClient) GetAk() string {
	return ac.ak
}

// SetAk replaces the access key.
func (ac *GaodeMapClient) SetAk(ak string) {
	ac.ak = ak
}
// GetRoute queries a driving route from (lat1,lng1) to (lat2,lng2).
// Note the API expects lng,lat ordering in the query string. A non-"1"
// status from the service is surfaced as an error; on any failure an
// empty *StructRoute is returned alongside the error.
func (ac *GaodeMapClient) GetRoute(lat1, lng1, lat2, lng2 string) (*StructRoute, error) {
	parameter := fmt.Sprintf("origin=%s,%s&destination=%s,%s&key=%s", lng1, lat1, lng2, lat2, ac.GetAk())
	reqURL := fmt.Sprintf("%s%s", reqURLForRoute, parameter)
	raw, err := requestGaode("GetRoute", reqURL)
	if err != nil {
		return new(StructRoute), err
	}
	route := raw.(*StructRoute)
	if route.Status != "1" {
		return new(StructRoute), fmt.Errorf("error:%s", route.Message)
	}
	return route, nil
}
|
package postgres
import (
"context"
"github.com/jmoiron/sqlx"
"github.com/quintans/go-clean-ddd/internal/domain"
"github.com/quintans/go-clean-ddd/internal/domain/customer"
)
// CustomerViewRepository is the read-side repository over the
// "customers" table, backed by sqlx.
type CustomerViewRepository struct {
	client *sqlx.DB
}

// NewCustomerViewRepository wraps an open sqlx handle.
func NewCustomerViewRepository(db *sqlx.DB) CustomerViewRepository {
	return CustomerViewRepository{
		client: db,
	}
}
// GetAll loads every customer row and maps them to the domain model.
// Database errors are translated through errorMap.
func (r CustomerViewRepository) GetAll(ctx context.Context) ([]customer.Customer, error) {
	rows := []Customer{}
	if err := r.client.SelectContext(ctx, &rows, "SELECT * FROM customers"); err != nil {
		return nil, errorMap(err)
	}
	return toDomainCustomers(rows)
}
// GetByEmail fetches the single customer row with the given email and
// maps it into the domain model. Errors are translated via errorMap.
func (r CustomerViewRepository) GetByEmail(ctx context.Context, email domain.Email) (customer.Customer, error) {
	var row Customer
	if err := r.client.GetContext(ctx, &row, "SELECT * FROM customers WHERE email=$1", email.String()); err != nil {
		return customer.Customer{}, errorMap(err)
	}
	return toDomainCustomer(row)
}
|
/*
* Copyright (C) 2017-Present Pivotal Software, Inc. All rights reserved.
*
* This program and the accompanying materials are made available under
* the terms of the under the Apache License, Version 2.0 (the "License”);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package instance
import (
"code.cloudfoundry.org/cli/plugin"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/cfutil"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/serviceutil"
)
//go:generate counterfeiter . Operation

// Operation is a single management action to execute against a service
// instance (or its broker, per IsServiceBrokerOperation).
type Operation interface {
	Run(serviceInstanceAdminParameters serviceutil.ManagementParameters, accessToken string) (string, error)
	IsServiceBrokerOperation() bool
}

// OperationRunner resolves a named service instance and executes an
// Operation against it.
type OperationRunner interface {
	RunOperation(serviceInstanceName string, operation Operation) (string, error)
}

// authenticatedOperationRunner is the CF-authenticated implementation:
// it pulls a token from the CLI connection before resolving and running.
type authenticatedOperationRunner struct {
	cliConnection              plugin.CliConnection
	serviceInstanceUrlResolver serviceutil.ServiceInstanceResolver
}

// NewAuthenticatedOperationRunner wires a CLI connection and a service
// instance resolver into an OperationRunner.
func NewAuthenticatedOperationRunner(
	cliConnection plugin.CliConnection,
	serviceInstanceUrlResolver serviceutil.ServiceInstanceResolver) OperationRunner {
	return &authenticatedOperationRunner{
		cliConnection:              cliConnection,
		serviceInstanceUrlResolver: serviceInstanceUrlResolver,
	}
}
// RunOperation authenticates against Cloud Foundry, resolves the
// management parameters for the named service instance, then executes
// the operation with the obtained token.
func (aor *authenticatedOperationRunner) RunOperation(serviceInstanceName string, operation Operation) (string, error) {
	accessToken, err := cfutil.GetToken(aor.cliConnection)
	if err != nil {
		return "", err
	}
	params, err := aor.serviceInstanceUrlResolver.GetManagementParameters(serviceInstanceName, accessToken, operation.IsServiceBrokerOperation())
	if err != nil {
		return "", err
	}
	return operation.Run(params, accessToken)
}
|
package kucoin
import (
"testing"
"time"
)
// TestApiService_HfPlaceOrder exercises placing a single HF limit sell
// order. Skipped by default because it hits the live exchange.
func TestApiService_HfPlaceOrder(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	clientOid := IntToString(time.Now().Unix())
	p := map[string]string{
		"clientOid": clientOid,
		"symbol":    "MATIC-USDT",
		"type":      "limit",
		"side":      "sell",
		"stp":       "CN",
		"size":      "0.1",
		"price":     "3.0",
	}
	rsp, err := s.HfPlaceOrder(p)
	if err != nil {
		t.Fatal(err)
	}
	v := &HfPlaceOrderRes{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
	if v.OrderId == "" {
		t.Error("Empty key 'orderId'")
	}
}

// TestApiService_HfSyncPlaceOrder exercises the synchronous variant of
// placing a single HF market sell order. Skipped by default (live API).
func TestApiService_HfSyncPlaceOrder(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	clientOid := IntToString(time.Now().Unix())
	p := map[string]string{
		"clientOid": clientOid,
		"symbol":    "MATIC-USDT",
		"type":      "market",
		"side":      "sell",
		"stp":       "CN",
		"tags":      "t",
		"remark":    "r",
		"size":      "0.1",
	}
	rsp, err := s.HfSyncPlaceOrder(p)
	if err != nil {
		t.Fatal(err)
	}
	v := &HfSyncPlaceOrderRes{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
	if v.OrderId == "" {
		t.Error("Empty key 'orderId'")
	}
}
// TestApiService_HfPlaceMultiOrders places a batch of two HF market
// sell orders. Skipped by default (live API).
func TestApiService_HfPlaceMultiOrders(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	clientOid := IntToString(time.Now().Unix())
	p := make([]*HFCreateMultiOrderModel, 0)
	p = append(p, &HFCreateMultiOrderModel{
		ClientOid: clientOid,
		Symbol:    "MATIC-USDT",
		OrderType: "market",
		Side:      "sell",
		Size:      "0.1",
	})
	clientOid2 := IntToString(time.Now().Unix())
	p = append(p, &HFCreateMultiOrderModel{
		ClientOid: clientOid2,
		Symbol:    "MATIC-USDT",
		OrderType: "market",
		Side:      "sell",
		Size:      "0.1",
	})
	rsp, err := s.HfPlaceMultiOrders(p)
	if err != nil {
		t.Fatal(err)
	}
	v := &HfPlaceMultiOrdersRes{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}

// TestApiService_HfSyncPlaceMultiOrders places a batch of two HF market
// buy orders via the synchronous endpoint. Skipped by default (live API).
func TestApiService_HfSyncPlaceMultiOrders(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	clientOid := IntToString(time.Now().Unix())
	p := make([]*HFCreateMultiOrderModel, 0)
	p = append(p, &HFCreateMultiOrderModel{
		ClientOid: clientOid,
		Symbol:    "MATIC-USDT",
		OrderType: "market",
		Side:      "buy",
		Size:      "0.1",
	})
	clientOid2 := IntToString(time.Now().Unix())
	p = append(p, &HFCreateMultiOrderModel{
		ClientOid: clientOid2,
		Symbol:    "MATIC-USDT",
		OrderType: "market",
		Side:      "buy",
		Size:      "0.2",
	})
	rsp, err := s.HfSyncPlaceMultiOrders(p)
	if err != nil {
		t.Fatal(err)
	}
	v := &HfSyncPlaceMultiOrdersRes{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}
// TestApiService_HfObtainFilledOrders lists filled HF orders and sanity
// checks required fields on each item.
// NOTE(review): the `v == nil` guard after ReadData can never trigger —
// v is assigned a non-nil literal above.
func TestApiService_HfObtainFilledOrders(t *testing.T) {
	s := NewApiServiceFromEnv()
	p := map[string]string{
		"symbol": "MATIC-USDT",
	}
	rsp, err := s.HfObtainFilledOrders(p)
	if err != nil {
		t.Fatal(err)
	}
	v := &HfFilledOrdersModel{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	if v == nil {
		return
	}
	for _, o := range v.Items {
		t.Log(ToJsonString(o))
		switch {
		case o.Id == "":
			t.Error("Empty key 'id'")
		case o.Symbol == "":
			t.Error("Empty key 'symbol'")
		case o.OpType == "":
			t.Error("Empty key 'opType'")
		case o.Type == "":
			t.Error("Empty key 'type'")
		case o.Side == "":
			t.Error("Empty key 'side'")
		}
	}
}

// TestApiService_HfObtainActiveOrders lists active HF orders for a
// symbol and sanity checks required fields on each order.
func TestApiService_HfObtainActiveOrders(t *testing.T) {
	s := NewApiServiceFromEnv()
	rsp, err := s.HfObtainActiveOrders("MATIC-USDT")
	if err != nil {
		t.Fatal(err)
	}
	v := HfOrdersModel{}
	if err := rsp.ReadData(&v); err != nil {
		t.Fatal(err)
	}
	for _, o := range v {
		t.Log(ToJsonString(o))
		switch {
		case o.Id == "":
			t.Error("Empty key 'id'")
		case o.Symbol == "":
			t.Error("Empty key 'symbol'")
		case o.OpType == "":
			t.Error("Empty key 'opType'")
		case o.Type == "":
			t.Error("Empty key 'type'")
		case o.Side == "":
			t.Error("Empty key 'side'")
		}
	}
}
// TestApiService_HfObtainActiveSymbols lists symbols with active HF
// orders and logs the response.
func TestApiService_HfObtainActiveSymbols(t *testing.T) {
	s := NewApiServiceFromEnv()
	rsp, err := s.HfObtainActiveSymbols()
	if err != nil {
		t.Fatal(err)
	}
	v := HfSymbolsModel{}
	if err := rsp.ReadData(&v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}

// TestApiService_HfOrderDetail fetches one HF order by exchange order
// id (hard-coded test id) and logs it.
func TestApiService_HfOrderDetail(t *testing.T) {
	s := NewApiServiceFromEnv()
	rsp, err := s.HfOrderDetail("649a45d576174800019e44b4", "MATIC-USDT")
	if err != nil {
		t.Fatal(err)
	}
	v := &HfOrderModel{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}

// TestApiService_HfOrderDetailByClientOid fetches one HF order by
// client-assigned oid and logs it.
func TestApiService_HfOrderDetailByClientOid(t *testing.T) {
	s := NewApiServiceFromEnv()
	rsp, err := s.HfOrderDetailByClientOid("1687832021", "MATIC-USDT")
	if err != nil {
		t.Fatal(err)
	}
	v := &HfOrderModel{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}
// Sample order id used by the modify test: 649a45d576174800019e44b4.

// TestApiService_HfModifyOrder modifies the price of an existing HF
// order. Skipped by default (live API, mutates state).
func TestApiService_HfModifyOrder(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	p := map[string]string{
		"symbol":   "MATIC-USDT",
		"orderId":  "649a45d576174800019e44b4",
		"newPrice": "2.0",
	}
	rsp, err := s.HfModifyOrder(p)
	if err != nil {
		t.Fatal(err)
	}
	v := &HfModifyOrderRes{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}

// TestApiService_HfQueryAutoCancelSetting reads the current HF
// auto-cancel configuration and logs it.
func TestApiService_HfQueryAutoCancelSetting(t *testing.T) {
	s := NewApiServiceFromEnv()
	rsp, err := s.HfQueryAutoCancelSetting()
	if err != nil {
		t.Fatal(err)
	}
	v := &AUtoCancelSettingModel{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}

// TestApiService_HfAutoCancelSetting arms the HF auto-cancel timer.
// Skipped by default (live API, mutates state).
func TestApiService_HfAutoCancelSetting(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	rsp, err := s.HfAutoCancelSetting(10000, "MATIC-USDT")
	if err != nil {
		t.Fatal(err)
	}
	v := &HfAutoCancelSettingRes{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}
// TestApiService_HfCancelOrder cancels an HF order by exchange order
// id. Skipped by default (live API, mutates state).
func TestApiService_HfCancelOrder(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	rsp, err := s.HfCancelOrder("649a49201a39390001adcce8", "MATIC-USDT")
	if err != nil {
		t.Fatal(err)
	}
	v := &HfOrderIdModel{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}

// TestApiService_HfSyncCancelOrder is the synchronous variant of the
// cancel-by-order-id test above. Skipped by default.
func TestApiService_HfSyncCancelOrder(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	rsp, err := s.HfSyncCancelOrder("649a49201a39390001adcce8", "MATIC-USDT")
	if err != nil {
		t.Fatal(err)
	}
	v := &HfSyncCancelOrderRes{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}

// TestApiService_HfCancelOrderByClientId cancels an HF order by
// client oid. Skipped by default.
func TestApiService_HfCancelOrderByClientId(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	rsp, err := s.HfCancelOrderByClientId("649a49201a39390001adcce8", "MATIC-USDT")
	if err != nil {
		t.Fatal(err)
	}
	v := &HfClientOidModel{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}

// TestApiService_HfSyncCancelOrderByClientId is the synchronous variant
// of cancel-by-client-oid. Skipped by default.
func TestApiService_HfSyncCancelOrderByClientId(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	rsp, err := s.HfSyncCancelOrderByClientId("649a49201a39390001adcce8", "MATIC-USDT")
	if err != nil {
		t.Fatal(err)
	}
	v := &HfSyncCancelOrderRes{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}
// TestApiService_HfSyncCancelOrderWithSize partially cancels an HF
// order (a given size) synchronously. Skipped by default.
func TestApiService_HfSyncCancelOrderWithSize(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	rsp, err := s.HfSyncCancelOrderWithSize("649a49201a39390001adcce8", "MATIC-USDT", "0.3")
	if err != nil {
		t.Fatal(err)
	}
	v := &HfSyncCancelOrderWithSizeRes{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(v))
}

// TestApiService_HfSyncCancelAllOrders cancels every HF order on the
// symbol; the response payload is a bare string. Skipped by default.
func TestApiService_HfSyncCancelAllOrders(t *testing.T) {
	t.SkipNow()
	s := NewApiServiceFromEnv()
	rsp, err := s.HfSyncCancelAllOrders("MATIC-USDT")
	if err != nil {
		t.Fatal(err)
	}
	data := new(string)
	if err := rsp.ReadData(data); err != nil {
		t.Fatal(err)
	}
	t.Log(ToJsonString(data))
}

// TestApiService_HfTransactionDetails lists HF fills for a symbol and
// logs each item.
// NOTE(review): the `v == nil` guard can never trigger — v is assigned
// a non-nil literal above.
func TestApiService_HfTransactionDetails(t *testing.T) {
	s := NewApiServiceFromEnv()
	p := map[string]string{
		"symbol": "MATIC-USDT",
	}
	rsp, err := s.HfTransactionDetails(p)
	if err != nil {
		t.Fatal(err)
	}
	v := &HfTransactionDetailsModel{}
	if err := rsp.ReadData(v); err != nil {
		t.Fatal(err)
	}
	if v == nil {
		return
	}
	for _, item := range v.Items {
		t.Log(ToJsonString(item))
	}
}
|
package galaxy
import (
"../gfx"
"../basic"
"github.com/go-gl/gl/v4.1-core/gl"
"math/rand"
"math"
)
// Galaxy holds the simulated star field.
type Galaxy struct {
	stars []*star
}

const (
	G                 = 0.00001 // gravitational constant (simulation units)
	ETA               = 0.0001  // softening term added to d^3 to avoid singularities
	dt                = 0.001   // integration time step
	DistanceThreshold = 0.5     // node-opening criterion for the tree force walk
	largeCount        = 500     // number of stars initialized by NewGaraxy
)
// NewGaraxy builds a galaxy of largeCount stars placed on a squashed
// random disc, compiles the basic shaders, and activates the program.
// (The name's "Garaxy" spelling is kept — renaming would break callers.)
//
// NOTE(review): the slice is sized largeCount+smallCount but only the
// first largeCount entries are initialized; the remainder stay nil and
// would panic if iterated — confirm whether smallCount is still used.
func NewGaraxy(smallCount int) *Galaxy {
	stars := make([]*star, largeCount+smallCount)
	for i := 0; i < largeCount; i++ {
		// Random polar position; eps squashes the disc vertically.
		r := rand.Float64()*0.5 + 0.000001
		theta := rand.Float64() * 2 * math.Pi
		eps := 0.5
		V := 0.012
		// NOTE(review): V is immediately overwritten to 0, so initial
		// velocities are zero — the 0.012 value is dead.
		V = 0.0
		p := &basic.Point{
			X: r * math.Cos(theta),
			Y: eps * r * math.Sin(theta)}
		// Tangential velocity (currently zero, see above).
		v := &basic.Point{
			X: -eps * V * r * math.Sin(theta),
			Y: V * r * math.Cos(theta)}
		// Mass grows toward the center (1/r^2).
		stars[i] = newStar(p, v,
			1.0/(r*r), i)
	}
	vertShader, err := gfx.NewShaderFromFile("galaxy/shaders/basic.vert", gl.VERTEX_SHADER)
	if err != nil {
		panic(err)
	}
	fragShader, err := gfx.NewShaderFromFile("galaxy/shaders/basic.frag", gl.FRAGMENT_SHADER)
	if err != nil {
		panic(err)
	}
	program, err := gfx.NewProgram(vertShader, fragShader)
	if err != nil {
		panic(err)
	}
	program.Use()
	return &Galaxy{
		stars: stars,
	}
}
// Cnt counts pairwise force evaluations per star (debug instrumentation).
var Cnt = 0

// search walks the quad-tree accumulating the force exerted on s.
// Leaves contribute directly; internal nodes whose angular size
// (width / distance) is within DistanceThreshold could be approximated
// by their center of mass.
// NOTE(review): that approximation call is commented out, so far nodes
// currently contribute no force at all — confirm this is intentional.
func search(n *node, s *star) {
	// calcforce adds the softened gravitational pull of a point mass m
	// at p: G*m1*m2*d / (|d|^3 + ETA).
	calcforce := func(p *basic.Point, m float64) {
		//log.Println(p)
		d := p.Sub(s.Current)
		force := d.Mult(G * s.mass * m / (math.Pow(d.Length(), 3) + ETA))
		//log.Println(force)
		s.force = s.force.Add(force)
		Cnt += 1
	}
	if len(n.stars) == 0 {
		return
	}
	if len(n.stars) == 1 {
		// Leaf: skip self-interaction, otherwise apply directly.
		if s.number == n.stars[0].number {
			return
		}
		other := n.stars[0]
		calcforce(other.Current, other.mass)
		//log.Println(other)
		return
	}
	if (n.xMax-n.xMin)/n.balance.Sub(s.Current).Length() <= DistanceThreshold {
		//calcforce(n.balance, n.mass)
	} else {
		// Node too close/large to approximate: recurse into children.
		for _, child := range n.children {
			search(child, s)
		}
	}
}
// Update advances the simulation one step: rebuild the spatial tree,
// recompute each star's accumulated force via the tree walk, then
// integrate positions with time step dt. The commented block is the
// old O(n^2) all-pairs force loop kept for reference.
func (g *Galaxy) Update() {
	//for i, starFrom := range g.stars {
	//	for j, starTo := range g.stars {
	//		if i == j {
	//			continue
	//		}
	//		d := starFrom.Current.Sub(starTo.Current)
	//		force := d.Mult(G*starFrom.mass*starTo.mass/(math.Pow(d.Length(), 3)+ETA))
	//		//log.Println(force)
	//
	//		starTo.force = starTo.force.Add(force)
	//	}
	//}
	//log.Println(g.stars[0].force.Length(), g.stars[1].force.Length())
	//log.Println(g.stars[0].prev, g.stars[1].prev)
	root := g.Tree()
	for _, star := range g.stars {
		// Reset, then re-accumulate this star's force from the tree.
		star.force = basic.Zero()
		Cnt = 0
		search(root, star)
		//log.Println(Cnt)
	}
	//log.Println(g.stars[0].force.Length(), g.stars[1].force.Length())
	//log.Println("----------------")
	//log.Println("current", g.stars[0].Current, g.stars[1].Current)
	//log.Println("force", g.stars[0].force, g.stars[1].force)
	//log.Println("mass", g.stars[0].mass, g.stars[1].mass)
	//log.Println("prev", g.stars[0].prev, g.stars[1].prev)
	for _, star := range g.stars {
		star.accelerate(dt)
	}
	//log.Println("next", g.stars[0].Current, g.stars[1].Current)
}
// Draw uploads every star's vertex data and renders each star as a
// triangle fan slice of the shared VAO.
//
// Fix: the buffer was created with make(..., LENGTH) and then appended
// to, which prefixed the real vertex data with an equal-sized run of
// zeros — DrawArrays, indexing from 0, rendered those zeros. Allocate
// with zero length and full capacity instead.
func (g *Galaxy) Draw() {
	points := make([]float32, 0, (VertexCount*3)*7*len(g.stars))
	for _, star := range g.stars {
		points = append(points, star.array()...)
	}
	VAO := makeVao(points)
	gl.BindVertexArray(VAO)
	for i := range g.stars {
		gl.DrawArrays(gl.TRIANGLES, int32((VertexCount*3)*7*i), VertexCount*3*7)
	}
}
// DrawDebug is a disabled visualization of the quad-tree: it walked up
// from one node collecting sibling centers of mass and drew them plus
// their bounding boxes. The entire body is commented out; it currently
// does nothing.
func (g *Galaxy) DrawDebug() {
	//nodes := g.Tree()
	//
	//points := make([]*basic.Point, 0)
	//children := make([]*node, 0)
	//n := nodes[400]
	//
	//points = append(points, n.balance)
	//
	//
	//beforeParent := n
	//currentParent := n.parent
	//
	//cnt := 0
	//depth := 0
	//for ; currentParent != nil; {
	//	depth += 1
	//	for _, child := range currentParent.children {
	//		if child == beforeParent {
	//			continue
	//		}
	//		cnt += 1
	//		children = append(children, child)
	//		points = append(points, child.balance)
	//	}
	//	beforeParent = currentParent
	//	currentParent = currentParent.parent
	//}
	//
	////log.Println(depth, cnt)
	//
	//array := make([]float32, (VertexCount*3)*7*len(g.stars))
	//
	//for i, p := range points {
	//	col := float32(i+1)/float32(len(points))
	//	col = 1.0
	//	array = append(array, pointArray(p, col)...)
	//}
	//
	//VAO := makeVao(array)
	//gl.BindVertexArray(VAO)
	//
	//for i := range g.stars {
	//	gl.DrawArrays(gl.TRIANGLES, int32((VertexCount*3)*7*i), VertexCount*3*7)
	//}
	//
	//balanceArray := make([]float32, 0)
	//
	//for _, c := range children {
	//	balanceArray = append(balanceArray, pointLineArray(c)...)
	//}
	//
	//VAO2 := makeVao(balanceArray)
	//gl.BindVertexArray(VAO2)
	//
	//for i := range children {
	//	gl.DrawArrays(gl.LINE_LOOP, int32(4*i), 4)
	//}
	//log.Println(len(points))
}
//func pointLineArray(n *node) []float32 {
// array := make([]float32, 28)
// array[0], array[1], array[2] = n.xMin, n.yMin, 0.0
// array[3], array[4], array[5], array[6] = 1.0, 1.0, 1.0, 1.0
//
// array[7*1+0], array[7*1+1], array[7*1+2] = n.xMin, n.yMax, 0.0
// array[7*1+3], array[7*1+4], array[7*1+5], array[7*1+6] = 1.0, 1.0, 1.0, 1.0
//
// array[7*2+0], array[7*2+1], array[7*2+2] = n.xMax, n.yMax, 0.0
// array[7*2+3], array[7*2+4], array[7*2+5], array[7*2+6] = 1.0, 1.0, 1.0, 1.0
//
// array[7*3+0], array[7*3+1], array[7*3+2] = n.xMax, n.yMin, 0.0
// array[7*3+3], array[7*3+4], array[7*3+5], array[7*3+6] = 1.0, 1.0, 1.0, 1.0
//
// return array
//}
// pointArray tessellates a small circle of radius 0.02 around p into
// VertexCount triangles. Each vertex is 7 floats: x, y, z followed by
// an RGBA color (col, 1, 1, 1). Triangles share the center point and
// two consecutive rim points.
func pointArray(p *basic.Point, col float32) []float32 {
	array := make([]float32, (VertexCount*3)*7)
	for i := 0; i < VertexCount; i++ {
		r := 0.02
		theta := math.Pi * 2.0 * float64(i) / VertexCount
		// Triangle vertex 0: circle center.
		array[(i*3)*7+0], array[(i*3)*7+1], array[(i*3)*7+2] = p.Elements()
		array[(i*3)*7+3], array[(i*3)*7+4], array[(i*3)*7+5], array[(i*3)*7+6] = col, float32(1.0), float32(1.0), float32(1.0)
		// Triangle vertex 1: rim point at angle theta.
		array[(i*3+1)*7+0], array[(i*3+1)*7+1], array[(i*3+1)*7+2] = p.Add(
			&basic.Point{
				X: r * math.Cos(theta),
				Y: r * math.Sin(theta),
			}).Elements()
		array[(i*3+1)*7+3], array[(i*3+1)*7+4], array[(i*3+1)*7+5], array[(i*3+1)*7+6] = col, float32(1.0), float32(1.0), float32(1.0)
		// Triangle vertex 2: rim point at the next angle.
		theta2 := math.Pi * 2.0 * float64(i+1) / VertexCount
		array[(i*3+2)*7+0], array[(i*3+2)*7+1], array[(i*3+2)*7+2] = p.Add(
			&basic.Point{
				X: r * math.Cos(theta2),
				Y: r * math.Sin(theta2),
			}).Elements()
		array[(i*3+2)*7+3], array[(i*3+2)*7+4], array[(i*3+2)*7+5], array[(i*3+2)*7+6] = col, float32(1.0), float32(1.0), float32(1.0)
	}
	return array
}
// makeVao uploads the vertex array to a fresh VBO and wires a VAO with
// two attributes over the interleaved 7-float layout: location 0 is the
// position (3 floats), location 1 the RGBA color (4 floats). Stride and
// offsets are in bytes (4 bytes per float32).
func makeVao(array []float32) uint32 {
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	gl.BufferData(gl.ARRAY_BUFFER, 4*len(array), gl.Ptr(array), gl.STATIC_DRAW)
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	gl.BindVertexArray(vao)
	gl.EnableVertexAttribArray(0)
	gl.EnableVertexAttribArray(1)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	gl.VertexAttribPointer(0, 3, gl.FLOAT, false, 7*4, gl.PtrOffset(0))
	gl.VertexAttribPointer(1, 4, gl.FLOAT, false, 7*4, gl.PtrOffset(3*4))
	return vao
}
|
package wrpc
/**
@author shuai.chen
@created 2020年1月8日
连接池实现
**/
import (
"sync"
"reflect"
"errors"
)
// Defaults applied when the caller passes non-positive sizing values.
const MAX_SIZE int = 8
const MAX_ACTIVE_SIZE int = 4
const WAIT_TIMEOUT int = 10000 //ms
// CreateFuncType is the factory invoked to create a new pooled object.
type CreateFuncType func(...string) (interface{}, error)
// pool block
// PoolBlock pairs an idle-object queue with a count of objects created.
type PoolBlock struct {
	List *Queue // idle-object queue
	Count int // number of objects created so far (reference count)
}
//base pool struct
// BasePool holds configuration and synchronization shared by Pool and
// KeyedPool.
type BasePool struct {
	maxSize int
	maxActiveSize int
	waitTimeout int //Millisecond
	function CreateFuncType
	args []string
	mutex sync.Mutex
}
//pool struct
// Pool is a single-queue object pool.
type Pool struct {
	BasePool
	pb *PoolBlock
}
// NewPool builds a Pool backed by the given factory function.
// Non-positive sizing arguments fall back to the package defaults.
func NewPool(function CreateFuncType, maxSize int, maxActiveSize int, waitTimeout int) *Pool{
	if maxSize <= 0 {
		maxSize = MAX_SIZE
	}
	if maxActiveSize <= 0 {
		maxActiveSize = MAX_ACTIVE_SIZE
	}
	if waitTimeout <= 0 {
		waitTimeout = WAIT_TIMEOUT
	}
	base := BasePool{
		function:      function,
		maxSize:       maxSize,
		maxActiveSize: maxActiveSize,
		waitTimeout:   waitTimeout,
	}
	return &Pool{BasePool: base, pb: &PoolBlock{List: NewQueue(), Count: 0}}
}
/**
pool interval method
**/
// getObj creates a fresh object via the configured factory, forwarding
// whatever args were stored on the pool.
func (p *Pool) getObj() (interface{}, error){
	return p.function(p.args...)
}
// putObj returns obj to the idle queue, or closes it on a separate
// goroutine when the pool already holds maxSize objects.
//
// Fix: the unlock is now deferred so the mutex is released even if
// List.Put panics (the original explicit Unlock would be skipped).
func (p *Pool) putObj(obj *Element){
	p.mutex.Lock()
	defer p.mutex.Unlock()
	if p.Size() < p.maxSize {
		p.pb.List.Put(obj)
	} else {
		go closeObj(obj)
	}
}
/**
pool external method
**/
// Size reports how many idle objects are currently queued.
// NOTE(review): reads the queue without taking p.mutex; confirm Queue.Length
// is safe for concurrent use or call this only under the lock.
func (p *Pool) Size() int{
	return p.pb.List.Length()
}
// Clear drains every idle object from the pool, closing each one on its
// own goroutine, and resets the created-object counter.
func (p *Pool) Clear(){
	p.mutex.Lock()
	for e := p.pb.List.Get(); e != nil; e = p.pb.List.Get() {
		go closeObj(e)
	}
	p.pb.Count = 0
	p.mutex.Unlock()
}
// Borrow hands out an idle object, lazily creating one when the queue is
// empty and fewer than maxSize objects exist, then waits up to
// waitTimeout milliseconds for an object to become available.
//
// NOTE(review): GetWait appears to block while p.mutex is still held, so
// a concurrent Return/putObj (which also locks) cannot refill the queue
// during the wait — confirm Queue.GetWait semantics; this looks like a
// potential self-deadlock under contention.
func (p *Pool) Borrow() (*Element, error){
	p.mutex.Lock() // lock
	defer p.mutex.Unlock() // unlock
	if p.Size() <= 0 && p.pb.Count < p.maxSize{
		genObj, err := p.getObj()
		if err != nil{
			return nil, err
		}
		p.pb.List.PutValue(genObj)
		p.pb.Count += 1
	}
	obj := p.pb.List.GetWait(p.waitTimeout)
	if obj != nil{
		return obj, nil
	}
	return nil, errors.New("Queue is empty.")
}
// Return gives obj back to the pool; once the idle queue has reached
// maxActiveSize the object is destroyed instead of being re-queued.
func (p *Pool) Return(obj *Element){
	if p.Size() >= p.maxActiveSize {
		p.Destroy(obj)
		return
	}
	p.putObj(obj)
}
// Destroy closes obj on its own goroutine and decrements the pool's
// created-object counter. A nil obj is ignored.
//
// Fix: early return for nil plus a deferred unlock, so the mutex is
// released even if closeObj scheduling panics.
func (p *Pool) Destroy(obj *Element){
	if obj == nil {
		return
	}
	p.mutex.Lock()
	defer p.mutex.Unlock()
	go closeObj(obj)
	if p.pb.Count > 0 {
		p.pb.Count -= 1
	}
}
//keyedPool struct
// KeyedPool maintains one PoolBlock per string key; the BasePool sizing
// limits apply to each key independently.
type KeyedPool struct {
	BasePool
	pb map[string]*PoolBlock
}
// NewKeyedPool builds a KeyedPool backed by the given factory function.
// Non-positive sizing arguments fall back to the package defaults; all
// limits are applied per key.
func NewKeyedPool(function CreateFuncType, maxSize int, maxActiveSize int, waitTimeout int) *KeyedPool{
	if maxSize <= 0 {
		maxSize = MAX_SIZE
	}
	if maxActiveSize <= 0 {
		maxActiveSize = MAX_ACTIVE_SIZE
	}
	if waitTimeout <= 0 {
		waitTimeout = WAIT_TIMEOUT
	}
	return &KeyedPool{
		BasePool: BasePool{
			function:      function,
			maxSize:       maxSize,
			maxActiveSize: maxActiveSize,
			waitTimeout:   waitTimeout,
		},
		pb: map[string]*PoolBlock{},
	}
}
/**
keyedPool interval method
**/
// check lazily creates the PoolBlock for key.
// NOTE(review): mutates the map, so callers must hold p.mutex.
func (p *KeyedPool) check(key string){
	_, ok := p.pb[key]
	if !ok{
		p.pb[key] = &PoolBlock{List:NewQueue(), Count:0}
	}
}
// getObj creates a fresh object via the configured factory. The key is
// passed indirectly through p.args (set by Borrow under the lock).
func (p *KeyedPool) getObj() (interface{}, error){
	return p.function(p.args...)
}
// putObj returns obj to key's idle queue, or closes it asynchronously
// when that key's queue already holds maxSize objects.
//
// Fix: the unlock is now deferred so the mutex is released even if
// check/Put panics (the original explicit Unlock would be skipped).
func (p *KeyedPool) putObj(obj *Element, key string){
	p.mutex.Lock()
	defer p.mutex.Unlock()
	p.check(key)
	if p.Size(key) < p.maxSize {
		p.pb[key].List.Put(obj)
	} else {
		go closeObj(obj)
	}
}
/**
keyedPool external method
**/
// Size reports how many idle objects are queued under key; unknown keys
// report 0.
// NOTE(review): reads the map without p.mutex — this races with check()
// map writes in Borrow/putObj when called from outside the lock; confirm
// intended usage.
func (p *KeyedPool) Size(key string) int{
	kp, ok := p.pb[key]
	if ok{
		return kp.List.Length()
	}
	return 0
}
// Clear drains the idle queue of every key, closing each object on its
// own goroutine, and resets all created-object counters.
func (p *KeyedPool) Clear(){
	p.mutex.Lock()
	for _, block := range p.pb {
		for e := block.List.Get(); e != nil; e = block.List.Get() {
			go closeObj(e)
		}
		block.Count = 0
	}
	p.mutex.Unlock()
}
// Borrow hands out an idle object for key, lazily creating one when the
// queue is empty and fewer than maxSize objects exist for that key, then
// waits up to waitTimeout milliseconds.
//
// NOTE(review): the key reaches the factory by overwriting the shared
// p.args field — safe only because the mutex serializes Borrow calls;
// passing the key explicitly would be clearer. As in Pool.Borrow,
// GetWait appears to wait while the mutex is held — confirm Queue.GetWait
// semantics (possible self-deadlock).
func (p *KeyedPool) Borrow(key string) (*Element, error){
	p.mutex.Lock() // lock
	defer p.mutex.Unlock() // unlock
	p.check(key)
	if p.Size(key) <= 0 && p.pb[key].Count < p.maxSize{
		p.args = []string{key}
		genObj, err := p.getObj()
		if err != nil{
			return nil, err
		}
		p.pb[key].List.PutValue(genObj)
		p.pb[key].Count += 1
	}
	obj := p.pb[key].List.GetWait(p.waitTimeout)
	if obj != nil{
		return obj, nil
	}
	return nil, errors.New("Queue is empty.")
}
// Return gives obj back to key's pool; once that queue has reached
// maxActiveSize the object is destroyed instead of being re-queued.
func (p *KeyedPool) Return(obj *Element, key string){
	if p.Size(key) >= p.maxActiveSize {
		p.Destroy(obj, key)
		return
	}
	p.putObj(obj, key)
}
// Destroy closes obj on its own goroutine and decrements the counter for
// key. A nil obj is ignored.
//
// Fixes: the original dereferenced p.pb[key] without checking the key
// exists — calling Destroy with a never-borrowed key would panic on a
// nil *PoolBlock. The unlock is also deferred now.
func (p *KeyedPool) Destroy(obj *Element, key string){
	if obj == nil {
		return
	}
	p.mutex.Lock()
	defer p.mutex.Unlock()
	go closeObj(obj)
	if block, ok := p.pb[key]; ok && block.Count > 0 {
		block.Count -= 1
	}
}
// closeObj invokes the Close method of obj.Value via reflection when one
// exists; objects without a Close method are simply dropped.
func closeObj(obj *Element){
	if obj == nil {
		return
	}
	closer := reflect.ValueOf(obj.Value).MethodByName("Close")
	if closer.IsValid() {
		closer.Call(nil)
	}
}
|
package oauth
import (
"net/http"
"net/url"
"strings"
"testing"
)
// Make sure Authorize survives odd token values (e.g. "+∞") without
// panics such as nil pointer dereferences.
func TestAuthorize(t *testing.T) {
	consumer := &Consumer{"abc", "123"}
	token := &Token{"xyz", "+∞"}
	in, _ := http.NewRequest("POST", "http://example.com", nil)
	defer func() {
		if r := recover(); r != nil {
			t.Error(r)
		}
	}()
	consumer.Authorize(in, token)
}
// Test example from https://dev.twitter.com/oauth/overview/creating-signatures
// Signs the canned Twitter status-update request with fixed credentials,
// nonce and timestamp, and compares the resulting signature against the
// documented, already percent-encoded value.
func TestSignature(t *testing.T) {
	method := "POST"
	body := strings.NewReader("status=Hello%20Ladies%20%2b%20Gentlemen%2c%20a%20signed%20OAuth%20request%21")
	uri := "https://api.twitter.com/1/statuses/update.json?include_entities=true"
	in, _ := http.NewRequest(method, uri, body)
	// Nonce and timestamp enter the signature base string through this
	// pre-built Authorization header.
	in.Header.Set("Authorization", `OAuth `+
		`oauth_consumer_key="xvz1evFS4wEEPTGEFPHBog",`+
		`oauth_nonce="kYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg",`+
		`oauth_signature_method="HMAC-SHA1",`+
		`oauth_timestamp="1318622958",`+
		`oauth_token="370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb",`+
		`oauth_version="1.0"`)
	out := "tnnArxj06cWHq44gCs1OSKk%2FjLY%3D"
	// Consumer {key, secret} and token {key, secret} from the example.
	consumer := &Consumer{
		"xvz1evFS4wEEPTGEFPHBog",
		"kAcSOqF21Fu85e7zjz7ZN2U4ZRhfV3WpwPAoE3Z7kBw",
	}
	token := &Token{
		"370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb",
		"LswwdoUaIvS8ltyTt5jkRh4J50vUPVVHtR2YPi5kE",
	}
	if a := consumer.Signature(in, token); a != out {
		t.Errorf("Signature() should be \n%v\n not %v", out, a)
	}
}
// Test example from RFC 5849: query string dropped, percent-encoding in
// the path preserved.
func TestBaseUri(t *testing.T) {
	in, _ := url.Parse("http://example.com/r%20v/X?id=123")
	const out = "http://example.com/r%20v/X"
	got := baseUri(in)
	if got != out {
		t.Errorf("baseUri(%v) should be %v not %v", in, out, got)
	}
}
// Test example from https://dev.twitter.com/oauth/overview/percent-encoding-parameters
func TestEncode(t *testing.T) {
	cases := map[string]string{
		`Ladies + Gentlemen`: `Ladies%20%2B%20Gentlemen`,
		`An encoded string!`: `An%20encoded%20string%21`,
		`Dogs, Cats & Mice`:  `Dogs%2C%20Cats%20%26%20Mice`,
		`☃`:                  `%E2%98%83`,
	}
	for in, out := range cases {
		got := encode(in)
		if got != out {
			t.Errorf("encode(%v) should be %v not %v", in, out, got)
		}
	}
}
// Test example from RFC 5849: parameters are collected from the query
// string, the form body and the Authorization header (minus realm and
// oauth_signature), then sorted and percent-encoded into the expected
// normalized string.
func TestRequestParameters(t *testing.T) {
	method := "POST"
	body := strings.NewReader("c2&a3=2+q")
	uri := "https://example.com/request?b5=%3D%253D&a3=a&c%40=&a2=r%20b"
	in, _ := http.NewRequest(method, uri, body)
	in.Header.Set("Authorization", `OAuth realm="Example",`+
		`oauth_consumer_key="9djdj82h48djs9d2",`+
		`oauth_token="kkk9d7dh3k39sjv7",`+
		`oauth_signature_method="HMAC-SHA1",`+
		`oauth_timestamp="137131201",`+
		`oauth_nonce="7d8f3e4a",`+
		`oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D"`)
	// Expected normalized parameter string from the RFC example.
	out := `a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=` +
		`9djdj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-S` +
		`HA1&oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7`
	if a := requestParameters(in); a != out {
		t.Errorf("requestParameters() should be \n%v\n not \n%v\n", out, a)
	}
}
|
package main;
import (
"encoding/json"
"strconv"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
)
// db part
// Tx is the ORM model for rows in the "txes" table.
//
// Fix: the original tags were written `gorm: "..."` with a space after the
// colon. Go's struct-tag syntax requires `key:"value"` with no space, so
// reflect.StructTag.Get("gorm") returned "" and gorm silently ignored all
// of these constraints (go vet flags this).
type Tx struct {
	Number uint64 `gorm:"not null"`
	Hash   string `gorm:"not null"`
	Data   string `gorm:"not null;unique;index"`
}

// to_json serializes the transaction to a JSON string, aborting via
// assert on marshalling errors.
func (tx *Tx) to_json() string {
	data, err := json.Marshal(tx)
	assert(err)
	return string(data)
}
type Allblue struct {}
// read returns page (0-based) of 10 transactions ordered by block number,
// each serialized as JSON.
//
// The offset is now bound as a query parameter instead of concatenated
// into the SQL text, matching the parameterized style used in search.
func (a *Allblue) read(page int) []string {
	db, err := gorm.Open("postgres", "dbname=allblue sslmode=disable")
	assert(err)
	defer db.Close()
	db.LogMode(true)
	var txs []Tx
	db.Raw("SELECT * FROM txes ORDER BY number OFFSET ? LIMIT 10", page*10).Scan(&txs)
	var res []string
	for _, tx := range txs {
		res = append(res, tx.to_json())
	}
	return res
}
// search returns page (0-based) of 10 transactions whose data matches the
// case-insensitive POSIX regex ctx, each serialized as JSON.
//
// SECURITY fix: the original concatenated ctx directly into the SQL
// string ("... where data ~* '" + ctx + "' ..."), which is a classic SQL
// injection. ctx and the offset are now bound as query parameters.
func (a *Allblue) search(ctx string, page int) []string {
	db, err := gorm.Open("postgres", "dbname=allblue sslmode=disable")
	assert(err)
	defer db.Close()
	var txs []Tx
	db.Raw("SELECT * FROM txes WHERE data ~* ? ORDER BY number OFFSET ? LIMIT 10",
		ctx, page*10).Scan(&txs)
	var res []string
	for _, tx := range txs {
		res = append(res, tx.to_json())
	}
	return res
}
|
package model
// OptionTime is a selectable time slot exposed over JSON.
type OptionTime struct {
	Id int `json:"id"`
	Start string `json:"start"` // slot start; format not constrained here — defined by the producer
	End string `json:"end"` // slot end
	Disabled bool `json:"disabled"` // true when the slot cannot be selected
}
|
package goutils
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
)
// CheckFile checks if file exists returns "dir"|"file"|""
// CheckFile reports what filename refers to: "dir" for a directory,
// "file" for anything else that stats successfully, and "" when the path
// cannot be stat'ed (e.g. it does not exist).
func CheckFile(filename string) string {
	info, err := os.Stat(filename)
	switch {
	case err != nil:
		return ""
	case info.IsDir():
		return "dir"
	default:
		return "file"
	}
}
// ReadDir returns the entries of dir, each joined with the directory
// path. An unreadable directory yields an empty (non-nil) slice.
func ReadDir(dir string) []string {
	result := []string{}
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return result
	}
	for _, entry := range entries {
		result = append(result, path.Join(dir, entry.Name()))
	}
	return result
}
// ReadDirRecursive walks dir recursively and returns the paths of all
// files with a ".jpg" extension. A walk error aborts the walk and the
// paths collected so far are returned. (The previous doc comment
// mentioned a depth parameter that never existed.)
//
// Fix: the original used strings.Contains(name, ".jpg"), which also
// matched names like "a.jpg.bak" and directories containing ".jpg";
// matching is now on the exact file extension, and directories are
// skipped.
func ReadDirRecursive(dir string) []string {
	filenames := []string{}
	filepath.Walk(dir,
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.IsDir() && filepath.Ext(info.Name()) == ".jpg" {
				filenames = append(filenames, path)
			}
			return nil
		})
	return filenames
}
// MkdirIfNot creates dir (mode 0777, subject to umask) when it does not
// yet exist. It is a no-op for an existing path and returns the creation
// error otherwise.
//
// Fix: the original ignored the os.Mkdir error and recursed into itself
// to re-check — when creation keeps failing (e.g. the parent directory is
// missing) that recursion never terminates. The Mkdir error is now
// returned directly.
func MkdirIfNot(dir string) error {
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		return os.Mkdir(dir, 0777)
	}
	return nil
}
|
package cascade
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/rs/zerolog"
"github.com/sirkon/goproxy/internal/errors"
"github.com/sirkon/goproxy"
)
// Compile-time assertion that cascadeModule implements goproxy.Module.
var _ goproxy.Module = &cascadeModule{}
// cascadeModule serves module data for mod by forwarding requests for
// reqMod to an upstream Go module proxy at url, optionally adding HTTP
// basic auth credentials.
type cascadeModule struct {
	mod string
	reqMod string
	url string
	client *http.Client
	basicAuth struct {
		ok bool // when true, user/password are attached to every request
		user string
		password string
	}
}
// ModulePath returns the module path this instance serves.
func (s *cascadeModule) ModulePath() string {
	return s.mod
}
// Versions fetches the upstream @v/list endpoint and returns every
// non-empty, whitespace-trimmed line as a version tag.
func (s *cascadeModule) Versions(ctx context.Context, prefix string) (tags []string, err error) {
	resp, err := s.makeRequest(ctx, fmt.Sprintf("%s/%s/@v/list", s.url, s.reqMod))
	if err != nil {
		return nil, err
	}
	defer func() {
		if cerr := resp.Body.Close(); cerr != nil {
			zerolog.Ctx(ctx).Error().Err(cerr).Msg("closing list request response")
		}
	}()
	raw, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, errors.Wrapf(err, "cascade reading out list request")
	}
	var versions []string
	for _, line := range strings.Split(string(raw), "\n") {
		if v := strings.TrimSpace(line); v != "" {
			versions = append(versions, v)
		}
	}
	return versions, nil
}
// Stat fetches the upstream @v/<rev>.info endpoint and decodes it into a
// RevInfo.
func (s *cascadeModule) Stat(ctx context.Context, rev string) (*goproxy.RevInfo, error) {
	resp, err := s.makeRequest(ctx, fmt.Sprintf("%s/%s/@v/%s.info", s.url, s.reqMod, rev))
	if err != nil {
		return nil, err
	}
	defer func() {
		if cerr := resp.Body.Close(); cerr != nil {
			zerolog.Ctx(ctx).Error().Err(cerr).Msg("closing stat request response")
		}
	}()
	info := new(goproxy.RevInfo)
	if err := json.NewDecoder(resp.Body).Decode(info); err != nil {
		return nil, errors.Wrapf(err, "cascade decoding stat data for %s", s.reqMod)
	}
	return info, nil
}
// GoMod fetches the upstream @v/<version>.mod endpoint and returns the
// raw go.mod contents.
func (s *cascadeModule) GoMod(ctx context.Context, version string) (data []byte, err error) {
	resp, err := s.makeRequest(ctx, fmt.Sprintf("%s/%s/@v/%s.mod", s.url, s.reqMod, version))
	if err != nil {
		return nil, err
	}
	defer func() {
		if cerr := resp.Body.Close(); cerr != nil {
			zerolog.Ctx(ctx).Error().Err(cerr).Msg("closing mod response")
		}
	}()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, errors.Wrapf(err, "cascade reading out mod request for %s", s.mod)
	}
	return body, nil
}
// Zip fetches the upstream @v/<version>.zip endpoint and streams the
// archive back. The caller owns the returned ReadCloser and must close
// it; on non-200 responses makeRequest closes the body itself.
func (s *cascadeModule) Zip(ctx context.Context, version string) (file io.ReadCloser, err error) {
	resp, err := s.makeRequest(ctx, fmt.Sprintf("%s/%s/@v/%s.zip", s.url, s.reqMod, version))
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
// makeRequest performs a context-bound GET against url, attaching basic
// auth when configured. A 200 response is returned with its body open;
// any other status is drained, closed and converted into an error that
// carries the body text.
func (s *cascadeModule) makeRequest(ctx context.Context, url string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, errors.Wrapf(err, "cascade making new request to %s", url)
	}
	if s.basicAuth.ok {
		req.SetBasicAuth(s.basicAuth.user, s.basicAuth.password)
	}
	resp, err := s.client.Do(req.WithContext(ctx))
	if err != nil {
		return nil, errors.Wrapf(err, "cascade getting response from %s", url)
	}
	if resp.StatusCode == http.StatusOK {
		return resp, nil
	}
	// Non-200: the caller never sees the response, so close it here.
	defer func() {
		if cerr := resp.Body.Close(); cerr != nil {
			zerolog.Ctx(ctx).Error().Err(cerr).Msgf("failed to close response body from %s", url)
		}
	}()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, errors.Wrapf(err, "cascade getting a response from %s", url)
	}
	return nil, errors.Newf("cascade unexpected status code %d (%s)", resp.StatusCode, string(body))
}
|
// Package bot exports a Bot interface to manage bots
// for differents platforms easily.
package bot
import (
"fmt"
"github.com/danielkvist/botio/client"
)
// Bot is an interface to manage bots for different platforms.
type Bot interface {
	// Connect prepares the bot: API client, server address, auth token,
	// worker capacity and the response used when a command is unknown.
	Connect(c client.Client, addr string, token string, cap int, defaultResponse string) error
	Start() error
	Listen() error
	Stop() error
}
// Response represents a bot response.
// NOTE(review): both fields are unexported and unused in this chunk —
// confirm they are consumed elsewhere in the package.
type Response struct {
	id string
	text string
}
// Create returns a bot that satisfies the Bot interface for the requested
// platform. Unsupported platforms yield an error.
func Create(platform string) (Bot, error) {
	if platform == "telegram" {
		return &Telegram{}, nil
	}
	if platform == "discord" {
		return &Discord{}, nil
	}
	return nil, fmt.Errorf("platform %q not supported", platform)
}
|
package library
import (
"encoding/json"
"errors"
)
// MixInt is an int that unmarshals from either a JSON number (7) or a
// JSON string containing a number ("7").
type MixInt int

// UnmarshalJSON first tries the payload as a bare integer, then as a
// quoted integer; anything else is rejected.
func (u *MixInt) UnmarshalJSON(bs []byte) error {
	var n int
	if err := json.Unmarshal(bs, &n); err == nil {
		*u = MixInt(n)
		return nil
	}
	var quoted string
	if err := json.Unmarshal(bs, &quoted); err != nil {
		return errors.New("expected a string or an integer")
	}
	if err := json.Unmarshal([]byte(quoted), &n); err != nil {
		return err
	}
	*u = MixInt(n)
	return nil
}
|
// Package main - задание шестого урока для курса go-core.
package main
import (
"fmt"
"go.core/lesson6/pkg/cache/local"
"go.core/lesson6/pkg/crawler"
"go.core/lesson6/pkg/crawler/spider"
"go.core/lesson6/pkg/engine"
"go.core/lesson6/pkg/index"
"go.core/lesson6/pkg/storage"
"go.core/lesson6/pkg/storage/bstree"
"strings"
)
// main wires the search engine together: loads previously cached
// documents, starts a background crawl of the seed URLs, and serves an
// interactive query loop on stdin.
//
// NOTE(review): the crawl goroutine writes e.Index and e.Storage while
// the query loop reads them concurrently, with no synchronization visible
// here — confirm those types are safe for concurrent use.
func main() {
	urls := []string{
		"https://altech.online",
		"https://www.coffeestainstudios.com",
		"https://golangs.org",
		"https://www.cyberpunk.net/ru/ru/",
	}
	var bt bstree.Tree
	cf := local.Cache{Path: "../data/storage.txt"}
	// Engine wiring: inverted index, binary-tree storage, local file cache.
	e := engine.Engine{
		Index: index.New(),
		Storage: storage.New(&bt), // storage for fast search/insert (binary tree)
		Cache: &cf, // local cache
	}
	// Try to load cached documents from the storage file.
	if err := e.Load(); err != nil {
		fmt.Println(err)
		return
	}
	// Build the crawler around the spider scanner.
	var s spider.Scanner
	c := crawler.New(s)
	// Scan in the background, in parallel with the query loop below.
	go func() {
		docs, err := c.Scan(urls, 2)
		if err != nil {
			return
		}
		// Refresh the in-memory storage and index with the crawl results.
		e.Index.Create(docs)
		e.Storage.Create(docs)
		// Persist the scanner output to the storage file as JSON.
		if e.Save(docs) != nil {
			fmt.Println("Не удалось сохранить данные в хранилище")
			return
		}
	}()
	var str string
	for {
		fmt.Print("Введите поисковый запрос: ")
		_, err := fmt.Scanf("%s\n", &str)
		if err != nil {
			fmt.Println("Программа завершила работу.")
			return
		}
		IDs := e.Index.Find(strings.ToLower(str))
		var res []crawler.Document
		for _, id := range IDs {
			// `ok != false` is a roundabout spelling of `ok`.
			if d, ok := e.Storage.Document(id); ok != false {
				res = append(res, d)
			}
		}
		fmt.Printf("Результаты поиска по запросу \"%s\":\nНайдено всего: %d\n", str, len(res))
		for _, doc := range res {
			fmt.Println(doc)
		}
	}
}
|
package server
// Fixed buffer sizes for the TtyWrite record below. The 253-byte pod /
// container / namespace sizes presumably mirror Kubernetes name-length
// limits — TODO confirm against the producer of these records.
const (
	BufferSize = 256
	HostnameSize = 64
	PodNameSize = 253
	ContainerNameSize = 253
	PodNamespaceSize = 253
	PodUIDSize = 32
)
// TtyWrite is a fixed-size record describing one tty write event together
// with the Kubernetes identity of the writer. Every field is a fixed-size
// array, giving the struct a stable binary layout.
type TtyWrite struct {
	Count uint32 // presumably the number of valid bytes in Buffer — confirm with producer
	Buffer [BufferSize]byte
	Timestamp uint64
	Inode uint64
	MountNamespaceInum uint64
	Hostname [HostnameSize]byte
	ContainerName [ContainerNameSize]byte
	PodName [PodNameSize]byte
	PodNamespace [PodNamespaceSize]byte
	PodUID [PodUIDSize]byte
}
|
package main
import (
"strings"
"testing"
)
// encoded is a 2x2 image with four layers of pixel digits; layers is its
// expected decoded form (one Layer per group of 4 digits).
var encoded = "0222112222120000"
var layers = []Layer{
	Layer{2, 2, []Pixel{0, 2, 2, 2}},
	Layer{2, 2, []Pixel{1, 1, 2, 2}},
	Layer{2, 2, []Pixel{2, 2, 1, 2}},
	Layer{2, 2, []Pixel{0, 0, 0, 0}},
}
// Parsing the encoded fixture must yield exactly the expected layers.
func TestParse(t *testing.T) {
	decoded, err := Parse(2, 2, strings.NewReader(encoded))
	switch {
	case err != nil:
		t.Errorf("Parse(%v) error: %s", encoded, err)
	case len(decoded) != len(layers):
		t.Errorf("got %d layers; expected %d", len(decoded), len(layers))
	default:
		for i := range layers {
			if !LayerEquals(decoded[i], layers[i]) {
				t.Errorf("layers[%d] = %v; expected %v", i, decoded[i], layers[i])
			}
		}
	}
}
// Flattening the fixture layers must yield the 0,1,1,0 composite.
func TestFlatten(t *testing.T) {
	want := Layer{2, 2, []Pixel{0, 1, 1, 0}}
	flat, err := Flatten(layers)
	if err != nil {
		t.Errorf("Flatten(%v) error: %s", encoded, err)
		return
	}
	if !LayerEquals(flat, want) {
		t.Errorf("Flatten(%v) = %v; expected = %v", encoded, flat, want)
	}
}
// LayerEquals returns true if the two given layers have the same
// dimensions and identical pixel data, false otherwise.
//
// Fix: the original indexed b.pixels with a's indices without comparing
// slice lengths — if the pixel slices disagree in length (regardless of
// the width/height fields) that panics with index out of range.
func LayerEquals(a, b Layer) bool {
	if a.width != b.width || a.height != b.height {
		return false
	}
	if len(a.pixels) != len(b.pixels) {
		return false
	}
	for i := range a.pixels {
		if a.pixels[i] != b.pixels[i] {
			return false
		}
	}
	return true
}
|
package gomeh
import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"testing"
"time"
)
// readKey resolves the Meh API key: the meh_apikey environment variable
// takes precedence (useful on CI); otherwise the ./apikey file is read
// when present. The result is trimmed of spaces and linebreaks; an empty
// string means no key was found.
func readKey() string {
	if env := os.Getenv("meh_apikey"); env != "" {
		return strings.Trim(env, " \n")
	}
	keyFile := "./apikey"
	if _, err := os.Stat(keyFile); err != nil {
		return ""
	}
	buf, err := ioutil.ReadFile(keyFile)
	if err != nil {
		log.Fatal(err)
	}
	return strings.Trim(string(buf), " \n")
}
// ExampleGetMeh_output performs a live GetMeh call and prints the result.
// Note: without a trailing "// Output:" comment, `go test` compiles this
// example but never executes or checks it.
func ExampleGetMeh_output() {
	apikey := readKey()
	m, err := GetMeh(apikey)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m)
}
// TestGetMeh performs a live API call and verifies a deal is returned.
// (Fixes the "retreive" typo in the failure message.)
func TestGetMeh(t *testing.T) {
	apikey := readKey()
	m, err := GetMeh(apikey)
	if err != nil {
		t.Error("Failed to retrieve data from API.")
	}
	if len(m.Deal.Title) == 0 {
		t.Error("Missing deal.")
	}
}
// A deal carrying a SoldOutAt timestamp must report as sold out.
func TestSoldOut(t *testing.T) {
	m := Meh{Deal: Deal{SoldOutAt: time.Now()}}
	if !m.SoldOut() {
		t.Error("Failed to correctly recognize as Sold Out.")
	}
}
// String formatting: available deals render as "Title - $Price"; sold-out
// deals gain a "[Sold Out] " prefix.
func TestString(t *testing.T) {
	available := Meh{
		Deal: Deal{
			Title: "Product",
			Items: []Item{{Price: 5}},
		},
	}
	if fmt.Sprint(available) != "Product - $5" {
		t.Error("Failed to return available product string.")
	}
	soldOut := Meh{
		Deal: Deal{
			Title:     "Product2",
			SoldOutAt: time.Now(),
			Items:     []Item{{Price: 5}},
		},
	}
	if fmt.Sprint(soldOut) != "[Sold Out] Product2 - $5" {
		t.Error("Failed to return available product string.")
	}
}
|
package privacy_v2
import (
"bytes"
"fmt"
"testing"
"incognito-chain/privacy/coin"
"incognito-chain/privacy/key"
"incognito-chain/privacy/operation"
"incognito-chain/common"
"incognito-chain/key/incognitokey"
"github.com/stretchr/testify/assert"
)
// TEST DURATION NOTE : 100 iterations of 1-to-12 coins = 15sec
// Test tuning knobs: loop count and the range of output-coin counts
// exercised per iteration.
var (
	numOfLoops = 100
	minOutCoinCount = 1
	maxOutCoinCount = 12
)
// Package-level initializer (runs before init()): wires the payment v2
// logger so the proof code can log during tests.
var _ = func() (_ struct{}) {
	fmt.Println("This runs before init() starting payment v2 logger for test !")
	Logger.Init(common.NewBackend(nil).Logger("test", true))
	return
}()
// TestPaymentV2InitAndMarshalling builds random non-private payment
// proofs and checks they survive a Bytes/SetBytes round trip unchanged.
func TestPaymentV2InitAndMarshalling(t *testing.T) {
	for loop:=0;loop<=numOfLoops;loop++{
		// Random coin count in [minOutCoinCount, maxOutCoinCount].
		outCoinCount := common.RandInt() % (maxOutCoinCount-minOutCoinCount+1) + minOutCoinCount
		// make some dummy private keys for our dummy users
		dummyPrivateKeys := make([]*operation.Scalar,outCoinCount)
		for i,_ := range dummyPrivateKeys{
			dummyPrivateKeys[i] = operation.RandomScalar()
		}
		// each of these dummy users are provided a (not confirmed by blockchain) coinv2 of value 3000
		// paymentAdress is persistent and held by this user, while the OTA is inside the coin
		paymentInfo := make([]*key.PaymentInfo, len(dummyPrivateKeys))
		for i, pk := range dummyPrivateKeys {
			pkb := pk.ToBytes()
			paymentInfo[i] = key.InitPaymentInfo(key.GeneratePaymentAddress(pkb[:]),3000,[]byte{})
		}
		inputCoins := make([]coin.PlainCoin, outCoinCount)
		for i:=0;i<outCoinCount;i++ {
			var err error
			inputCoins[i],err = coin.NewCoinFromPaymentInfo(paymentInfo[i])
			if err!=nil{
				fmt.Println(err)
			}
		}
		// in this test, each user will send themselves 2000 and the rest is txfee
		for _,pInf := range paymentInfo{
			pInf.Amount = 2000
		}
		outputCoins := make([]*coin.CoinV2, outCoinCount)
		for i:=0;i<outCoinCount;i++ {
			var err error
			outputCoins[i],err = coin.NewCoinFromPaymentInfo(paymentInfo[i])
			if err!=nil{
				fmt.Println(err)
			}
		}
		// prove and verify without privacy (no bulletproof)
		// also marshal to byte and back
		proof, err := Prove(inputCoins, outputCoins, false, paymentInfo)
		assert.Equal(t, nil, err)
		b := proof.Bytes()
		temp := new(PaymentProofV2)
		err = temp.SetBytes(b)
		b2 := temp.Bytes()
		// Round-tripped bytes must match the original serialization.
		assert.Equal(t, true, bytes.Equal(b2, b))
		// correct,err := proof.Verify(false, nil, uint64(1000*outCoinCount), byte(0), nil, false, nil)
		// assert.Equal(t, nil, err)
		// assert.Equal(t,true,correct)
	}
}
// TestPaymentV2ProveWithPrivacy builds private (bulletproof-backed)
// payment proofs, verifies them, then checks that corrupted and entirely
// random byte blobs never deserialize into a sane, valid proof.
//
// NOTE(review): unlike the marshalling test above, outCoinCount is drawn
// once *outside* the loop, so all iterations reuse one coin count —
// confirm whether that is intentional.
func TestPaymentV2ProveWithPrivacy(t *testing.T) {
	outCoinCount := common.RandInt() % (maxOutCoinCount-minOutCoinCount+1) + minOutCoinCount
	for loop:=0;loop<numOfLoops;loop++{
		// make some dummy private keys for our dummy users
		dummyPrivateKeys := make([]*key.PrivateKey,outCoinCount)
		for i := 0; i < outCoinCount; i += 1 {
			privateKey := key.GeneratePrivateKey(common.RandBytes(32))
			dummyPrivateKeys[i] = &privateKey
		}
		// each of these dummy users are provided a (not confirmed by blockchain) coinv2 of value 3000
		// paymentAdress is persistent and held by this user, while the OTA is inside the coin
		paymentInfo := make([]*key.PaymentInfo, len(dummyPrivateKeys))
		keySets := make([]*incognitokey.KeySet,len(dummyPrivateKeys))
		for i, _ := range dummyPrivateKeys {
			keySets[i] = new(incognitokey.KeySet)
			err := keySets[i].InitFromPrivateKey(dummyPrivateKeys[i])
			assert.Equal(t, nil, err)
			paymentInfo[i] = key.InitPaymentInfo(keySets[i].PaymentAddress,3000,[]byte{})
		}
		inputCoins := make([]coin.PlainCoin, outCoinCount)
		for i:=0;i<outCoinCount;i++ {
			var err error
			inputCoins[i],err = coin.NewCoinFromPaymentInfo(paymentInfo[i])
			if err!=nil{
				fmt.Println(err)
			}
			// Conceal then decrypt so the input coins look like coins a
			// wallet actually owns.
			ic_specific,ok := inputCoins[i].(*coin.CoinV2)
			assert.Equal(t, true, ok)
			ic_specific.ConcealOutputCoin(keySets[i].PaymentAddress.GetPublicView())
			ic_specific.Decrypt(keySets[i])
		}
		// in this test, each user will send some other rando 2500 and the rest is txfee
		outPaymentInfo := make([]*key.PaymentInfo, len(dummyPrivateKeys))
		for i, _ := range dummyPrivateKeys {
			otherPriv := operation.RandomScalar()
			pkb := otherPriv.ToBytes()
			outPaymentInfo[i] = key.InitPaymentInfo(key.GeneratePaymentAddress(pkb[:]),2500,[]byte{})
		}
		outputCoins := make([]*coin.CoinV2, outCoinCount)
		for i:=0;i<outCoinCount;i++ {
			var err error
			outputCoins[i],err = coin.NewCoinFromPaymentInfo(outPaymentInfo[i])
			if err!=nil{
				fmt.Println(err)
			}
		}
		// prove and verify with privacy using bulletproof
		// note that bulletproofs only assure each outcoin amount is in uint64 range
		// while the equality suminput = suminput + sumfee must be checked using mlsag later
		// here our mock scenario has out+fee>in but passes anyway
		proof, err := Prove(inputCoins, outputCoins, true, paymentInfo)
		assert.Equal(t, nil, err)
		isSane, err := proof.ValidateSanity()
		assert.Equal(t,nil,err)
		assert.Equal(t,true,isSane)
		isValid,err := proof.Verify(true, nil, uint64(200*outCoinCount), byte(0), nil, false, nil)
		assert.Equal(t, nil, err)
		assert.Equal(t,true,isValid)
		pBytes := proof.Bytes()
		// try `corrupting` one byte in the proof
		for i:=0; i<10; i++{
			b := make([]byte, len(pBytes))
			copy(b, pBytes)
			corruptedIndex := common.RandInt() % len(b)
			// random in 1..255 (not zero)
			diff := common.RandInt() % 255 + 1
			b[corruptedIndex] = byte((int(b[corruptedIndex]) + diff) % 256)
			reconstructedProof := new(PaymentProofV2)
			// it's a corrupted proof so it must fail one of these 3 checks
			err = reconstructedProof.SetBytes(b)
			if err != nil{
				continue
			}
			isSane, err = reconstructedProof.ValidateSanity()
			if !isSane{
				continue
			}
			isValid,err = reconstructedProof.Verify(true, nil, uint64(200*outCoinCount), byte(0), nil, false, nil)
			if !isValid{
				continue
			}
			// Reaching here means the corrupted proof passed everything —
			// report it and fail.
			fmt.Printf("Corrupted proof %v",reconstructedProof)
			assert.Equal(t,false,isValid)
		}
		// try completely made up proof
		for i:=0; i<10; i++{
			// length from 0..299
			randomLength := common.RandInt() % 300
			// 10% of the time the stupid proof is very long
			if i==0{
				randomLength += 3000000
			}
			bs := common.RandBytes(randomLength)
			reconstructedProof := new(PaymentProofV2)
			err = reconstructedProof.SetBytes(bs)
			// it's a bs proof so it must fail one of these 3 checks
			if err != nil{
				continue
			}
			isSane, err = reconstructedProof.ValidateSanity()
			if !isSane{
				continue
			}
			isValid,err = reconstructedProof.Verify(true, nil, uint64(200*outCoinCount), byte(0), nil, false, nil)
			if !isValid{
				continue
			}
			fmt.Printf("Forged proof %v",reconstructedProof)
			assert.Equal(t,false,isValid)
		}
	}
}
|
package demotest
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
"github.com/yikeso/goDemo/down"
)
// TestDownloadUrlFile exercises down.DownloadUrlFile end-to-end and
// expects no error.
// NOTE(review): this downloads a full CentOS minimal ISO (hundreds of MB)
// from a remote mirror and writes to the hard-coded Windows path "e:/dlp"
// — slow, network-dependent and non-portable; consider a tiny fixture URL
// and a temp directory instead.
func TestDownloadUrlFile(t *testing.T){
	Convey("测试文件下载方法",t,func(){
		err := down.DownloadUrlFile("http://mirrors.sohu.com/centos/7/isos/x86_64/CentOS-7-x86_64-Minimal-1611.iso","e:/dlp")
		So(err,ShouldBeNil)
	})
}
|
// @APIVersion 1.0.0
// @Title beego Test API
// @Description beego has a very cool tools to autogenerate documents for your API
// @Contact astaxie@gmail.com
// @TermsOfServiceUrl http://beego.me/
// @License Apache 2.0
// @LicenseUrl http://www.apache.org/licenses/LICENSE-2.0.html
package routers
import (
"scholarship/controllers"
"github.com/astaxie/beego"
)
// init registers every /v1 REST namespace with beego. Each sub-namespace
// maps a resource path to its controller; the concrete routes are
// generated from the controllers' annotations via NSInclude.
func init() {
	ns := beego.NewNamespace("/v1",
		beego.NSNamespace("/object",
			beego.NSInclude(
				&controllers.ObjectController{},
			),
		),
		beego.NSNamespace("/user",
			beego.NSInclude(
				&controllers.UserController{},
			),
		),
		beego.NSNamespace("/sendTx",
			beego.NSInclude(
				&controllers.SendTxController{},
			),
		),
		beego.NSNamespace("/ipfs",
			beego.NSInclude(
				&controllers.IpfsController{},
			),
		),
		beego.NSNamespace("/student",
			beego.NSInclude(
				&controllers.StudentController{},
			),
		),
		beego.NSNamespace("/project",
			beego.NSInclude(
				&controllers.ProjectController{},
			),
		),
		beego.NSNamespace("/signature",
			beego.NSInclude(
				&controllers.SignatureController{},
			),
		),
		beego.NSNamespace("/scholarship",
			beego.NSInclude(
				&controllers.ScholarshipController{},
			),
		),
		beego.NSNamespace("/recharge",
			beego.NSInclude(
				&controllers.RechargeController{},
			),
		),
		beego.NSNamespace("/account",
			beego.NSInclude(
				&controllers.AccountController{},
			),
		),
		beego.NSNamespace("/accountBalances",
			beego.NSInclude(
				&controllers.AccountBalanceController{},
			),
		),
	)
	beego.AddNamespace(ns)
}
|
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"encoding/json"
"fmt"
"os"
"strings"
"testing"
"github.com/GoogleCloudPlatform/anthos-samples/anthos-bm-gcp-terraform/util"
"github.com/GoogleCloudPlatform/anthos-samples/anthos-bm-gcp-terraform/validation"
"github.com/gruntwork-io/terratest/modules/gcp"
"github.com/gruntwork-io/terratest/modules/terraform"
testStructure "github.com/gruntwork-io/terratest/modules/test-structure"
"github.com/stretchr/testify/assert"
)
// TestUnit_MainScript_InstallMode runs `terraform init` + `plan` against
// the root module in "install" mode (with a dummy service-account
// credentials file) and asserts that the planned child modules and
// outputs match expectations — no real infrastructure is created.
func TestUnit_MainScript_InstallMode(goTester *testing.T) {
	moduleDir := testStructure.CopyTerraformFolderToTemp(goTester, "../../", ".")
	projectID := gcp.GetGoogleProjectIDFromEnvVar(goTester) // from GOOGLE_CLOUD_PROJECT
	workingDir, err := os.Getwd()
	util.LogError(err, "Failed to read current working directory")
	credentialsFile := fmt.Sprintf("%s/credentials_file.json", workingDir)
	// Syntactically valid but fake service-account key, good enough for
	// `terraform plan`.
	dummyCredentials := `
	{
		"type": "service_account",
		"project_id": "temp-proj",
		"private_key_id": "pkey-id",
		"private_key": "-----BEGIN PRIVATE KEY-----\npkey\n-----END PRIVATE KEY-----\n",
		"client_email": "temp-proj@temp-proj.iam.gserviceaccount.com",
		"client_id": "12344321",
		"auth_uri": "https://accounts.google.com/o/oauth2/auth",
		"token_uri": "https://oauth2.googleapis.com/token",
		"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
		"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/temp-proj@temp-proj.iam.gserviceaccount.com"
	}
	`
	// Recreate the credentials file from scratch; it is cleaned up on exit.
	if _, err := os.Stat(credentialsFile); err == nil {
		os.Remove(credentialsFile)
	}
	tmpFile, err := os.Create(credentialsFile)
	util.LogError(err, fmt.Sprintf("Could not create temporary file at %s", credentialsFile))
	defer tmpFile.Close()
	defer os.Remove(credentialsFile)
	_, err = tmpFile.WriteString(dummyCredentials)
	util.LogError(err, fmt.Sprintf("Could not write to temporary file at %s", credentialsFile))
	tmpFile.Sync()
	mode := "install"
	resourcesPath := "./resources"
	bootDiskSize := 175
	instanceCount := map[string]int{
		"controlplane": 3,
		"worker":       2,
	}
	tfPlanOutput := "terraform_test.tfplan"
	tfPlanOutputArg := fmt.Sprintf("-out=%s", tfPlanOutput)
	tfVarsMap := map[string]interface{}{
		"project_id":       projectID,
		"credentials_file": credentialsFile,
		"mode":             mode,
		"resources_path":   resourcesPath,
		"instance_count":   instanceCount,
		"boot_disk_size":   bootDiskSize,
	}
	tfOptions := terraform.WithDefaultRetryableErrors(goTester, &terraform.Options{
		TerraformDir: moduleDir,
		// Variables to pass to our Terraform code using -var options
		Vars:         tfVarsMap,
		PlanFilePath: tfPlanOutput,
	})
	// Terraform init and plan only
	terraform.Init(goTester, tfOptions)
	terraform.RunTerraformCommand(
		goTester,
		tfOptions,
		terraform.FormatArgs(tfOptions, "plan", tfPlanOutputArg)...,
	)
	// Convert the binary plan file into JSON for structural assertions.
	tfPlanJSON, err := terraform.ShowE(goTester, tfOptions)
	util.LogError(err, fmt.Sprintf("Failed to parse the plan file %s into JSON format", tfPlanOutput))
	var terraformPlan util.MainModulePlan
	err = json.Unmarshal([]byte(tfPlanJSON), &terraformPlan)
	util.LogError(err, "Failed to parse the JSON plan into the MainModulePlan struct in unit/module_main.go")
	/**
	 * Pro tip:
	 * Write the json to a file using the util.WriteToFile() method to easily debug
	 * util.WriteToFile(tfPlanJSON, "../../plan.json")
	 */
	var installAbmModule []util.TFModule // install_abm
	// Every planned child module must be one of the known types; only the
	// install_abm module is collected for the count assertion below.
	for _, childModule := range terraformPlan.PlannedValues.RootModule.ChildModules {
		moduleAddress := childModule.ModuleAddress
		if strings.HasSuffix(moduleAddress, "instance_template") ||
			strings.HasSuffix(moduleAddress, "vm_hosts") ||
			strings.HasSuffix(moduleAddress, "service_accounts") ||
			strings.Contains(moduleAddress, "google_apis") ||
			strings.Contains(moduleAddress, "init_hosts") ||
			strings.Contains(moduleAddress, "gke_hub_membership") {
			continue
		} else if strings.Contains(moduleAddress, "install_abm") {
			installAbmModule = append(installAbmModule, childModule)
		} else {
			goTester.Errorf("Unexpected module with address [%s] at planned_values.root_module.child_modules", moduleAddress)
		}
	}
	assert.Len(
		goTester,
		installAbmModule,
		1,
		"Unexpected number of child modules with address type install_abm at planned_values.root_module.child_modules",
	)
	// validate the outputs from the script
	validation.ValidateMainOutputsForInstallMode(goTester, terraformPlan.PlannedValues.Outputs, &tfVarsMap)
}
// TestUnit_MainScript_ManualLB plans the root terraform module with
// mode="manuallb" against a dummy credentials file and asserts on the
// generated plan: the mode variable, the manual-LB cluster yaml resource,
// the expected child modules and the module outputs. No resources are
// created; only init/plan/show run.
func TestUnit_MainScript_ManualLB(goTester *testing.T) {
	moduleDir := testStructure.CopyTerraformFolderToTemp(goTester, "../../", ".")
	projectID := gcp.GetGoogleProjectIDFromEnvVar(goTester) // from GOOGLE_CLOUD_PROJECT
	workingDir, err := os.Getwd()
	util.LogError(err, "Failed to read current working directory")
	credentialsFile := fmt.Sprintf("%s/credentials_file.json", workingDir)
	// Syntactically valid but fake service-account key: the plan phase only
	// needs the file to exist, it never authenticates with it.
	dummyCredentials := `
{
"type": "service_account",
"project_id": "temp-proj",
"private_key_id": "pkey-id",
"private_key": "-----BEGIN PRIVATE KEY-----\npkey\n-----END PRIVATE KEY-----\n",
"client_email": "temp-proj@temp-proj.iam.gserviceaccount.com",
"client_id": "12344321",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/temp-proj@temp-proj.iam.gserviceaccount.com"
}
`
	// Recreate the credentials file from scratch and clean it up afterwards.
	if _, err := os.Stat(credentialsFile); err == nil {
		os.Remove(credentialsFile)
	}
	tmpFile, err := os.Create(credentialsFile)
	util.LogError(err, fmt.Sprintf("Could not create temporary file at %s", credentialsFile))
	defer tmpFile.Close()
	defer os.Remove(credentialsFile)
	_, err = tmpFile.WriteString(dummyCredentials)
	util.LogError(err, fmt.Sprintf("Could not write to temporary file at %s", credentialsFile))
	tmpFile.Sync()
	bootDiskSize := 175
	abmClusterID := "test-abm-cluster-id"
	resourcesPath := "./resources"
	instanceCount := map[string]int{
		"controlplane": 3,
		"worker":       2,
	}
	scriptMode := "manuallb"
	tfPlanOutput := "terraform_test.tfplan"
	tfPlanOutputArg := fmt.Sprintf("-out=%s", tfPlanOutput)
	// Variables to pass to our Terraform code using -var options
	tfVarsMap := map[string]interface{}{
		"project_id":       projectID,
		"credentials_file": credentialsFile,
		"resources_path":   resourcesPath,
		"boot_disk_size":   bootDiskSize,
		"abm_cluster_id":   abmClusterID,
		"instance_count":   instanceCount,
		"mode":             scriptMode,
	}
	tfOptions := terraform.WithDefaultRetryableErrors(goTester, &terraform.Options{
		TerraformDir: moduleDir,
		Vars:         tfVarsMap,
		PlanFilePath: tfPlanOutput,
	})
	// Terraform init and plan only
	terraform.Init(goTester, tfOptions)
	terraform.RunTerraformCommand(
		goTester,
		tfOptions,
		terraform.FormatArgs(tfOptions, "plan", tfPlanOutputArg)...,
	)
	tfPlanJSON, err := terraform.ShowE(goTester, tfOptions)
	util.LogError(err, fmt.Sprintf("Failed to parse the plan file %s into JSON format", tfPlanOutput))
	var terraformPlan util.MainModulePlan
	err = json.Unmarshal([]byte(tfPlanJSON), &terraformPlan)
	util.LogError(err, "Failed to parse the JSON plan into the MainModulePlan struct in unit/module_main.go")
	/**
	 * Pro tip:
	 * Write the json to a file using the util.WriteToFile() method to easily debug
	 * util.WriteToFile(tfPlanJSON, "../../plan.json")
	 */
	// validate the plan has the expected variable for 'mode'
	assert.NotNil(
		goTester,
		terraformPlan.Variables.Mode,
		"Variable not found in plan: mode",
	)
	// validate the plan has 'manuallb' set for mode.
	// Note: testify's assert.Equal takes (t, expected, actual); the original
	// passed them swapped, which garbles the failure diff.
	assert.Equal(
		goTester,
		scriptMode,
		terraformPlan.Variables.Mode.Value,
		fmt.Sprintf("Variable 'mode' in the plan is not set to: %s", scriptMode),
	)
	// the manual-LB flavor of the cluster yaml must be planned at the root
	foundYamlResource := false
	for _, rootResource := range terraformPlan.PlannedValues.RootModule.Resources {
		if rootResource.Name == "cluster_yaml_manuallb" {
			foundYamlResource = true
			break
		}
	}
	assert.True(
		goTester,
		foundYamlResource,
		"There was no resource for the ABM cluster yaml. Hence could not validate it",
	)
	// Bucket the interesting child modules; modules we don't assert on are
	// skipped, anything unrecognized fails the test.
	var ingressLBModule []util.TFModule
	var controlplaneLBModule []util.TFModule
	var installAbmModule []util.TFModule // install_abm
	for _, childModule := range terraformPlan.PlannedValues.RootModule.ChildModules {
		moduleAddress := childModule.ModuleAddress
		if strings.HasSuffix(moduleAddress, "instance_template") ||
			strings.HasSuffix(moduleAddress, "vm_hosts") ||
			strings.HasSuffix(moduleAddress, "service_accounts") ||
			strings.Contains(moduleAddress, "google_apis") ||
			strings.Contains(moduleAddress, "init_hosts") ||
			strings.Contains(moduleAddress, "gke_hub_membership") {
			continue
		} else if strings.HasSuffix(moduleAddress, "configure_ingress_lb[0]") {
			ingressLBModule = append(ingressLBModule, childModule)
		} else if strings.HasSuffix(moduleAddress, "configure_controlplane_lb[0]") {
			controlplaneLBModule = append(controlplaneLBModule, childModule)
		} else if strings.Contains(moduleAddress, "install_abm") {
			installAbmModule = append(installAbmModule, childModule)
		} else {
			goTester.Errorf("Unexpected module with address [%s] at planned_values.root_module.child_modules", moduleAddress)
		}
	}
	assert.Len(
		goTester,
		ingressLBModule,
		1,
		"Unexpected number of child modules with address type configure_ingress_lb at planned_values.root_module.child_modules",
	)
	assert.Len(
		goTester,
		controlplaneLBModule,
		1,
		"Unexpected number of child modules with address type configure_controlplane_lb at planned_values.root_module.child_modules",
	)
	assert.Len(
		goTester,
		installAbmModule,
		1,
		"Unexpected number of child modules with address type install_abm at planned_values.root_module.child_modules",
	)
	// validate the outputs from the script
	validation.ValidateMainOutputsForManualMode(goTester, terraformPlan.PlannedValues.Outputs, &tfVarsMap)
}
|
package noise
import (
chacha "golang.org/x/crypto/chacha20poly1305"
)
// zeroNonce is an all-zero ChaCha20-Poly1305 nonce, sized by the cipher's
// NonceSize constant.
var zeroNonce [chacha.NonceSize]byte

const (
	// encryptedKeySize is the byte length of an encrypted key blob;
	// presumably a 32-byte key plus the 16-byte Poly1305 tag — TODO confirm.
	encryptedKeySize = 48
	// encryptedNothingSize is the byte length of an encryption of empty
	// plaintext; 16 matches the Poly1305 tag size — TODO confirm.
	encryptedNothingSize = 16
)

// EncryptedKey is a fixed-size encrypted key blob.
type EncryptedKey [encryptedKeySize]byte

// EncryptedNothing is a fixed-size encryption of the empty plaintext.
type EncryptedNothing [encryptedNothingSize]byte
|
package books
import (
"github.com/MuchChaca/GoLangTraining/04perso/03iris/04exp_test/authors"
"github.com/MuchChaca/GoLangTraining/04perso/03iris/04exp_test/genres"
)
// Book represents a book exposed over the JSON API.
type Book struct {
	// SessionID string `json:"-"`
	ID int64 `json:"id,omitempty"`
	Title string `json:"title"`
	// NOTE(review): Description is declared bool but serialized as
	// "description" — looks like it should be a string; confirm with callers.
	Description bool `json:"description"`
	Author authors.Author `json:"author"`
	Genres []genres.Genre `json:"genres"`
}
|
package errors
// AirshipError is the base error type
// used to create extended error types
// in other airshipctl packages.
type AirshipError struct {
	Message string
}

// Error implements the standard library's error interface by
// reporting the wrapped message.
func (e *AirshipError) Error() string {
	return e.Message
}
// ErrNotImplemented returned for not implemented features
type ErrNotImplemented struct{}

// Error reports the fixed not-implemented message.
func (ErrNotImplemented) Error() string {
	return "Not implemented"
}
// ErrWrongConfig returned in case of incorrect configuration
type ErrWrongConfig struct{}

// Error reports the fixed wrong-configuration message.
func (ErrWrongConfig) Error() string {
	return "Wrong configuration"
}
// ErrMissingConfig returned in case of missing configuration
type ErrMissingConfig struct{}

// Error reports the fixed missing-configuration message.
func (ErrMissingConfig) Error() string {
	return "Missing configuration"
}
// ErrConfigFailed returned in case of failure during configuration
type ErrConfigFailed struct{}

// Error reports the fixed configuration-failure message
// (kept byte-identical, including its trailing period).
func (ErrConfigFailed) Error() string {
	return "Configuration failed to complete."
}
|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "as IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"database/sql/driver"
"encoding/json"
"errors"
)
// Uint32Slice is a []uint32 persisted as a JSON-encoded database column.
type Uint32Slice []uint32

// Value implements the driver.Valuer interface
// See gorm document about customizing data types: https://gorm.io/docs/data_types.html
func (u Uint32Slice) Value() (driver.Value, error) {
	// Store the slice as its JSON text representation.
	encoded, marshalErr := json.Marshal(u)
	if marshalErr != nil {
		return nil, marshalErr
	}
	return string(encoded), nil
}

// Scan implements the sql.Scanner interface
// See gorm document about customizing data types: https://gorm.io/docs/data_types.html
func (u *Uint32Slice) Scan(value interface{}) error {
	// A SQL NULL leaves the slice untouched.
	if value == nil {
		return nil
	}
	// Normalize the driver's representation to raw JSON bytes:
	// []byte from modern MySQL drivers, string from older ones.
	var raw []byte
	switch v := value.(type) {
	case []byte:
		raw = v
	case string:
		raw = []byte(v)
	default:
		return errors.New("unsupported Scan type for Uint32Slice")
	}
	return json.Unmarshal(raw, u)
}
|
package main
import (
	"fmt"
	"time"

	_ "github.com/go-sql-driver/mysql"
	"github.com/jmoiron/sqlx"
)
var db *sqlx.DB //是一个连接池对象
// initDb connects the package-level pool db to the local MySQL server's
// goday10 database and applies basic pool settings. A non-nil error means
// the database is unreachable.
func initDb() (err error) {
	// DSN for the goday10 database on the local MySQL instance.
	dsn := "root:root@tcp(127.0.0.1:3306)/goday10"
	// Connect opens the pool and pings it; assign to the global db.
	db, err = sqlx.Connect("mysql", dsn)
	if err != nil {
		return
	}
	db.SetMaxOpenConns(10) // cap the pool at 10 concurrent connections
	// SetConnMaxLifetime takes a time.Duration; the original bare 5 meant
	// 5 nanoseconds, which recycled every connection immediately.
	db.SetConnMaxLifetime(5 * time.Minute)
	return
}
// user mirrors one row of the `user` table (queried by sqlDemo).
type user struct {
	Id int `json:"id"`
	Name string `json:"name"`
	Age int `json:"age"`
}
// sqlDemo deliberately builds the query with string formatting to
// demonstrate SQL injection: a name like "xxx' or 1=1#" subverts the
// WHERE clause and returns every row. Never build real queries this way —
// use placeholder parameters instead.
func sqlDemo(name string) {
	sqlStr1 := fmt.Sprintf("select id,name,age from user where name='%s'", name)
	fmt.Println(sqlStr1)
	var users []user
	// The original discarded Select's error; surface it instead.
	if err := db.Select(&users, sqlStr1); err != nil {
		fmt.Println("query failed:", err)
		return
	}
	for _, u := range users {
		fmt.Println(u)
	}
}
// main connects to the database and runs the SQL-injection demo.
func main() {
	err := initDb()
	if err != nil {
		fmt.Println("init db failed,", err)
		return
	}
	fmt.Println("连接数据库成功!")
	// Classic injection payload: closes the quoted string, ORs a tautology,
	// and comments out the rest of the statement.
	sqlDemo("xxx' or 1=1#")
}
|
package slack
import (
"encoding/json"
"strings"
"testing"
"github.com/go-test/deep"
)
// TestAttachment_UnmarshalMarshalJSON_WithBlocks checks that an attachment
// containing Block Kit blocks survives a JSON unmarshal/marshal round trip
// without losing or mutating any fields.
func TestAttachment_UnmarshalMarshalJSON_WithBlocks(t *testing.T) {
	originalAttachmentJson := `{
	"id": 1,
	"blocks": [
		{
			"type": "section",
			"block_id": "xxxx",
			"text": {
				"type": "mrkdwn",
				"text": "Pick something:",
				"verbatim": true
			},
			"accessory": {
				"type": "static_select",
				"action_id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
				"placeholder": {
					"type": "plain_text",
					"text": "Select one item",
					"emoji": true
				},
				"options": [
					{
						"text": {
							"type": "plain_text",
							"text": "ghi",
							"emoji": true
						},
						"value": "ghi"
					}
				]
			}
		}
	],
	"color": "#13A554",
	"fallback": "[no preview available]"
}`
	// Decode the fixture into the typed Attachment, then re-encode it.
	attachment := new(Attachment)
	err := json.Unmarshal([]byte(originalAttachmentJson), attachment)
	if err != nil {
		t.Fatalf("expected no error unmarshaling attachment with blocks, got: %v", err)
	}
	actualAttachmentJson, err := json.Marshal(attachment)
	if err != nil {
		t.Fatal(err)
	}
	// Compare as generic JSON values so key ordering differences don't matter.
	var (
		actual   interface{}
		expected interface{}
	)
	if err = json.Unmarshal([]byte(originalAttachmentJson), &expected); err != nil {
		t.Fatal(err)
	}
	if err = json.Unmarshal(actualAttachmentJson, &actual); err != nil {
		t.Fatal(err)
	}
	if diff := deep.Equal(actual, expected); diff != nil {
		t.Fatal("actual does not match expected\n", strings.Join(diff, "\n"))
	}
}
|
// Simple program which prints the hostname of your webserver
// And return code500 after 10 hits
package main
import (
"fmt"
"log"
"net/http"
"os"
)
const (
	listen = "0.0.0.0" // bind on all interfaces
	port = "8080" // HTTP listen port
)
// counter tracks how many requests this process has served.
type counter struct {
	count int
}

// c is the process-wide request counter shared by all handlers.
var c counter

// init seeds the counter with a starting value.
func (ct *counter) init(initCount int) {
	ct.count = initCount
}

// incr adds one to the counter.
func (ct *counter) incr() {
	ct.count++
}

// get reports the current count.
func (ct *counter) get() int {
	return ct.count
}
// handler reports the server's hostname and bumps the global hit counter.
func handler(w http.ResponseWriter, r *http.Request) {
	hostname, err := os.Hostname()
	if err != nil {
		// The original called log.Fatal here, which would kill the whole
		// server for one failed request; fail only this request instead.
		log.Printf("hostname lookup failed: %v", err)
		http.Error(w, "internal error", http.StatusInternalServerError)
		return
	}
	c.incr()
	// if c.count > 5 {
	// 	w.WriteHeader(http.StatusInternalServerError)
	// }
	fmt.Fprintf(w, "The hostname is: "+hostname+"\nThe Version is: V1\n")
	fmt.Printf("Count: %v\n", c.count)
}
// main wires the single handler to "/" and serves forever on listen:port;
// log.Fatal fires only if the listener fails.
func main() {
	c.init(0)
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(listen+":"+port, nil))
}
|
package models
import (
"log"
"github.com/go-bongo/bongo"
)
// Db opens a bongo connection to the "bongotest" database on localhost
// and aborts the process when the connection cannot be established.
func Db() *bongo.Connection {
	cfg := &bongo.Config{
		ConnectionString: "localhost",
		Database:         "bongotest",
	}
	conn, err := bongo.Connect(cfg)
	if err != nil {
		log.Fatal(err)
	}
	return conn
}
|
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blobserver
import (
"fmt"
"http"
"os"
"sync"
"camli/jsonconfig"
)
// Loader resolves configured prefixes to their storage or handler
// implementations during server setup.
type Loader interface {
	GetStorage(prefix string) (Storage, os.Error)
	GetHandlerType(prefix string) string // or ""
	// Returns either a Storage or an http.Handler
	GetHandler(prefix string) (interface{}, os.Error)
	FindHandlerByTypeIfLoaded(htype string) (prefix string, handler interface{}, err os.Error)
}

// StorageConstructor builds a Storage from its JSON configuration.
type StorageConstructor func(Loader, jsonconfig.Obj) (Storage, os.Error)

// HandlerConstructor builds an http.Handler from its JSON configuration.
type HandlerConstructor func(Loader, jsonconfig.Obj) (http.Handler, os.Error)

// mapLock guards both constructor registries below.
var mapLock sync.Mutex
var storageConstructors = make(map[string]StorageConstructor)
var handlerConstructors = make(map[string]HandlerConstructor)
// RegisterStorageConstructor records ctor as the factory for storage
// type typ; it panics on duplicate registration.
func RegisterStorageConstructor(typ string, ctor StorageConstructor) {
	mapLock.Lock()
	defer mapLock.Unlock()
	if _, dup := storageConstructors[typ]; dup {
		panic("blobserver: StorageConstructor already registered for type: " + typ)
	}
	storageConstructors[typ] = ctor
}
// CreateStorage instantiates a Storage of type typ via its registered
// constructor, failing when no constructor is known for typ.
func CreateStorage(typ string, loader Loader, config jsonconfig.Obj) (Storage, os.Error) {
	mapLock.Lock()
	ctor, known := storageConstructors[typ]
	mapLock.Unlock()
	if !known {
		return nil, fmt.Errorf("Storage type %q not known or loaded", typ)
	}
	return ctor(loader, config)
}
// RegisterHandlerConstructor records ctor as the factory for handler
// type typ; it panics on duplicate registration.
func RegisterHandlerConstructor(typ string, ctor HandlerConstructor) {
	mapLock.Lock()
	defer mapLock.Unlock()
	if _, ok := handlerConstructors[typ]; ok {
		// Fixed typo in the panic message: "HandlerConstrutor".
		panic("blobserver: HandlerConstructor already registered for type: " + typ)
	}
	handlerConstructors[typ] = ctor
}
// CreateHandler instantiates an http.Handler of type typ via its
// registered constructor, failing when no constructor is known for typ.
func CreateHandler(typ string, loader Loader, config jsonconfig.Obj) (http.Handler, os.Error) {
	mapLock.Lock()
	ctor, known := handlerConstructors[typ]
	mapLock.Unlock()
	if !known {
		return nil, fmt.Errorf("blobserver: Handler type %q not known or loaded", typ)
	}
	return ctor(loader, config)
}
|
package main
import "fmt"
// main demonstrates slice literals and re-slicing; formatting fixed to be
// gofmt-clean (the original would not pass gofmt).
func main() {
	// var numbers = make([]int, 3, 5)
	var numbers1 = []int{1, 2, 3, 4, 5}
	// numbers = {1, 2, 1, 5, 6}
	printSlice(numbers1)
	// numbers2 shares numbers1's backing array: len 1, cap 4.
	numbers2 := numbers1[1:2]
	printSlice(numbers2)
}
// printSlice reports the length, capacity and contents of x. The format
// string is kept byte-for-byte so the program's output is unchanged;
// only the code formatting is fixed to be gofmt-clean.
func printSlice(x []int) {
	fmt.Printf("len=%d\n,cap=%d \n slice=%v\n", len(x), cap(x), x)
}
|
package gen
import (
"strings"
"text/template"
)
// Option mutates a config; options are applied in order by use.
type Option func(cfg *config)

// config carries the code-generator settings.
type config struct {
	gofmt bool // run gofmt over generated output
	tmplLDelimiter, tmplRDelimiter string // template action delimiters
	funcMap template.FuncMap // functions exposed to templates
}

// defaultCfg is the baseline every newConfig call starts from
// (deep-copied, never mutated in place).
var defaultCfg = config{
	gofmt: true,
	tmplLDelimiter: "<<",
	tmplRDelimiter: ">>",
	funcMap: map[string]interface{}{
		// NOTE(review): strings.Title is deprecated since Go 1.18;
		// kept here to preserve behavior.
		"Title": strings.Title,
	},
}
// use applies each non-nil option to cfg, in the order given.
func (cfg *config) use(opts ...Option) {
	for _, apply := range opts {
		if apply != nil {
			apply(cfg)
		}
	}
}
// deepCopy returns a copy of cfg whose funcMap is independent of the
// original, so callers may mutate it freely.
func (cfg *config) deepCopy() config {
	out := *cfg
	out.funcMap = make(map[string]interface{}, len(cfg.funcMap))
	for name, fn := range cfg.funcMap {
		out.funcMap[name] = fn
	}
	return out
}
// newConfig builds a config from the package defaults plus opts.
func newConfig(opts ...Option) config {
	merged := defaultCfg.deepCopy()
	merged.use(opts...)
	return merged
}
// to support using code like `Options.XXX()` to create a Option
type optionFactory struct{}

// Options is the namespace value callers use: Options.GoFmt(true), etc.
var Options optionFactory
// GoFmt toggles gofmt post-processing of the generated code.
func (optionFactory) GoFmt(enable bool) Option {
	return func(c *config) {
		c.gofmt = enable
	}
}
// TemplateDelimiter overrides the template action delimiters
// (default "<<" and ">>").
func (optionFactory) TemplateDelimiter(left, right string) Option {
	return func(c *config) {
		c.tmplLDelimiter = left
		c.tmplRDelimiter = right
	}
}
// Func registers f under key in the template function map.
func (optionFactory) Func(key string, f interface{}) Option {
	return func(c *config) {
		// assert: c.funcMap != nil (guaranteed by deepCopy/newConfig)
		c.funcMap[key] = f
	}
}
|
package restserver
import "github.com/astaxie/beego"
// MainPage is the beego controller serving the agent's debug console page.
type MainPage struct {
	beego.Controller
}
// GoMainPage writes the static ATMP-agent debug console to the response:
// a single inline HTML page whose JavaScript fetches task, config and log
// data from the agent's other endpoints via XMLHttpRequest.
//
// NOTE(review): in the embedded script, getDebuglogInfo is defined twice
// (identical bodies, harmless but redundant) and getWarnlogInfo is invoked
// by a menu item but never defined — clicking that item will raise a
// ReferenceError in the browser. Confirm and fix in the HTML.
func (c *MainPage) GoMainPage() {
	var index = `
<!DOCTYPE html>
<html>
<head>
<style>
#header {
background-color: black;
color: white;
text-align: center;
padding: 5px;
}
#nav {
line-height: 30px;
background-color: #eeeeee;
height: 800px;
width: 15%;
float: left;
padding: 5px;
}
#section {
width: 80%;
float: left;
padding: 10px;
}
#footer {
background-color: black;
color: white;
clear: both;
text-align: center;
padding: 5px;
}
</style>
</head>
<body>
<div id="header">
<h1>ATMP agent 调测工具</h1>
</div>
<div id="nav">
<ol>
<li onclick="getMemTaskInfo()" style="list-style-type:none;"><a href="#">内存Task信息</a></li>
<li onclick="getStorageTaskInfo()" style="list-style-type:none;"><a href="#">存储Task信息</a></li>
<li onclick="getStorageConfInfo()" style="list-style-type:none;"><a href="#">系统配置信息</a></li>
<li onclick="getlogInfo()" style="list-style-type:none;"><a href="#">全量日志信息</a></li>
<li onclick="getErrorlogInfo()" style="list-style-type:none;"><a href="#">Error日志信息</a></li>
<li onclick="getWarnlogInfo()" style="list-style-type:none;"><a href="#">Warnning日志信息</a></li>
<li onclick="getInfologInfo()" style="list-style-type:none;"><a href="#">Info日志信息</a></li>
<li onclick="getDebuglogInfo()" style="list-style-type:none;"><a href="#">Debug日志信息</a></li>
<li style="list-style-type:none;"><a href="/data/logs/atmp_agent.log" target="_blank">日志下载</a></li>
</ol>
</div>
<div id="section">
<h2 id="section_title">请选择右边的查看项</h2>
<pre id="section_context">
</pre>
</div>
<div id="footer">
Copyright ? zeusis.com
</div>
<script>
function getMemTaskInfo() {
document.getElementById("section_title").innerHTML = "内存中任务信息"
var xmlhttp;
if (window.XMLHttpRequest) {
// IE7+, Firefox, Chrome, Opera, Safari 浏览器执行代码
xmlhttp = new XMLHttpRequest();
}
else {
// IE6, IE5 浏览器执行代码
xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
}
xmlhttp.onreadystatechange = function () {
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("section_context").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "/gettaskinfo?type=Mem", true);
xmlhttp.send();
}
function getStorageTaskInfo() {
document.getElementById("section_title").innerHTML = "存储中任务信息"
var xmlhttp;
if (window.XMLHttpRequest) {
// IE7+, Firefox, Chrome, Opera, Safari 浏览器执行代码
xmlhttp = new XMLHttpRequest();
}
else {
// IE6, IE5 浏览器执行代码
xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
}
xmlhttp.onreadystatechange = function () {
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("section_context").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "/data/conf/task.data", true);
xmlhttp.send();
}
function getStorageConfInfo() {
document.getElementById("section_title").innerHTML = "存储中的配置信息"
var xmlhttp;
if (window.XMLHttpRequest) {
// IE7+, Firefox, Chrome, Opera, Safari 浏览器执行代码
xmlhttp = new XMLHttpRequest();
}
else {
// IE6, IE5 浏览器执行代码
xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
}
xmlhttp.onreadystatechange = function () {
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("section_context").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "/data/conf/app.conf", true);
xmlhttp.send();
}
function getlogInfo() {
document.getElementById("section_title").innerHTML = "atmp_agent.log 日志信息"
var xmlhttp;
if (window.XMLHttpRequest) {
// IE7+, Firefox, Chrome, Opera, Safari 浏览器执行代码
xmlhttp = new XMLHttpRequest();
}
else {
// IE6, IE5 浏览器执行代码
xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
}
xmlhttp.onreadystatechange = function () {
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("section_context").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "/data/logs/atmp_agent.log", true);
xmlhttp.send();
}
function getDebuglogInfo() {
document.getElementById("section_title").innerHTML = "atmp_agent.log 日志信息"
var xmlhttp;
if (window.XMLHttpRequest) {
// IE7+, Firefox, Chrome, Opera, Safari 浏览器执行代码
xmlhttp = new XMLHttpRequest();
}
else {
// IE6, IE5 浏览器执行代码
xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
}
xmlhttp.onreadystatechange = function () {
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("section_context").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "/data/logs/atmp_agent.debug.log", true);
xmlhttp.send();
}
function getErrorlogInfo() {
document.getElementById("section_title").innerHTML = "atmp_agent.log 日志信息"
var xmlhttp;
if (window.XMLHttpRequest) {
// IE7+, Firefox, Chrome, Opera, Safari 浏览器执行代码
xmlhttp = new XMLHttpRequest();
}
else {
// IE6, IE5 浏览器执行代码
xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
}
xmlhttp.onreadystatechange = function () {
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("section_context").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "/data/logs/atmp_agent.error.log", true);
xmlhttp.send();
}
function getInfologInfo() {
document.getElementById("section_title").innerHTML = "atmp_agent.log 日志信息"
var xmlhttp;
if (window.XMLHttpRequest) {
// IE7+, Firefox, Chrome, Opera, Safari 浏览器执行代码
xmlhttp = new XMLHttpRequest();
}
else {
// IE6, IE5 浏览器执行代码
xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
}
xmlhttp.onreadystatechange = function () {
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("section_context").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "/data/logs/atmp_agent.info.log", true);
xmlhttp.send();
}
function getDebuglogInfo() {
document.getElementById("section_title").innerHTML = "atmp_agent.log 日志信息"
var xmlhttp;
if (window.XMLHttpRequest) {
// IE7+, Firefox, Chrome, Opera, Safari 浏览器执行代码
xmlhttp = new XMLHttpRequest();
}
else {
// IE6, IE5 浏览器执行代码
xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
}
xmlhttp.onreadystatechange = function () {
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("section_context").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "/data/logs/atmp_agent.debug.log", true);
xmlhttp.send();
}
function downloadlogInfo() {
}
</script>
</body>
</html>
`
	// index = strings.Replace(index, "[[Task.Data]]", filepath.Join(comm.AppSystem.GetRootDir(), "conf", "task.data"), -1)
	// index = strings.Replace(index, "[[App.Conf]]", filepath.Join(comm.AppSystem.GetRootDir(), "conf", "app.conf"), -1)
	// index = strings.Replace(index, "[[Log.File]]", filepath.Join(comm.AppSystem.GetRootDir(), "logs", "atmp_agent.log"), -1)
	c.Ctx.ResponseWriter.Write([]byte(index))
}
|
package tcpexample
import (
"fmt"
"net"
"time"
"github.com/meidoworks/nekoq-api/errorutil"
"github.com/meidoworks/nekoq-api/network"
"github.com/meidoworks/nekoq-api/network/tcp"
)
// ServerExample is a placeholder for a server-side example; not implemented.
func ServerExample() {
}
// ClientExample dials tcpConnStr, sends a minimal HTTP/1.1 request through
// a tcp.Channel, lets ClientHandler print the response for two seconds,
// then closes the channel.
func ClientExample(tcpConnStr string, timeout time.Duration) error {
	conn, err := net.DialTimeout("tcp", tcpConnStr, timeout)
	if err != nil {
		return err
	}
	tcpConn, ok := conn.(*net.TCPConn)
	if !ok {
		return errorutil.New("connection type is not *net.TCPConn")
	}
	// Fixed typo ("defualtHandler") and stop discarding the channel
	// construction error.
	defaultHandler := ClientHandler{}
	channel, err := tcp.NewTcpChannel(tcpConn, defaultHandler, new(tcp.TcpChannelOption))
	if err != nil {
		return err
	}
	// write
	channel.Write(network.NewSimpleWriteEvent([]byte("GET / HTTP/1.1\r\nHost: localhost\r\nConnection: Close\r\n\r\n")))
	// flush
	channel.Flush(network.NewNoopFlushEvent())
	// give the inbound side time to receive the response before closing
	time.Sleep(2 * time.Second)
	return channel.Close()
}
// ClientHandler prints everything read from the channel to stdout and
// ignores raw write notifications.
type ClientHandler struct {
	network.DefaultChannelRawSideHandler
}

// OnRead dumps received bytes to stdout.
func (ClientHandler) OnRead(ch network.Channel, data []byte) {
	fmt.Print(string(data))
}

// OnRawWriteOp is a no-op; writes need no bookkeeping here.
func (ClientHandler) OnRawWriteOp(ch network.Channel, data []byte) {
}
|
// Package inmemory implements an in-memory registry.
package inmemory
import (
"context"
"sync"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/pomerium/pomerium/internal/registry"
"github.com/pomerium/pomerium/internal/signal"
pb "github.com/pomerium/pomerium/pkg/grpc/registry"
)
// inMemoryServer implements the registry service backed by a
// process-local map; registrations expire after ttl.
type inMemoryServer struct {
	ttl time.Duration
	// onchange is used to broadcast changes to listeners
	onchange *signal.Signal
	// mu holds lock for regs
	mu sync.RWMutex
	// regs is {service,endpoint} -> expiration time mapping
	regs map[inMemoryKey]*timestamppb.Timestamp
}

// inMemoryKey identifies one registration: a service kind at an endpoint.
type inMemoryKey struct {
	kind pb.ServiceKind
	endpoint string
}
// New constructs a new registry tracking service that operates in RAM
// as such, it is not usable for multi-node deployment where REDIS or other alternative should be used
func New(ctx context.Context, ttl time.Duration) registry.Interface {
	server := &inMemoryServer{
		ttl:      ttl,
		regs:     map[inMemoryKey]*timestamppb.Timestamp{},
		onchange: signal.New(),
	}
	// purge expired registrations in the background until ctx ends
	go server.periodicCheck(ctx)
	return server
}
// periodicCheck runs until ctx is canceled, removing expired
// registrations every ttl*purgeAfterTTLFactor and broadcasting to
// watchers whenever something was actually removed.
func (s *inMemoryServer) periodicCheck(ctx context.Context) {
	// Reuse one ticker instead of allocating a fresh timer per iteration
	// via time.After.
	ticker := time.NewTicker(s.ttl * purgeAfterTTLFactor)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if s.lockAndRmExpired() {
				s.onchange.Broadcast(ctx)
			}
		}
	}
}
// Close closes the in memory server.
// Nothing is held open, so there is never anything to release.
func (s *inMemoryServer) Close() error {
	return nil
}
// Report is periodically sent by each service to confirm it is still serving with the registry
// data is persisted with a certain TTL
func (s *inMemoryServer) Report(ctx context.Context, req *pb.RegisterRequest) (*pb.RegisterResponse, error) {
	if err := req.Validate(); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	changed, err := s.lockAndReport(req.Services)
	if err != nil {
		return nil, err
	}
	if changed {
		// wake any Watch streams so they see the new registration set
		s.onchange.Broadcast(ctx)
	}
	resp := &pb.RegisterResponse{
		CallBackAfter: durationpb.New(s.ttl / callAfterTTLFactor),
	}
	return resp, nil
}
// lockAndRmExpired takes the write lock and purges expired entries,
// reporting whether anything was removed.
func (s *inMemoryServer) lockAndRmExpired() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.rmExpiredLocked()
}
// rmExpiredLocked deletes every registration whose expiration lies in
// the past and reports whether anything was deleted. The caller must
// hold s.mu for writing.
func (s *inMemoryServer) rmExpiredLocked() bool {
	cutoff := time.Now()
	changed := false
	for key, expiration := range s.regs {
		if expiration.AsTime().Before(cutoff) {
			delete(s.regs, key)
			changed = true
		}
	}
	return changed
}
// lockAndReport acquires lock, performs an update and returns current state of services
func (s *inMemoryServer) lockAndReport(services []*pb.Service) (bool, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.reportLocked(services)
}
// reportLocked updates registration and also returns an indication whether service list was updated
// (either a new service was inserted or an expired one was purged).
// The caller must hold s.mu for writing.
func (s *inMemoryServer) reportLocked(services []*pb.Service) (bool, error) {
	deadline := timestamppb.New(time.Now().Add(s.ttl))
	changed := false
	for _, svc := range services {
		key := inMemoryKey{kind: svc.Kind, endpoint: svc.Endpoint}
		if _, known := s.regs[key]; !known {
			changed = true
		}
		s.regs[key] = deadline
	}
	if s.rmExpiredLocked() {
		changed = true
	}
	return changed, nil
}
// List returns current snapshot of the services known to the registry
func (s *inMemoryServer) List(_ context.Context, req *pb.ListRequest) (*pb.ServiceList, error) {
	if err := req.Validate(); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	filter := kindsMap(req.Kinds)
	return &pb.ServiceList{Services: s.getServices(filter)}, nil
}
// kindsMap converts a list of kinds into a set for O(1) membership tests.
func kindsMap(kinds []pb.ServiceKind) map[pb.ServiceKind]bool {
	set := make(map[pb.ServiceKind]bool, len(kinds))
	for _, kind := range kinds {
		set[kind] = true
	}
	return set
}
// Watch returns a stream of updates as full snapshots
// The first message is the current snapshot; afterwards a full snapshot
// is re-sent on every registration change until the stream's context
// ends (surfaced as an Internal status from getServiceUpdates).
func (s *inMemoryServer) Watch(req *pb.ListRequest, srv pb.Registry_WatchServer) error {
	if err := req.Validate(); err != nil {
		return status.Error(codes.InvalidArgument, err.Error())
	}
	kinds := kindsMap(req.Kinds)
	ctx := srv.Context()
	// subscribe to change notifications before sending the first snapshot
	// so no update can be missed in between
	updates := s.onchange.Bind()
	defer s.onchange.Unbind(updates)
	if err := srv.Send(&pb.ServiceList{Services: s.getServices(kinds)}); err != nil {
		return status.Errorf(codes.Internal, "sending initial snapshot: %v", err)
	}
	for {
		services, err := s.getServiceUpdates(ctx, kinds, updates)
		if err != nil {
			return status.Errorf(codes.Internal, "obtaining service registrations: %v", err)
		}
		if err := srv.Send(&pb.ServiceList{Services: services}); err != nil {
			return status.Errorf(codes.Internal, "sending registration snapshot: %v", err)
		}
	}
}
// getServiceUpdates blocks until a change notification arrives (returning
// a fresh snapshot filtered by kinds) or ctx is canceled (returning
// ctx.Err).
func (s *inMemoryServer) getServiceUpdates(ctx context.Context, kinds map[pb.ServiceKind]bool, updates chan context.Context) ([]*pb.Service, error) {
	select {
	case <-updates:
		return s.getServices(kinds), nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// getServices takes a read lock and returns the current services
// matching the kinds filter.
func (s *inMemoryServer) getServices(kinds map[pb.ServiceKind]bool) []*pb.Service {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.getServicesLocked(kinds)
}
// getServicesLocked renders the registration set as a service list,
// keeping only entries matching kinds (an empty filter keeps everything).
// The caller must hold at least a read lock on s.mu.
func (s *inMemoryServer) getServicesLocked(kinds map[pb.ServiceKind]bool) []*pb.Service {
	out := make([]*pb.Service, 0, len(s.regs))
	for key := range s.regs {
		if len(kinds) > 0 && !kinds[key.kind] {
			continue
		}
		out = append(out, &pb.Service{Kind: key.kind, Endpoint: key.endpoint})
	}
	return out
}
|
package main
import (
"fmt"
"net"
"os"
)
// main sends one ICMP echo request (type 8, code 0) to the host named on
// the command line. Raw ICMP sockets normally require elevated privileges.
// The original source was truncated/broken here (a bare `check` statement
// and a `len` local shadowing the builtin); this completes it.
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Usage: ", os.Args[0], "host")
		os.Exit(1)
	}
	service := os.Args[1]
	conn, err := net.Dial("ip4:icmp", service)
	checkError(err)
	var msg [512]byte
	msg[0] = 8 // ICMP type: echo request
	msg[1] = 0 // code 0
	msg[2] = 0 // checksum high byte, filled in below
	msg[3] = 0 // checksum low byte, filled in below
	msg[4] = 0 // identifier
	msg[5] = 13
	msg[6] = 0 // sequence number
	msg[7] = 37
	msgLen := 8 // header-only message; renamed from `len` to avoid shadowing the builtin
	check := checkSum(msg[0:msgLen])
	msg[2] = byte(check >> 8)
	msg[3] = byte(check & 255)
	_, err = conn.Write(msg[0:msgLen])
	checkError(err)
}

// checkSum computes the 16-bit ones-complement ICMP checksum over msg,
// padding an odd final byte with zero.
func checkSum(msg []byte) uint16 {
	sum := 0
	for n := 0; n+1 < len(msg); n += 2 {
		sum += int(msg[n])<<8 + int(msg[n+1])
	}
	if len(msg)%2 == 1 {
		sum += int(msg[len(msg)-1]) << 8
	}
	sum = (sum >> 16) + (sum & 0xffff)
	sum += sum >> 16
	return uint16(^sum)
}
|
package main
// main shows how deferred-call arguments are captured: x is evaluated
// when the defer statement runs (so a == 1), while y is captured by the
// closure and read only when the deferred function executes (so 202).
func main() {
	x, y := 1, 2
	// a is bound to x's current value (1) here, at defer time.
	defer func(a int) {
		println("defer x,y = ", a, y)
	}(x)
	x += 100
	y += 200
	println(x, y) // prints 101 202, then the defer prints 1 202
}
|
package main
import "fmt"
// main demonstrates index-keyed composite literals: array/slice positions
// without an explicit entry get the element type's zero value ("").
func main() {
	const (
		Enone = 0
		Eio = 2
		Einval = 5
	)
	// a has length 6 (highest index 5, plus one); gaps are "".
	a := [...]string{Enone: "no error", Eio: "Eio", Einval: "invalid argument"}
	s := []string{Enone: "no error", Eio: "Eio", Einval: "invalid argument"}
	// the map holds exactly the three listed keys — no gap entries.
	m := map[int]string{Enone: "no error", Eio: "Eio", Einval: "invalid argument"}
	fmt.Println(a)
	fmt.Println(s)
	fmt.Println(m)
}
|
package main
import (
"fmt"
"sync"
)
var (
	x int64 // shared counter incremented by the add goroutines
	wg sync.WaitGroup // waits for both adders to finish
	lock sync.Mutex // guards x
	rwlock sync.RWMutex // unused here; see RWMutex for the explanation
)
func add() {
for i := 0; i < 50000; i++ {
lock.Lock() // 加锁
x = x + 1
lock.Unlock() // 解锁
}
wg.Done()
}
// Mutex runs two adders concurrently and prints the final value of x,
// which the lock guarantees to be exactly 100000.
func Mutex() {
	wg.Add(2)
	for i := 0; i < 2; i++ {
		go add()
	}
	wg.Wait()
	fmt.Println(x)
}
// RWMutex is documentation-only (no code): it explains read-write locks.
func RWMutex() {
	// A read-write lock has two modes: read locks and write locks.
	// While a goroutine holds a read lock, other goroutines acquiring a
	// read lock proceed, but any goroutine wanting the write lock waits.
	// While a goroutine holds the write lock, every other goroutine waits,
	// whether it wants the read or the write lock.
}
// main runs the mutex demonstration.
func main() {
	Mutex()
}
|
package main
import "fmt"
// main reads a case count, then for each case reads n and prints fib(n).
// NOTE(review): the Scanf errors are ignored — malformed input silently
// leaves the variables at zero; consider checking them.
func main() {
	var numOfCases int
	fmt.Scanf("%d", &numOfCases)
	for i := 0; i < numOfCases; i++ {
		var x int
		fmt.Scanf("%d", &x)
		fmt.Printf("Fib(%d) = %d\n", x, fib(x))
	}
}
// fib returns the n-th Fibonacci number (fib(0)=0, fib(1)=1), computed
// iteratively in O(n) time and O(1) space.
func fib(n int) int64 {
	if n == 0 {
		return 0
	}
	if n == 1 {
		return 1
	}
	prev, curr := int64(0), int64(1)
	for i := 2; i <= n; i++ {
		prev, curr = curr, prev+curr
	}
	return curr
}
|
package login
import (
"bufio"
"fmt"
"os"
"strings"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/foundriesio/fioctl/client"
"github.com/foundriesio/fioctl/subcommands"
)
// NewCommand builds the cobra `login` subcommand.
func NewCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "login",
		Short: "Access Foundries.io services with your client credentials",
		Run:   doLogin,
	}
}
// doLogin acquires or refreshes OAuth credentials: prompts for a client
// ID/secret when the config has none, refreshes an expired token when a
// refresh token exists, fetches a token when none is cached, and exits
// early if a valid token is already present. On success the OAuth config
// is saved.
func doLogin(cmd *cobra.Command, args []string) {
	logrus.Debug("Executing login command")
	creds := client.NewClientCredentials(subcommands.Config.ClientCredentials)
	if creds.Config.ClientId == "" || creds.Config.ClientSecret == "" {
		creds.Config.ClientId, creds.Config.ClientSecret = promptForCreds()
	}
	// promptForCreds exits on empty input, so this re-check is a defensive
	// guard for credentials coming from the stored config.
	if creds.Config.ClientId == "" || creds.Config.ClientSecret == "" {
		fmt.Println("Cannot execute login without client ID or client secret.")
		os.Exit(1)
	}
	expired, err := creds.IsExpired()
	subcommands.DieNotNil(err)
	if expired && creds.HasRefreshToken() {
		subcommands.DieNotNil(creds.Refresh())
	} else if creds.Config.AccessToken == "" {
		subcommands.DieNotNil(creds.Get())
	} else {
		// A non-expired access token already exists: nothing to do.
		fmt.Println("You are already logged in to Foundries.io services.")
		os.Exit(0)
	}
	subcommands.SaveOauthConfig(creds.Config)
	fmt.Println("You are now logged in to Foundries.io services.")
}
// promptForCreds interactively reads an OAuth client ID and secret from
// stdin, exiting the process when either is left empty.
func promptForCreds() (string, string) {
	logrus.Debug("Reading client ID/secret from stdin")
	scanner := bufio.NewScanner(os.Stdin)
	fmt.Print("Please visit:\n\n")
	fmt.Print("  https://app.foundries.io/settings/tokens/\n\n")
	fmt.Print("and create a new \"Application Credential\" to provide inputs below.\n\n")
	fmt.Print("Client ID: ")
	scanner.Scan()
	// TrimSpace also strips tabs and stray carriage returns, which the
	// original Trim(s, " ") missed.
	clientId := strings.TrimSpace(scanner.Text())
	fmt.Print("Client secret: ")
	scanner.Scan()
	clientSecret := strings.TrimSpace(scanner.Text())
	if clientId == "" || clientSecret == "" {
		fmt.Println("Client ID and client credentials are both required.")
		os.Exit(1)
	}
	return clientId, clientSecret
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.